merge autoland to mozilla-central. r=merge a=merge
author: Sebastian Hengst <archaeopteryx@coole-files.de>
date: Wed, 11 Oct 2017 11:38:09 +0200
changeset: 428140 a72bd6160609be2fa471be5f87955bdd47fafe22
parent: 428082 acfdf5d0d1f33f0550d4f566adb1084d9c724b41 (current diff)
parent: 428139 cc0c99d812fd1cb33e13450a1d025d1f7d69e244 (diff)
child: 428141 20d9ad08dd36fe5230ad0ccf6cb3e4865d7851cf
push id: 97
push user: fmarier@mozilla.com
push date: Sat, 14 Oct 2017 01:12:59 +0000
reviewers: merge, merge
milestone: 58.0a1
merge autoland to mozilla-central. r=merge a=merge MozReview-Commit-ID: EmKghzIsF92
--- a/browser/components/places/PlacesUIUtils.jsm
+++ b/browser/components/places/PlacesUIUtils.jsm
@@ -577,17 +577,18 @@ this.PlacesUIUtils = {
    *
    * @return  a Places Transaction that can be transacted for performing the
    *          move/insert command.
    */
   getTransactionForData(aData, aType, aNewParentGuid, aIndex, aCopy) {
     if (!this.SUPPORTED_FLAVORS.includes(aData.type))
       throw new Error(`Unsupported '${aData.type}' data type`);
 
-    if ("itemGuid" in aData) {
+    if ("itemGuid" in aData && "instanceId" in aData &&
+        aData.instanceId == PlacesUtils.instanceId) {
       if (!this.PLACES_FLAVORS.includes(aData.type))
         throw new Error(`itemGuid unexpectedly set on ${aData.type} data`);
 
       let info = { guid: aData.itemGuid,
                    newParentGuid: aNewParentGuid,
                    newIndex: aIndex };
       if (aCopy) {
         info.excludingAnnotation = "Places/SmartBookmark";
--- a/browser/components/places/tests/browser/browser.ini
+++ b/browser/components/places/tests/browser/browser.ini
@@ -48,16 +48,18 @@ subsuite = clipboard
 [browser_library_middleclick.js]
 [browser_library_move_bookmarks.js]
 [browser_library_open_leak.js]
 [browser_library_openFlatContainer.js]
 [browser_library_panel_leak.js]
 [browser_library_search.js]
 [browser_library_views_liveupdate.js]
 [browser_markPageAsFollowedLink.js]
+[browser_paste_bookmarks.js]
+subsuite = clipboard
 [browser_paste_into_tags.js]
 subsuite = clipboard
 [browser_sidebarpanels_click.js]
 skip-if = true # temporarily disabled for breaking the treeview - bug 658744
 [browser_sort_in_library.js]
 [browser_stayopenmenu.js]
 [browser_toolbar_drop_text.js]
 [browser_toolbar_overflow.js]
new file mode 100644
--- /dev/null
+++ b/browser/components/places/tests/browser/browser_paste_bookmarks.js
@@ -0,0 +1,103 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+"use strict";
+
+const TEST_URL = "http://example.com/";
+const TEST_URL1 = "https://example.com/otherbrowser";
+
+var PlacesOrganizer;
+var ContentTree;
+var bookmark;
+var bookmarkId;
+
+add_task(async function setup() {
+  await PlacesUtils.bookmarks.eraseEverything();
+  let organizer = await promiseLibrary();
+
+  registerCleanupFunction(async function() {
+    await promiseLibraryClosed(organizer);
+    await PlacesUtils.bookmarks.eraseEverything();
+  });
+
+  PlacesOrganizer = organizer.PlacesOrganizer;
+  ContentTree = organizer.ContentTree;
+
+  info("Selecting BookmarksToolbar in the left pane");
+  PlacesOrganizer.selectLeftPaneQuery("BookmarksToolbar");
+
+  bookmark = await PlacesUtils.bookmarks.insert({
+    parentGuid: PlacesUtils.bookmarks.toolbarGuid,
+    url: TEST_URL,
+    title: "0"
+  });
+  bookmarkId = await PlacesUtils.promiseItemId(bookmark.guid);
+
+  ContentTree.view.selectItems([bookmarkId]);
+
+  await promiseClipboard(() => {
+    info("Copying selection");
+    ContentTree.view.controller.cut();
+  }, PlacesUtils.TYPE_X_MOZ_PLACE);
+});
+
+add_task(async function paste() {
+  info("Selecting UnfiledBookmarks in the left pane");
+  PlacesOrganizer.selectLeftPaneQuery("UnfiledBookmarks");
+
+  info("Pasting clipboard");
+  await ContentTree.view.controller.paste();
+
+  let tree = await PlacesUtils.promiseBookmarksTree(PlacesUtils.bookmarks.unfiledGuid);
+
+  Assert.equal(tree.children.length, 1,
+               "Should be one bookmark in the unfiled folder.");
+  Assert.equal(tree.children[0].title, "0",
+               "Should have the correct title");
+  Assert.equal(tree.children[0].uri, TEST_URL,
+               "Should have the correct URL");
+
+  await PlacesUtils.bookmarks.remove(tree.children[0].guid);
+});
+
+add_task(async function paste_from_different_instance() {
+  let xferable = Cc["@mozilla.org/widget/transferable;1"]
+                   .createInstance(Ci.nsITransferable);
+  xferable.init(null);
+
+  // Fake data on the clipboard to pretend this is from a different instance
+  // of Firefox.
+  let data = {
+    "title": "test",
+    "id": 32,
+    "instanceId": "FAKEFAKEFAKE",
+    "itemGuid": "ZBf_TYkrYGvW",
+    "parent": 452,
+    "dateAdded": 1464866275853000,
+    "lastModified": 1507638113352000,
+    "type": "text/x-moz-place",
+    "uri": TEST_URL1
+  };
+  data = JSON.stringify(data);
+
+  xferable.addDataFlavor(PlacesUtils.TYPE_X_MOZ_PLACE);
+  xferable.setTransferData(PlacesUtils.TYPE_X_MOZ_PLACE, PlacesUtils.toISupportsString(data),
+                           data.length * 2);
+
+  Services.clipboard.setData(xferable, null, Ci.nsIClipboard.kGlobalClipboard);
+
+  info("Pasting clipboard");
+
+  await ContentTree.view.controller.paste();
+
+  let tree = await PlacesUtils.promiseBookmarksTree(PlacesUtils.bookmarks.unfiledGuid);
+
+  Assert.equal(tree.children.length, 1,
+               "Should be one bookmark in the unfiled folder.");
+  Assert.equal(tree.children[0].title, "test",
+               "Should have the correct title");
+  Assert.equal(tree.children[0].uri, TEST_URL1,
+               "Should have the correct URL");
+
+  await PlacesUtils.bookmarks.remove(tree.children[0].guid);
+});
--- a/browser/installer/package-manifest.in
+++ b/browser/installer/package-manifest.in
@@ -141,21 +141,16 @@
 #endif
 #endif
 @BINPATH@/@DLL_PREFIX@lgpllibs@DLL_SUFFIX@
 #ifdef MOZ_FFVPX
 @BINPATH@/@DLL_PREFIX@mozavutil@DLL_SUFFIX@
 @BINPATH@/@DLL_PREFIX@mozavcodec@DLL_SUFFIX@
 #endif
 @RESPATH@/browser/blocklist.xml
-#ifdef XP_UNIX
-#ifndef XP_MACOSX
-@RESPATH@/run-mozilla.sh
-#endif
-#endif
 #ifdef XP_WIN
 #ifdef _AMD64_
 @BINPATH@/@DLL_PREFIX@qipcap64@DLL_SUFFIX@
 #else
 @BINPATH@/@DLL_PREFIX@qipcap@DLL_SUFFIX@
 #endif
 #endif
 
--- a/browser/themes/shared/customizableui/panelUI.inc.css
+++ b/browser/themes/shared/customizableui/panelUI.inc.css
@@ -1730,16 +1730,23 @@ toolbarpaletteitem[place=panel] > .toolb
   overflow-y: auto;
   overflow-x: hidden;
 }
 
 .widget-overflow-list {
   width: @wideMenuPanelWidth@;
 }
 
+/* In customize mode, the overflow list is constrained by its container,
+ * so we set width: auto to avoid the scrollbar not fitting.
+ */
+#customization-panelHolder > .widget-overflow-list {
+  width: auto;
+}
+
 toolbaritem[overflowedItem=true],
 .widget-overflow-list .toolbarbutton-1 {
   width: 100%;
   max-width: @wideMenuPanelWidth@;
   background-repeat: no-repeat;
   background-position: 0 center;
 }
 
--- a/devtools/client/netmonitor/src/components/status-bar.js
+++ b/devtools/client/netmonitor/src/components/status-bar.js
@@ -18,64 +18,73 @@ const {
 const {
   getFormattedSize,
   getFormattedTime,
 } = require("../utils/format-utils");
 const { L10N } = require("../utils/l10n");
 
 const { button, div } = DOM;
 
+const REQUESTS_COUNT_EMPTY = L10N.getStr("networkMenu.summary.requestsCountEmpty");
+const TOOLTIP_PERF = L10N.getStr("networkMenu.summary.tooltip.perf");
+const TOOLTIP_REQUESTS_COUNT = L10N.getStr("networkMenu.summary.tooltip.requestsCount");
+const TOOLTIP_TRANSFERRED = L10N.getStr("networkMenu.summary.tooltip.transferred");
+const TOOLTIP_FINISH = L10N.getStr("networkMenu.summary.tooltip.finish");
+const TOOLTIP_DOM_CONTENT_LOADED =
+        L10N.getStr("networkMenu.summary.tooltip.domContentLoaded");
+const TOOLTIP_LOAD = L10N.getStr("networkMenu.summary.tooltip.load");
+
 function StatusBar({ summary, openStatistics, timingMarkers }) {
   let { count, contentSize, transferredSize, millis } = summary;
   let {
     DOMContentLoaded,
     load,
   } = timingMarkers;
 
-  let countText = count === 0 ? L10N.getStr("networkMenu.summary.requestsCountEmpty") :
+  let countText = count === 0 ? REQUESTS_COUNT_EMPTY :
     PluralForm.get(
       count, L10N.getFormatStrWithNumbers("networkMenu.summary.requestsCount", count)
   );
   let transferText = L10N.getFormatStrWithNumbers("networkMenu.summary.transferred",
     getFormattedSize(contentSize), getFormattedSize(transferredSize));
   let finishText = L10N.getFormatStrWithNumbers("networkMenu.summary.finish",
     getFormattedTime(millis));
 
   return (
     div({ className: "devtools-toolbar devtools-toolbar-bottom" },
       button({
         className: "devtools-button requests-list-network-summary-button",
-        title: L10N.getStr("networkMenu.summary.tooltip.perf"),
+        title: TOOLTIP_PERF,
         onClick: openStatistics,
       },
         div({ className: "summary-info-icon" }),
       ),
       div({
         className: "status-bar-label requests-list-network-summary-count",
-        title: L10N.getStr("networkMenu.summary.tooltip.requestsCount"),
+        title: TOOLTIP_REQUESTS_COUNT,
       }, countText),
       count !== 0 &&
         div({
           className: "status-bar-label requests-list-network-summary-transfer",
-          title: L10N.getStr("networkMenu.summary.tooltip.transferred"),
+          title: TOOLTIP_TRANSFERRED,
         }, transferText),
       count !== 0 &&
         div({
           className: "status-bar-label requests-list-network-summary-finish",
-          title: L10N.getStr("networkMenu.summary.tooltip.finish"),
+          title: TOOLTIP_FINISH,
         }, finishText),
       DOMContentLoaded > -1 &&
         div({
           className: "status-bar-label dom-content-loaded",
-          title: L10N.getStr("networkMenu.summary.tooltip.domContentLoaded"),
+          title: TOOLTIP_DOM_CONTENT_LOADED,
         }, `DOMContentLoaded: ${getFormattedTime(DOMContentLoaded)}`),
       load > -1 &&
         div({
           className: "status-bar-label load",
-          title: L10N.getStr("networkMenu.summary.tooltip.load"),
+          title: TOOLTIP_LOAD,
         }, `load: ${getFormattedTime(load)}`),
     )
   );
 }
 
 StatusBar.displayName = "StatusBar";
 
 StatusBar.propTypes = {
--- a/dom/animation/KeyframeEffectReadOnly.cpp
+++ b/dom/animation/KeyframeEffectReadOnly.cpp
@@ -552,17 +552,16 @@ KeyframeEffectReadOnly::EnsureBaseStyle(
     return;
   }
 
   if (!aBaseStyleContext) {
     aBaseStyleContext =
       aPresContext->StyleSet()->AsServo()->GetBaseContextForElement(
           mTarget->mElement,
           aPresContext,
-          nullptr,
           aPseudoType,
           aComputedStyle);
   }
   RefPtr<RawServoAnimationValue> baseValue =
     Servo_ComputedValues_ExtractAnimationValue(aBaseStyleContext,
                                                aProperty.mProperty).Consume();
   mBaseStyleValuesForServo.Put(aProperty.mProperty, baseValue);
 }
--- a/dom/canvas/CanvasRenderingContext2D.cpp
+++ b/dom/canvas/CanvasRenderingContext2D.cpp
@@ -2872,19 +2872,17 @@ GetFontStyleForServo(Element* aElement, 
   // values (2em, bolder, etc.)
   if (aElement && aElement->IsInUncomposedDoc()) {
     // Inherit from the canvas element.
     aPresShell->FlushPendingNotifications(FlushType::Style);
     // We need to use ResolveStyleLazily, which involves traversal,
     // instead of ResolveStyleFor() because we need up-to-date style even if
     // the canvas element is display:none.
     parentStyle =
-      styleSet->ResolveStyleLazily(aElement,
-                                   CSSPseudoElementType::NotPseudo,
-                                   nullptr);
+      styleSet->ResolveStyleLazily(aElement, CSSPseudoElementType::NotPseudo);
   } else {
     RefPtr<RawServoDeclarationBlock> declarations =
       CreateFontDeclarationForServo(NS_LITERAL_STRING("10px sans-serif"),
                                     aPresShell->GetDocument());
     MOZ_ASSERT(declarations);
 
     parentStyle = aPresShell->StyleSet()->AsServo()->
       ResolveForDeclarations(nullptr, declarations);
--- a/dom/canvas/WebGL2ContextFramebuffers.cpp
+++ b/dom/canvas/WebGL2ContextFramebuffers.cpp
@@ -311,16 +311,18 @@ WebGL2Context::InvalidateSubFramebuffer(
 
 void
 WebGL2Context::ReadBuffer(GLenum mode)
 {
     const char funcName[] = "readBuffer";
     if (IsContextLost())
         return;
 
+    gl->MakeCurrent();
+
     if (mBoundReadFramebuffer) {
         mBoundReadFramebuffer->ReadBuffer(funcName, mode);
         return;
     }
 
     // Operating on the default framebuffer.
     if (mode != LOCAL_GL_NONE &&
         mode != LOCAL_GL_BACK)
--- a/dom/html/nsGenericHTMLElement.cpp
+++ b/dom/html/nsGenericHTMLElement.cpp
@@ -3047,17 +3047,17 @@ IsOrHasAncestorWithDisplayNone(Element* 
     if (sc) {
       if (styleSet->IsGecko()) {
         sc = styleSet->ResolveStyleFor(element, sc,
                                        LazyComputeBehavior::Assert);
       } else {
         // Call ResolveStyleLazily to protect against stale element data in
         // the tree when styled by Servo.
         sc = styleSet->AsServo()->ResolveStyleLazily(
-            element, CSSPseudoElementType::NotPseudo, nullptr);
+            element, CSSPseudoElementType::NotPseudo);
       }
     } else {
       sc = nsComputedDOMStyle::GetStyleContextNoFlush(element,
                                                       nullptr, aPresShell);
     }
     if (sc->StyleDisplay()->mDisplay == StyleDisplay::None) {
       return true;
     }
--- a/dom/media/GraphDriver.cpp
+++ b/dom/media/GraphDriver.cpp
@@ -21,18 +21,16 @@
 extern mozilla::LazyLogModule gMediaStreamGraphLog;
 #ifdef LOG
 #undef LOG
 #endif // LOG
 #define LOG(type, msg) MOZ_LOG(gMediaStreamGraphLog, type, msg)
 
 namespace mozilla {
 
-StaticRefPtr<nsIThreadPool> AsyncCubebTask::sThreadPool;
-
 GraphDriver::GraphDriver(MediaStreamGraphImpl* aGraphImpl)
   : mIterationStart(0),
     mIterationEnd(0),
     mGraphImpl(aGraphImpl),
     mWaitState(WAITSTATE_RUNNING),
     mCurrentTimeStamp(TimeStamp::Now()),
     mPreviousDriver(nullptr),
     mNextDriver(nullptr),
@@ -88,29 +86,16 @@ GraphDriver::StateComputedTime() const
   return mGraphImpl->mStateComputedTime;
 }
 
 void GraphDriver::EnsureNextIteration()
 {
   mGraphImpl->EnsureNextIteration();
 }
 
-void GraphDriver::Shutdown()
-{
-  if (AsAudioCallbackDriver()) {
-    LOG(LogLevel::Debug,
-        ("Releasing audio driver off main thread (GraphDriver::Shutdown)."));
-    RefPtr<AsyncCubebTask> releaseEvent =
-      new AsyncCubebTask(AsAudioCallbackDriver(), AsyncCubebOperation::SHUTDOWN);
-    releaseEvent->Dispatch(NS_DISPATCH_SYNC);
-  } else {
-    Stop();
-  }
-}
-
 bool GraphDriver::Switching()
 {
   GraphImpl()->GetMonitor().AssertCurrentThreadOwns();
   return mNextDriver || mPreviousDriver;
 }
 
 GraphDriver* GraphDriver::NextDriver()
 {
@@ -233,22 +218,16 @@ ThreadedDriver::Start()
     if (NS_SUCCEEDED(rv)) {
       rv = mThread->EventTarget()->Dispatch(event.forget(), NS_DISPATCH_NORMAL);
       mScheduled = NS_SUCCEEDED(rv);
     }
   }
 }
 
 void
-ThreadedDriver::Resume()
-{
-  Start();
-}
-
-void
 ThreadedDriver::Revive()
 {
   // Note: only called on MainThread, without monitor
   // We know we weren't in a running state
   LOG(LogLevel::Debug, ("AudioCallbackDriver reviving."));
   // If we were switching, switch now. Otherwise, tell thread to run the main
   // loop again.
   MonitorAutoLock mon(mGraphImpl->GetMonitor());
@@ -263,17 +242,17 @@ ThreadedDriver::Revive()
 }
 
 void
 ThreadedDriver::RemoveCallback()
 {
 }
 
 void
-ThreadedDriver::Stop()
+ThreadedDriver::Shutdown()
 {
   NS_ASSERTION(NS_IsMainThread(), "Must be called on main thread");
   // mGraph's thread is not running so it's OK to do whatever here
   LOG(LogLevel::Debug, ("Stopping threads for MediaStreamGraph %p", this));
 
   if (mThread) {
     mThread->Shutdown();
     mThread = nullptr;
@@ -485,46 +464,30 @@ AsyncCubebTask::AsyncCubebTask(AudioCall
   NS_WARNING_ASSERTION(mDriver->mAudioStream || aOperation == INIT,
                        "No audio stream!");
 }
 
 AsyncCubebTask::~AsyncCubebTask()
 {
 }
 
-/* static */
-nsresult
-AsyncCubebTask::EnsureThread()
+SharedThreadPool*
+AudioCallbackDriver::GetInitShutdownThread()
 {
-  if (!sThreadPool) {
-    nsCOMPtr<nsIThreadPool> threadPool =
+  if (!mInitShutdownThread) {
+    mInitShutdownThread =
       SharedThreadPool::Get(NS_LITERAL_CSTRING("CubebOperation"), 1);
-    sThreadPool = threadPool;
-    // Need to null this out before xpcom-shutdown-threads Observers run
-    // since we don't know the order that the shutdown-threads observers
-    // will run.  ClearOnShutdown guarantees it runs first.
-    if (!NS_IsMainThread()) {
-      nsCOMPtr<nsIRunnable> runnable =
-        NS_NewRunnableFunction("AsyncCubebTask::EnsureThread", []() -> void {
-          ClearOnShutdown(&sThreadPool, ShutdownPhase::ShutdownThreads);
-        });
-      AbstractThread::MainThread()->Dispatch(runnable.forget());
-    } else {
-      ClearOnShutdown(&sThreadPool, ShutdownPhase::ShutdownThreads);
-    }
 
     const uint32_t kIdleThreadTimeoutMs = 2000;
 
-    nsresult rv = sThreadPool->SetIdleThreadTimeout(PR_MillisecondsToInterval(kIdleThreadTimeoutMs));
-    if (NS_WARN_IF(NS_FAILED(rv))) {
-      return rv;
-    }
+    mInitShutdownThread->
+      SetIdleThreadTimeout(PR_MillisecondsToInterval(kIdleThreadTimeoutMs));
   }
 
-  return NS_OK;
+  return mInitShutdownThread;
 }
 
 NS_IMETHODIMP
 AsyncCubebTask::Run()
 {
   MOZ_ASSERT(mDriver);
 
   switch(mOperation) {
@@ -768,26 +731,16 @@ void
 AudioCallbackDriver::Destroy()
 {
   LOG(LogLevel::Debug, ("AudioCallbackDriver destroyed."));
   mAudioInput = nullptr;
   mAudioStream.reset();
 }
 
 void
-AudioCallbackDriver::Resume()
-{
-  LOG(LogLevel::Debug,
-      ("Resuming audio threads for MediaStreamGraph %p", mGraphImpl));
-  if (cubeb_stream_start(mAudioStream) != CUBEB_OK) {
-    NS_WARNING("Could not start cubeb stream for MSG.");
-  }
-}
-
-void
 AudioCallbackDriver::Start()
 {
   if (mPreviousDriver) {
     if (mPreviousDriver->AsAudioCallbackDriver()) {
       LOG(LogLevel::Debug, ("Releasing audio driver off main thread."));
       RefPtr<AsyncCubebTask> releaseEvent =
         new AsyncCubebTask(mPreviousDriver->AsAudioCallbackDriver(),
                            AsyncCubebOperation::SHUTDOWN);
@@ -874,16 +827,26 @@ AudioCallbackDriver::WaitForNextIteratio
 
 void
 AudioCallbackDriver::WakeUp()
 {
   mGraphImpl->GetMonitor().AssertCurrentThreadOwns();
   mGraphImpl->GetMonitor().Notify();
 }
 
+void
+AudioCallbackDriver::Shutdown()
+{
+  LOG(LogLevel::Debug,
+      ("Releasing audio driver off main thread (GraphDriver::Shutdown)."));
+  RefPtr<AsyncCubebTask> releaseEvent =
+    new AsyncCubebTask(this, AsyncCubebOperation::SHUTDOWN);
+  releaseEvent->Dispatch(NS_DISPATCH_SYNC);
+}
+
 #if defined(XP_WIN)
 void
 AudioCallbackDriver::ResetDefaultDevice()
 {
   if (cubeb_stream_reset_default_device(mAudioStream) != CUBEB_OK) {
     NS_WARNING("Could not reset cubeb stream to default output device.");
   }
 }
--- a/dom/media/GraphDriver.h
+++ b/dom/media/GraphDriver.h
@@ -117,26 +117,22 @@ public:
   /* For real-time graphs, this waits until it's time to process more data. For
    * offline graphs, this is a no-op. */
   virtual void WaitForNextIteration() = 0;
   /* Wakes up the graph if it is waiting. */
   virtual void WakeUp() = 0;
   virtual void Destroy() {}
   /* Start the graph, init the driver, start the thread. */
   virtual void Start() = 0;
-  /* Stop the graph, shutting down the thread. */
-  virtual void Stop() = 0;
-  /* Resume after a stop */
-  virtual void Resume() = 0;
   /* Revive this driver, as more messages just arrived. */
   virtual void Revive() = 0;
   /* Remove Mixer callbacks when switching */
   virtual void RemoveCallback() = 0;
   /* Shutdown GraphDriver (synchronously) */
-  void Shutdown();
+  virtual void Shutdown() = 0;
   /* Rate at which the GraphDriver runs, in ms. This can either be user
    * controlled (because we are using a {System,Offline}ClockDriver, and decide
    * how often we want to wakeup/how much we want to process per iteration), or
    * it can be indirectly set by the latency of the audio backend, and the
    * number of buffers of this audio backend: say we have four buffers, and 40ms
    * latency, we will get a callback approximately every 10ms. */
   virtual uint32_t IterationDuration() = 0;
 
@@ -269,20 +265,19 @@ class MediaStreamGraphInitThreadRunnable
  * This class is a driver that manages its own thread.
  */
 class ThreadedDriver : public GraphDriver
 {
 public:
   explicit ThreadedDriver(MediaStreamGraphImpl* aGraphImpl);
   virtual ~ThreadedDriver();
   void Start() override;
-  void Stop() override;
-  void Resume() override;
   void Revive() override;
   void RemoveCallback() override;
+  void Shutdown() override;
   /**
    * Runs main control loop on the graph thread. Normally a single invocation
    * of this runs for the entire lifetime of the graph thread.
    */
   void RunThread();
   friend class MediaStreamGraphInitThreadRunnable;
   uint32_t IterationDuration() override {
     return MEDIA_GRAPH_TARGET_PERIOD_MS;
@@ -391,22 +386,21 @@ class AudioCallbackDriver : public Graph
 #endif
 {
 public:
   explicit AudioCallbackDriver(MediaStreamGraphImpl* aGraphImpl);
   virtual ~AudioCallbackDriver();
 
   void Destroy() override;
   void Start() override;
-  void Stop() override;
-  void Resume() override;
   void Revive() override;
   void RemoveCallback() override;
   void WaitForNextIteration() override;
   void WakeUp() override;
+  void Shutdown() override;
 #if defined(XP_WIN)
   void ResetDefaultDevice() override;
 #endif
 
   /* Static wrapper function cubeb calls back. */
   static long DataCallback_s(cubeb_stream * aStream,
                              void * aUser,
                              const void * aInputBuffer,
@@ -474,29 +468,36 @@ public:
    * mStarted for details. */
   bool IsStarted();
 
   /* Tell the driver whether this process is using a microphone or not. This is
    * thread safe. */
   void SetMicrophoneActive(bool aActive);
 
   void CompleteAudioContextOperations(AsyncCubebOperation aOperation);
+
+  /* Fetch, or create a shared thread pool with up to one thread for
+   * AsyncCubebTask. */
+  SharedThreadPool* GetInitShutdownThread();
+
 private:
   /**
    * On certain MacBookPro, the microphone is located near the left speaker.
    * We need to pan the sound output to the right speaker if we are using the
    * mic and the built-in speaker, or we will have terrible echo.  */
   void PanOutputIfNeeded(bool aMicrophoneActive);
   /**
    * This is called when the output device used by the cubeb stream changes. */
   void DeviceChangedCallback();
   /* Start the cubeb stream */
   bool StartStream();
   friend class AsyncCubebTask;
   bool Init();
+  void Stop();
+
   /* MediaStreamGraphs are always down/up mixed to output channels. */
   uint32_t mOutputChannels;
   /* The size of this buffer comes from the fact that some audio backends can
    * call back with a number of frames lower than one block (128 frames), so we
    * need to keep at most two block in the SpillBuffer, because we always round
    * up to block boundaries during an iteration.
    * This is only ever accessed on the audio callback thread. */
   SpillBuffer<AudioDataValue, WEBAUDIO_BLOCK_SIZE * 2> mScratchBuffer;
@@ -537,19 +538,19 @@ private:
 
   struct AutoInCallback
   {
     explicit AutoInCallback(AudioCallbackDriver* aDriver);
     ~AutoInCallback();
     AudioCallbackDriver* mDriver;
   };
 
-  /* Thread for off-main-thread initialization and
-   * shutdown of the audio stream. */
-  nsCOMPtr<nsIThread> mInitShutdownThread;
+  /* Shared thread pool with up to one thread for off-main-thread
+   * initialization and shutdown of the audio stream via AsyncCubebTask. */
+  RefPtr<SharedThreadPool> mInitShutdownThread;
   /* This must be accessed with the graph monitor held. */
   AutoTArray<StreamAndPromiseForOperation, 1> mPromisesForOperation;
   /* Used to queue us to add the mixer callback on first run. */
   bool mAddedMixer;
 
   /* This is atomic and is set by the audio callback thread. It can be read by
    * any thread safely. */
   Atomic<bool> mInCallback;
@@ -565,31 +566,29 @@ private:
 class AsyncCubebTask : public Runnable
 {
 public:
 
   AsyncCubebTask(AudioCallbackDriver* aDriver, AsyncCubebOperation aOperation);
 
   nsresult Dispatch(uint32_t aFlags = NS_DISPATCH_NORMAL)
   {
-    nsresult rv = EnsureThread();
-    if (!NS_FAILED(rv)) {
-      rv = sThreadPool->Dispatch(this, aFlags);
+    SharedThreadPool* threadPool = mDriver->GetInitShutdownThread();
+    if (!threadPool) {
+      return NS_ERROR_FAILURE;
     }
-    return rv;
+    return threadPool->Dispatch(this, aFlags);
   }
 
 protected:
   virtual ~AsyncCubebTask();
 
 private:
-  static nsresult EnsureThread();
+  NS_IMETHOD Run() override final;
 
-  NS_IMETHOD Run() override final;
-  static StaticRefPtr<nsIThreadPool> sThreadPool;
   RefPtr<AudioCallbackDriver> mDriver;
   AsyncCubebOperation mOperation;
   RefPtr<MediaStreamGraphImpl> mShutdownGrip;
 };
 
 } // namespace mozilla
 
 #endif // GRAPHDRIVER_H_
--- a/dom/media/MediaStreamGraph.cpp
+++ b/dom/media/MediaStreamGraph.cpp
@@ -1567,16 +1567,25 @@ public:
     if (mGraph->mDriver->AsAudioCallbackDriver()) {
       MOZ_ASSERT(!mGraph->mDriver->AsAudioCallbackDriver()->InCallback());
     }
 #endif
 
     mGraph->mDriver->Shutdown(); // This will wait until it's shutdown since
                                  // we'll start tearing down the graph after this
 
+    // Release the driver now so that an AudioCallbackDriver will release its
+    // SharedThreadPool reference.  Each SharedThreadPool reference must be
+    // released before SharedThreadPool::SpinUntilEmpty() runs on
+    // xpcom-shutdown-threads.  Don't wait for GC/CC to release references to
+    // objects owning streams, or for expiration of mGraph->mShutdownTimer,
+    // which won't otherwise release its reference on the graph until
+    // nsTimerImpl::Shutdown(), which runs after xpcom-shutdown-threads.
+    mGraph->mDriver = nullptr;
+
     // Safe to access these without the monitor since the graph isn't running.
     // We may be one of several graphs. Drop ticket to eventually unblock shutdown.
     if (mGraph->mShutdownTimer && !mGraph->mForceShutdownTicket) {
       MOZ_ASSERT(false,
         "AudioCallbackDriver took too long to shut down and we let shutdown"
         " continue - freezing and leaking");
 
       // The timer fired, so we may be deeper in shutdown now.  Block any further
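
[Editor's note, not part of the patch] The comment added in the hunk above states the ordering constraint: every SharedThreadPool reference must be released before SharedThreadPool::SpinUntilEmpty() runs at xpcom-shutdown-threads, so the graph now clears mDriver explicitly instead of waiting for GC/CC or for mShutdownTimer to expire. The stand-alone C++ sketch below is illustrative only; it uses std::shared_ptr and std::weak_ptr as stand-ins for RefPtr and SharedThreadPool to show how dropping the owner promptly is what releases the pool reference in time.

// Illustrative sketch (hypothetical types, std::shared_ptr standing in for
// RefPtr<SharedThreadPool>): the point is the ordering, every holder drops its
// reference before the "spin until the pool is empty" shutdown step, instead
// of waiting for a later GC/CC pass or timer to do it.
#include <cassert>
#include <memory>

struct ThreadPool {};

struct Driver {
  std::shared_ptr<ThreadPool> mPool = std::make_shared<ThreadPool>();
};

struct Graph {
  std::shared_ptr<Driver> mDriver = std::make_shared<Driver>();

  void ShutdownBlocking() {
    // ... the real code first waits for the driver to shut down ...
    mDriver.reset();  // release the driver (and its pool reference) right away
  }
};

int main() {
  Graph graph;
  std::weak_ptr<ThreadPool> pool = graph.mDriver->mPool;
  graph.ShutdownBlocking();
  // By the time the shutdown phase that waits for the pool runs, the last
  // reference is already gone rather than lingering on a live driver.
  assert(pool.expired());
  return 0;
}
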
--- a/dom/media/MediaStreamGraphImpl.h
+++ b/dom/media/MediaStreamGraphImpl.h
@@ -459,21 +459,16 @@ public:
 #ifdef DEBUG
     if (!OnGraphThreadOrNotRunning()) {
       mMonitor.AssertCurrentThreadOwns();
     }
 #endif
     return mDriver;
   }
 
-  bool RemoveMixerCallback(MixerCallbackReceiver* aReceiver)
-  {
-    return mMixer.RemoveCallback(aReceiver);
-  }
-
   /**
    * Effectively set the new driver, while we are switching.
    * It is only safe to call this at the very end of an iteration, when there
    * has been a SwitchAtNextIteration call during the iteration. The driver
    * should return and pass the control to the new driver shortly after.
    * We can also switch from Revive() (on MainThread), in which case the
    * monitor is held
    */
--- a/gfx/layers/Layers.cpp
+++ b/gfx/layers/Layers.cpp
@@ -2299,17 +2299,17 @@ LayerManager::Dump(std::stringstream& aS
     aStream << "<ul><li>";
   }
 #endif
   DumpSelf(aStream, aPrefix, aSorted);
 
   nsAutoCString pfx(aPrefix);
   pfx += "  ";
   if (!GetRoot()) {
-    aStream << nsPrintfCString("%s(null)", pfx.get()).get();
+    aStream << nsPrintfCString("%s(null)\n", pfx.get()).get();
     if (aDumpHtml) {
       aStream << "</li></ul>";
     }
     return;
   }
 
   if (aDumpHtml) {
     aStream << "<ul>";
--- a/gfx/layers/wr/WebRenderCommandBuilder.cpp
+++ b/gfx/layers/wr/WebRenderCommandBuilder.cpp
@@ -11,18 +11,20 @@
 #include "mozilla/gfx/DrawEventRecorder.h"
 #include "mozilla/layers/ImageClient.h"
 #include "mozilla/layers/WebRenderBridgeChild.h"
 #include "mozilla/layers/WebRenderLayerManager.h"
 #include "mozilla/layers/IpcResourceUpdateQueue.h"
 #include "mozilla/layers/ScrollingLayersHelper.h"
 #include "mozilla/layers/StackingContextHelper.h"
 #include "mozilla/layers/UpdateImageHelper.h"
+#include "gfxEnv.h"
 #include "nsDisplayListInvalidation.h"
 #include "WebRenderCanvasRenderer.h"
+#include "LayersLogging.h"
 #include "LayerTreeInvalidation.h"
 
 namespace mozilla {
 namespace layers {
 
 void WebRenderCommandBuilder::Destroy()
 {
   mLastCanvasDatas.Clear();
@@ -349,17 +351,17 @@ PaintItemByDrawTarget(nsDisplayItem* aIt
     break;
   case DisplayItemType::TYPE_FILTER:
     {
       if (aManager == nullptr) {
         aManager = new BasicLayerManager(BasicLayerManager::BLM_INACTIVE);
       }
 
       FrameLayerBuilder* layerBuilder = new FrameLayerBuilder();
-      layerBuilder->Init(aDisplayListBuilder, aManager);
+      layerBuilder->Init(aDisplayListBuilder, aManager, nullptr, true);
       layerBuilder->DidBeginRetainedLayerTransaction(aManager);
 
       aManager->BeginTransactionWithTarget(context);
 
       ContainerLayerParameters param;
       RefPtr<Layer> layer =
         static_cast<nsDisplayFilter*>(aItem)->BuildLayer(aDisplayListBuilder,
                                                          aManager, param);
@@ -370,16 +372,25 @@ PaintItemByDrawTarget(nsDisplayItem* aIt
 
         aManager->SetRoot(layer);
         layerBuilder->WillEndTransaction();
 
         static_cast<nsDisplayFilter*>(aItem)->PaintAsLayer(aDisplayListBuilder,
                                                            context, aManager);
       }
 
+#ifdef MOZ_DUMP_PAINTING
+      if (gfxUtils::DumpDisplayList() || gfxEnv::DumpPaint()) {
+        fprintf_stderr(gfxUtils::sDumpPaintFile, "Basic layer tree for painting contents of display item %s(%p):\n", aItem->Name(), aItem->Frame());
+        std::stringstream stream;
+        aManager->Dump(stream, "", gfxEnv::DumpPaintToFile());
+        fprint_stderr(gfxUtils::sDumpPaintFile, stream);  // not a typo, fprint_stderr declared in LayersLogging.h
+      }
+#endif
+
       if (aManager->InTransaction()) {
         aManager->AbortTransaction();
       }
       aManager->SetTarget(nullptr);
       break;
     }
   default:
     aItem->Paint(aDisplayListBuilder, context);
--- a/layout/base/AccessibleCaretManager.cpp
+++ b/layout/base/AccessibleCaretManager.cpp
@@ -501,18 +501,25 @@ AccessibleCaretManager::PressCaret(const
 }
 
 nsresult
 AccessibleCaretManager::DragCaret(const nsPoint& aPoint)
 {
   MOZ_ASSERT(mActiveCaret);
   MOZ_ASSERT(GetCaretMode() != CaretMode::None);
 
-  nsPoint point(aPoint.x, aPoint.y + mOffsetYToCaretLogicalPosition);
-  DragCaretInternal(point);
+  if (!mPresShell || !mPresShell->GetRootFrame() || !GetSelection()) {
+    return NS_ERROR_NULL_POINTER;
+  }
+
+  StopSelectionAutoScrollTimer();
+  DragCaretInternal(aPoint);
+
+  // We want to scroll the page even if we failed to drag the caret.
+  StartSelectionAutoScrollTimer(aPoint);
   UpdateCarets();
   return NS_OK;
 }
 
 nsresult
 AccessibleCaretManager::ReleaseCaret()
 {
   MOZ_ASSERT(mActiveCaret);
@@ -1205,39 +1212,34 @@ AccessibleCaretManager::CompareTreePosit
 {
   return (aStartFrame && aEndFrame &&
           nsLayoutUtils::CompareTreePosition(aStartFrame, aEndFrame) <= 0);
 }
 
 nsresult
 AccessibleCaretManager::DragCaretInternal(const nsPoint& aPoint)
 {
-  if (!mPresShell) {
-    return NS_ERROR_NULL_POINTER;
-  }
+  MOZ_ASSERT(mPresShell);
 
   nsIFrame* rootFrame = mPresShell->GetRootFrame();
-  if (!rootFrame) {
-    return NS_ERROR_NULL_POINTER;
-  }
+  MOZ_ASSERT(rootFrame, "We need root frame to compute caret dragging!");
 
-  nsPoint point = AdjustDragBoundary(aPoint);
+  nsPoint point = AdjustDragBoundary(
+    nsPoint(aPoint.x, aPoint.y + mOffsetYToCaretLogicalPosition));
 
   // Find out which content we point to
   nsIFrame* ptFrame = nsLayoutUtils::GetFrameForPoint(
     rootFrame, point,
     nsLayoutUtils::IGNORE_PAINT_SUPPRESSION | nsLayoutUtils::IGNORE_CROSS_DOC);
   if (!ptFrame) {
     return NS_ERROR_FAILURE;
   }
 
   RefPtr<nsFrameSelection> fs = GetFrameSelection();
-  if (!fs) {
-    return NS_ERROR_NULL_POINTER;
-  }
+  MOZ_ASSERT(fs);
 
   nsresult result;
   nsIFrame* newFrame = nullptr;
   nsPoint newPoint;
   nsPoint ptInFrame = point;
   nsLayoutUtils::TransformPoint(rootFrame, ptFrame, ptInFrame);
   result = fs->ConstrainFrameAndPointToAnchorSubtree(ptFrame, ptInFrame,
                                                      &newFrame, newPoint);
@@ -1250,47 +1252,26 @@ AccessibleCaretManager::DragCaretInterna
   }
 
   nsIFrame::ContentOffsets offsets =
     newFrame->GetContentOffsetsFromPoint(newPoint);
   if (offsets.IsNull()) {
     return NS_ERROR_FAILURE;
   }
 
-  Selection* selection = GetSelection();
-  if (!selection) {
-    return NS_ERROR_NULL_POINTER;
-  }
-
   if (GetCaretMode() == CaretMode::Selection &&
       !RestrictCaretDraggingOffsets(offsets)) {
     return NS_ERROR_FAILURE;
   }
 
   ClearMaintainedSelection();
 
-  nsIFrame* anchorFrame = nullptr;
-  selection->GetPrimaryFrameForAnchorNode(&anchorFrame);
-
-  nsIFrame* scrollable =
-    nsLayoutUtils::GetClosestFrameOfType(anchorFrame, LayoutFrameType::Scroll);
-  AutoWeakFrame weakScrollable = scrollable;
   fs->HandleClick(offsets.content, offsets.StartOffset(), offsets.EndOffset(),
                   GetCaretMode() == CaretMode::Selection, false,
                   offsets.associate);
-  if (!weakScrollable.IsAlive()) {
-    return NS_OK;
-  }
-
-  // Scroll scrolled frame.
-  nsIScrollableFrame* saf = do_QueryFrame(scrollable);
-  nsIFrame* capturingFrame = saf->GetScrolledFrame();
-  nsPoint ptInScrolled = point;
-  nsLayoutUtils::TransformPoint(rootFrame, capturingFrame, ptInScrolled);
-  fs->StartAutoScrollTimer(capturingFrame, ptInScrolled, kAutoScrollTimerDelay);
   return NS_OK;
 }
 
 nsRect
 AccessibleCaretManager::GetAllChildFrameRectsUnion(nsIFrame* aFrame) const
 {
   nsRect unionRect;
 
@@ -1375,16 +1356,61 @@ AccessibleCaretManager::AdjustDragBounda
       }
     }
   }
 
   return adjustedPoint;
 }
 
 void
+AccessibleCaretManager::StartSelectionAutoScrollTimer(
+  const nsPoint& aPoint) const
+{
+  Selection* selection = GetSelection();
+  MOZ_ASSERT(selection);
+
+  nsIFrame* anchorFrame = nullptr;
+  selection->GetPrimaryFrameForAnchorNode(&anchorFrame);
+  if (!anchorFrame) {
+    return;
+  }
+
+  nsIScrollableFrame* scrollFrame =
+    nsLayoutUtils::GetNearestScrollableFrame(
+      anchorFrame,
+      nsLayoutUtils::SCROLLABLE_SAME_DOC |
+      nsLayoutUtils::SCROLLABLE_INCLUDE_HIDDEN);
+  if (!scrollFrame) {
+    return;
+  }
+
+  nsIFrame* capturingFrame = scrollFrame->GetScrolledFrame();
+  if (!capturingFrame) {
+    return;
+  }
+
+  nsIFrame* rootFrame = mPresShell->GetRootFrame();
+  MOZ_ASSERT(rootFrame);
+  nsPoint ptInScrolled = aPoint;
+  nsLayoutUtils::TransformPoint(rootFrame, capturingFrame, ptInScrolled);
+
+  RefPtr<nsFrameSelection> fs = GetFrameSelection();
+  MOZ_ASSERT(fs);
+  fs->StartAutoScrollTimer(capturingFrame, ptInScrolled, kAutoScrollTimerDelay);
+}
+
+void
+AccessibleCaretManager::StopSelectionAutoScrollTimer() const
+{
+  RefPtr<nsFrameSelection> fs = GetFrameSelection();
+  MOZ_ASSERT(fs);
+  fs->StopAutoScrollTimer();
+}
+
+void
 AccessibleCaretManager::DispatchCaretStateChangedEvent(CaretChangedReason aReason) const
 {
   if (!mPresShell) {
     return;
   }
 
   FlushLayout();
   if (IsTerminated()) {
--- a/layout/base/AccessibleCaretManager.h
+++ b/layout/base/AccessibleCaretManager.h
@@ -186,16 +186,22 @@ protected:
   nsIFrame* GetFrameForFirstRangeStartOrLastRangeEnd(
     nsDirection aDirection,
     int32_t* aOutOffset,
     nsIContent** aOutContent = nullptr,
     int32_t* aOutContentOffset = nullptr) const;
 
   nsresult DragCaretInternal(const nsPoint& aPoint);
   nsPoint AdjustDragBoundary(const nsPoint& aPoint) const;
+
+  // Start the selection scroll timer if the caret is being dragged out of
+  // the scroll port.
+  void StartSelectionAutoScrollTimer(const nsPoint& aPoint) const;
+  void StopSelectionAutoScrollTimer() const;
+
   void ClearMaintainedSelection() const;
 
   // Caller is responsible to use IsTerminated() to check whether PresShell is
   // still valid.
   void FlushLayout() const;
 
   dom::Element* GetEditingHostForFrame(nsIFrame* aFrame) const;
   dom::Selection* GetSelection() const;
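
[Editor's note, not part of the patch] The AccessibleCaretManager change above moves the null checks for the pres shell, root frame, and selection into the public entry point DragCaret(), which returns NS_ERROR_NULL_POINTER, and downgrades them to MOZ_ASSERTs in DragCaretInternal() and the new auto-scroll helpers. The following minimal C++ sketch of that "validate at the public boundary, assert in private helpers" pattern is an editor's illustration only; the class and members are invented and assert() stands in for MOZ_ASSERT.

// Illustrative sketch only: hypothetical types, assert() in place of MOZ_ASSERT.
#include <cassert>
#include <cstdio>

class CaretManager {
 public:
  // Public entry point: check the state a caller can legitimately hit and
  // report failure, as DragCaret() now does with NS_ERROR_NULL_POINTER.
  bool DragCaret(int aPoint) {
    if (!mHasPresShell || !mHasSelection) {
      return false;
    }
    StopAutoScrollTimer();
    DragCaretInternal(aPoint);
    StartAutoScrollTimer(aPoint);  // scroll even if the drag itself failed
    return true;
  }

 private:
  // Private helpers: the checks above are now invariants, so just assert.
  void DragCaretInternal(int aPoint) {
    assert(mHasPresShell && "checked by DragCaret()");
    std::printf("dragging caret to %d\n", aPoint);
  }
  void StartAutoScrollTimer(int /*aPoint*/) { assert(mHasSelection); }
  void StopAutoScrollTimer() { assert(mHasSelection); }

  bool mHasPresShell = true;
  bool mHasSelection = true;
};

int main() {
  CaretManager manager;
  return manager.DragCaret(42) ? 0 : 1;
}
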
--- a/layout/base/nsPresContext.cpp
+++ b/layout/base/nsPresContext.cpp
@@ -2248,26 +2248,39 @@ nsPresContext::HasAuthorSpecifiedRules(c
   if (auto* geckoStyleContext = aFrame->StyleContext()->GetAsGecko()) {
     return
       nsRuleNode::HasAuthorSpecifiedRules(geckoStyleContext,
                                           aRuleTypeMask,
                                           UseDocumentColors());
   }
   Element* elem = aFrame->GetContent()->AsElement();
 
-  MOZ_ASSERT(elem->GetPseudoElementType() ==
-             aFrame->StyleContext()->GetPseudoType());
-  if (elem->HasServoData()) {
-    return Servo_HasAuthorSpecifiedRules(elem,
-                                         aRuleTypeMask,
-                                         UseDocumentColors());
-  } else {
+  // We need to handle non-generated content pseudos too, so we use
+  // the parent of generated content pseudo to be consistent.
+  if (elem->GetPseudoElementType() != CSSPseudoElementType::NotPseudo) {
+    MOZ_ASSERT(elem->GetParent(), "Pseudo element has no parent element?");
+    elem = elem->GetParent()->AsElement();
+  }
+  if (MOZ_UNLIKELY(!elem->HasServoData())) {
     // Probably shouldn't happen, but does. See bug 1387953
     return false;
   }
+
+  nsStyleContext* styleContext = aFrame->StyleContext();
+  CSSPseudoElementType pseudoType = styleContext->GetPseudoType();
+  // Anonymous boxes are more complicated, and we just assume that they
+  // cannot have any author-specified rules here.
+  if (pseudoType == CSSPseudoElementType::InheritingAnonBox ||
+      pseudoType == CSSPseudoElementType::NonInheritingAnonBox) {
+    return false;
+  }
+  return Servo_HasAuthorSpecifiedRules(styleContext->AsServo(),
+                                       elem, pseudoType,
+                                       aRuleTypeMask,
+                                       UseDocumentColors());
 }
 
 gfxUserFontSet*
 nsPresContext::GetUserFontSet(bool aFlushUserFontSet)
 {
   return mDocument->GetUserFontSet(aFlushUserFontSet);
 }
 
--- a/layout/painting/FrameLayerBuilder.cpp
+++ b/layout/painting/FrameLayerBuilder.cpp
@@ -118,16 +118,17 @@ static inline MaskLayerImageCache* GetMa
 
 FrameLayerBuilder::FrameLayerBuilder()
   : mRetainingManager(nullptr)
   , mContainingPaintedLayer(nullptr)
   , mInactiveLayerClip(nullptr)
   , mDetectedDOMModification(false)
   , mInvalidateAllLayers(false)
   , mInLayerTreeCompressionMode(false)
+  , mIsInactiveLayerManager(false)
   , mContainerLayerGeneration(0)
   , mMaxContainerLayerGeneration(0)
 {
   MOZ_COUNT_CTOR(FrameLayerBuilder);
 }
 
 FrameLayerBuilder::~FrameLayerBuilder()
 {
@@ -1792,24 +1793,26 @@ FrameLayerBuilder::Shutdown()
     delete gMaskLayerImageCache;
     gMaskLayerImageCache = nullptr;
   }
 }
 
 void
 FrameLayerBuilder::Init(nsDisplayListBuilder* aBuilder, LayerManager* aManager,
                         PaintedLayerData* aLayerData,
+                        bool aIsInactiveLayerManager,
                         const DisplayItemClip* aInactiveLayerClip)
 {
   mDisplayListBuilder = aBuilder;
   mRootPresContext = aBuilder->RootReferenceFrame()->PresContext()->GetRootPresContext();
   if (mRootPresContext) {
     mInitialDOMGeneration = mRootPresContext->GetDOMGeneration();
   }
   mContainingPaintedLayer = aLayerData;
+  mIsInactiveLayerManager = aIsInactiveLayerManager;
   mInactiveLayerClip = aInactiveLayerClip;
   aManager->SetUserData(&gLayerManagerLayerBuilder, this);
 }
 
 void
 FrameLayerBuilder::FlashPaint(gfxContext *aContext)
 {
   float r = float(rand()) / RAND_MAX;
@@ -4705,17 +4708,18 @@ FrameLayerBuilder::AddPaintedDisplayItem
   if (entry) {
     entry->mContainerLayerFrame = aContainerState.GetContainerFrame();
     if (entry->mContainerLayerGeneration == 0) {
       entry->mContainerLayerGeneration = mContainerLayerGeneration;
     }
     if (tempManager) {
       FLB_LOG_PAINTED_LAYER_DECISION(aLayerData, "Creating nested FLB for item %p\n", aItem);
       FrameLayerBuilder* layerBuilder = new FrameLayerBuilder();
-      layerBuilder->Init(mDisplayListBuilder, tempManager, aLayerData, &aClip);
+      layerBuilder->Init(mDisplayListBuilder, tempManager, aLayerData, true,
+                         &aClip);
 
       tempManager->BeginTransaction();
       if (mRetainingManager) {
         layerBuilder->DidBeginRetainedLayerTransaction(tempManager);
       }
 
       UniquePtr<LayerProperties> props(LayerProperties::CloneFrom(tempManager->GetRoot()));
       RefPtr<Layer> tmpLayer =
--- a/layout/painting/FrameLayerBuilder.h
+++ b/layout/painting/FrameLayerBuilder.h
@@ -335,16 +335,17 @@ public:
 
   FrameLayerBuilder();
   ~FrameLayerBuilder();
 
   static void Shutdown();
 
   void Init(nsDisplayListBuilder* aBuilder, LayerManager* aManager,
             PaintedLayerData* aLayerData = nullptr,
+            bool aIsInactiveLayerManager = false,
             const DisplayItemClip* aInactiveLayerClip = nullptr);
 
   /**
    * Call this to notify that we have just started a transaction on the
    * retained layer manager aManager.
    */
   void DidBeginRetainedLayerTransaction(LayerManager* aManager);
 
@@ -725,17 +726,17 @@ public:
 
   const DisplayItemClip* GetInactiveLayerClip() const
   {
     return mInactiveLayerClip;
   }
 
   bool IsBuildingRetainedLayers()
   {
-    return !mContainingPaintedLayer && mRetainingManager;
+    return !mIsInactiveLayerManager && mRetainingManager;
   }
 
   /**
    * Attempt to build the most compressed layer tree possible, even if it means
    * throwing away existing retained buffers.
    */
   void SetLayerTreeCompressionMode() { mInLayerTreeCompressionMode = true; }
   bool CheckInLayerTreeCompressionMode();
@@ -794,15 +795,17 @@ protected:
   /**
    * Indicates that the entire layer tree should be rerendered
    * during this paint.
    */
   bool                                mInvalidateAllLayers;
 
   bool                                mInLayerTreeCompressionMode;
 
+  bool                                mIsInactiveLayerManager;
+
   uint32_t                            mContainerLayerGeneration;
   uint32_t                            mMaxContainerLayerGeneration;
 };
 
 } // namespace mozilla
 
 #endif /* FRAMELAYERBUILDER_H_ */
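
[Editor's note, not part of the patch] For context on the FrameLayerBuilder change above: IsBuildingRetainedLayers() used to infer "not an inactive layer manager" from the absence of mContainingPaintedLayer, which stops holding once the WebRender fallback path creates an inactive FrameLayerBuilder with no containing PaintedLayerData (it now passes aIsInactiveLayerManager = true to Init()). The short C++ sketch below is illustrative only and simplified (it drops the mRetainingManager part of the check); the types are hypothetical stand-ins, not the real classes.

// Illustrative sketch only: a property derived from another pointer breaks as
// soon as a new caller creates an inactive builder without that pointer, so
// the patch threads an explicit flag through Init() instead.
#include <cassert>

struct PaintedLayerData {};

class Builder {
 public:
  void Init(PaintedLayerData* aLayerData = nullptr,
            bool aIsInactiveLayerManager = false) {
    mContainingPaintedLayer = aLayerData;
    mIsInactiveLayerManager = aIsInactiveLayerManager;
  }

  bool IsBuildingRetainedLayers() const {
    // Before: inferred from !mContainingPaintedLayer, which misclassifies an
    // inactive builder that happens to have no containing painted layer.
    return !mIsInactiveLayerManager;
  }

 private:
  PaintedLayerData* mContainingPaintedLayer = nullptr;
  bool mIsInactiveLayerManager = false;
};

int main() {
  Builder webrenderFallback;
  // The WebRender fallback path: no containing painted layer, but inactive.
  webrenderFallback.Init(nullptr, /* aIsInactiveLayerManager = */ true);
  assert(!webrenderFallback.IsBuildingRetainedLayers());
  return 0;
}
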
--- a/layout/painting/nsDisplayList.cpp
+++ b/layout/painting/nsDisplayList.cpp
@@ -5433,16 +5433,20 @@ nsDisplayBoxShadowOuter::CreateWebRender
   for (uint32_t i = 0; i < rects.Length(); ++i) {
     LayoutDeviceRect clipRect = LayoutDeviceRect::FromAppUnits(
         rects[i], appUnitsPerDevPixel);
     nsCSSShadowArray* shadows = mFrame->StyleEffects()->mBoxShadow;
     MOZ_ASSERT(shadows);
 
     for (uint32_t j = shadows->Length(); j  > 0; j--) {
       nsCSSShadowItem* shadow = shadows->ShadowAt(j - 1);
+      if (shadow->mInset) {
+        continue;
+      }
+
       float blurRadius = float(shadow->mRadius) / float(appUnitsPerDevPixel);
       gfx::Color shadowColor = nsCSSRendering::GetShadowColor(shadow,
                                                               mFrame,
                                                               mOpacity);
 
       // We don't move the shadow rect here since WR does it for us
       // Now translate everything to device pixels.
       nsRect shadowRect = frameRect;
new file mode 100644
--- /dev/null
+++ b/layout/reftests/box-shadow/boxshadow-mixed-2-ref.html
@@ -0,0 +1,26 @@
+<!DOCTYPE HTML>
+<title>Reference, bug 1402060</title>
+<style>
+
+.blackAtTop {
+  width: 100px;
+  height: 90px;
+  border-top: 10px solid black;
+}
+
+.blue {
+  width: 100px;
+  height: 100px;
+  background: blue;
+  margin-left: 10px;
+}
+
+.white {
+  width: 90px;
+  height: 90px;
+  background: white;
+}
+
+</style>
+
+<div class="blackAtTop"><div class="blue"><div class="white"></div></div></div>
new file mode 100644
--- /dev/null
+++ b/layout/reftests/box-shadow/boxshadow-mixed-2.html
@@ -0,0 +1,13 @@
+<!DOCTYPE HTML>
+<title>Testcase, bug 1402060</title>
+<style>
+
+div {
+  width: 100px;
+  height: 100px;
+  box-shadow: inset 0 10px black, 10px 10px blue;
+}
+
+</style>
+
+<div></div>
--- a/layout/reftests/box-shadow/reftest.list
+++ b/layout/reftests/box-shadow/reftest.list
@@ -11,16 +11,17 @@ random != boxshadow-blur-2.html boxshado
 == boxshadow-rounding.html boxshadow-rounding-ref.html
 # One uses old path, one uses WR box shadow.
 fails-if(Android) fuzzy-if(webrender,50,3310) == boxshadow-button.html boxshadow-button-ref.html
 fuzzy-if(OSX==1010,1,24) fuzzy-if(d2d,16,908) fuzzy-if(webrender,18,2160) == boxshadow-large-border-radius.html boxshadow-large-border-radius-ref.html # Bug 1209649
 
 fails-if(Android) == boxshadow-fileupload.html boxshadow-fileupload-ref.html
 fuzzy-if(skiaContent,13,28) fuzzy-if(webrender,29-29,453-453) == boxshadow-inner-basic.html boxshadow-inner-basic-ref.svg
 random-if(layersGPUAccelerated) == boxshadow-mixed.html boxshadow-mixed-ref.html
+== boxshadow-mixed-2.html boxshadow-mixed-2-ref.html
 random-if(d2d) fuzzy-if(skiaContent,1,100) fuzzy-if(webrender,127,3528) == boxshadow-rounded-spread.html boxshadow-rounded-spread-ref.html
 fuzzy-if(skiaContent,1,50) HTTP(..) == boxshadow-dynamic.xul boxshadow-dynamic-ref.xul
 random-if(d2d) fails-if(webrender) == boxshadow-onecorner.html boxshadow-onecorner-ref.html
 random-if(d2d) fails-if(webrender) == boxshadow-twocorners.html boxshadow-twocorners-ref.html
 random-if(d2d) fails-if(webrender) == boxshadow-threecorners.html boxshadow-threecorners-ref.html
 fuzzy(2,440) fuzzy-if(webrender,25,1300) == boxshadow-skiprect.html boxshadow-skiprect-ref.html
 == boxshadow-opacity.html boxshadow-opacity-ref.html
 == boxshadow-color-rounding.html boxshadow-color-rounding-ref.html
new file mode 100644
--- /dev/null
+++ b/layout/reftests/bugs/1406183-1-ref.html
@@ -0,0 +1,23 @@
+<!DOCTYPE html>
+<meta charset="utf-8">
+
+<title>Reference for bug 1406183: ImageLayer inside inactive BasicLayerManager for fallback nsDisplayFilter is drawn at the wrong position</title>
+
+<style>
+
+body {
+  margin: 0;
+}
+
+.outer {
+  margin-left: 100px;
+  margin-top: 50px;
+  width: 200px;
+  height: 200px;
+}
+
+</style>
+
+<div class="outer">
+  <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAYAAAAGCAIAAABvrngfAAAAFklEQVQImWMwjWhCQwxECoW3oCHihAB0LyYv5/oAHwAAAABJRU5ErkJggg==">
+</div>
new file mode 100644
--- /dev/null
+++ b/layout/reftests/bugs/1406183-1.html
@@ -0,0 +1,30 @@
+<!DOCTYPE html>
+<meta charset="utf-8">
+
+<title>Testcase for bug 1406183: ImageLayer inside inactive BasicLayerManager for fallback nsDisplayFilter is drawn at the wrong position</title>
+
+<style>
+
+body {
+  margin: 0;
+}
+
+.outer {
+  margin-left: 100px;
+  margin-top: 50px;
+  width: 200px;
+  height: 200px;
+}
+
+.filter {
+  height: 200px;
+  filter: hue-rotate(0deg);
+}
+
+</style>
+
+<div class="outer">
+  <div class="filter">
+    <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAYAAAAGCAIAAABvrngfAAAAFklEQVQImWMwjWhCQwxECoW3oCHihAB0LyYv5/oAHwAAAABJRU5ErkJggg==">
+  </div>
+</div>
--- a/layout/reftests/bugs/reftest.list
+++ b/layout/reftests/bugs/reftest.list
@@ -2040,8 +2040,9 @@ needs-focus != 1377447-1.html 1377447-2.
 == 1381821.html 1381821-ref.html
 == 1395650-1.html 1395650-1-ref.html
 == 1398500-1.html 1398500-1-ref.html
 == 1401317.html 1401317-ref.html
 == 1401992.html 1401992-ref.html
 == 1405878-1.xml 1405878-1-ref.xml
 == 1404057.html 1404057-ref.html
 != 1404057.html 1404057-noref.html
+== 1406183-1.html 1406183-1-ref.html
--- a/layout/style/ServoBindingList.h
+++ b/layout/style/ServoBindingList.h
@@ -582,17 +582,19 @@ SERVO_BINDING_FUNC(Servo_ResolvePseudoSt
                    mozilla::CSSPseudoElementType pseudo_type,
                    bool is_probe,
                    ServoStyleContextBorrowedOrNull inherited_style,
                    RawServoStyleSetBorrowed set)
 SERVO_BINDING_FUNC(Servo_SetExplicitStyle, void,
                    RawGeckoElementBorrowed element,
                    ServoStyleContextBorrowed primary_style)
 SERVO_BINDING_FUNC(Servo_HasAuthorSpecifiedRules, bool,
+                   ServoStyleContextBorrowed style,
                    RawGeckoElementBorrowed element,
+                   mozilla::CSSPseudoElementType pseudo_type,
                    uint32_t rule_type_mask,
                    bool author_colors_allowed)
 
 // Resolves style for an element or pseudo-element without processing pending
 // restyles first. The Element and its ancestors may be unstyled, have pending
 // restyles, or be in a display:none subtree. Styles are cached when possible,
 // though caching is not possible within display:none subtrees, and the styles
 // may be invalidated by already-scheduled restyles.
--- a/layout/style/ServoBindings.cpp
+++ b/layout/style/ServoBindings.cpp
@@ -2836,8 +2836,20 @@ Gecko_AddBufferToCrashReport(const void*
 {
   MOZ_ASSERT(NS_IsMainThread());
 #ifdef MOZ_CRASHREPORTER
   nsCOMPtr<nsICrashReporter> cr = do_GetService("@mozilla.org/toolkit/crash-reporter;1");
   NS_ENSURE_TRUE_VOID(cr);
   cr->RegisterAppMemory((uint64_t) addr, len);
 #endif
 }
+
+void Gecko_AnnotateCrashReport(const char* key_str, const char* value_str)
+{
+  MOZ_ASSERT(NS_IsMainThread());
+  nsDependentCString key(key_str);
+  nsDependentCString value(value_str);
+#ifdef MOZ_CRASHREPORTER
+  nsCOMPtr<nsICrashReporter> cr = do_GetService("@mozilla.org/toolkit/crash-reporter;1");
+  NS_ENSURE_TRUE_VOID(cr);
+  cr->AnnotateCrashReport(key, value);
+#endif
+}
--- a/layout/style/ServoBindings.h
+++ b/layout/style/ServoBindings.h
@@ -676,16 +676,17 @@ void Gecko_UnregisterProfilerThread();
 bool Gecko_DocumentRule_UseForPresentation(RawGeckoPresContextBorrowed,
                                            const nsACString* aPattern,
                                            mozilla::css::URLMatchingFunction aURLMatchingFunction);
 
 // Allocator hinting.
 void Gecko_SetJemallocThreadLocalArena(bool enabled);
 
 void Gecko_AddBufferToCrashReport(const void* addr, size_t len);
+void Gecko_AnnotateCrashReport(const char* key_str, const char* value_str);
 
 // Pseudo-element flags.
 #define CSS_PSEUDO_ELEMENT(name_, value_, flags_) \
   const uint32_t SERVO_CSS_PSEUDO_ELEMENT_FLAGS_##name_ = flags_;
 #include "nsCSSPseudoElementList.h"
 #undef CSS_PSEUDO_ELEMENT
 
 #define SERVO_BINDING_FUNC(name_, return_, ...) return_ name_(__VA_ARGS__);
--- a/layout/style/ServoStyleSet.cpp
+++ b/layout/style/ServoStyleSet.cpp
@@ -339,17 +339,17 @@ already_AddRefed<ServoStyleContext>
 ServoStyleSet::ResolveStyleFor(Element* aElement,
                                ServoStyleContext* aParentContext,
                                LazyComputeBehavior aMayCompute)
 {
   RefPtr<ServoStyleContext> computedValues;
   if (aMayCompute == LazyComputeBehavior::Allow) {
     PreTraverseSync();
     return ResolveStyleLazilyInternal(
-        aElement, CSSPseudoElementType::NotPseudo, nullptr);
+        aElement, CSSPseudoElementType::NotPseudo);
   }
 
   return ResolveServoStyle(aElement);
 }
 
 /**
  * Clears any stale Servo element data that might exist in the specified
  * element's document.  Upon destruction, asserts that the element and all
@@ -595,17 +595,16 @@ ServoStyleSet::ResolvePseudoElementStyle
 
   MOZ_ASSERT(computedValues);
   return computedValues.forget();
 }
 
 already_AddRefed<ServoStyleContext>
 ServoStyleSet::ResolveStyleLazily(Element* aElement,
                                   CSSPseudoElementType aPseudoType,
-                                  nsAtom* aPseudoTag,
                                   StyleRuleInclusion aRuleInclusion)
 {
   // Lazy style computation avoids storing any new data in the tree.
   // If the tree has stale data in it, then the AutoClearStaleData below
   // will ensure it's cleared so we don't use it. But if the document is
   // in the bfcache, then we will have valid, usable data in the tree,
   // but we don't want to use it. Instead we want to pretend as if the
   // document has no pres shell and no styles.
@@ -617,17 +616,17 @@ ServoStyleSet::ResolveStyleLazily(Elemen
   // existing styles in the DOM, then we would do selector matching on the
   // undisplayed element with the caller's style set's rules, but inherit from
   // values that were computed with the style set from the target element's
   // hidden-by-the-bfcache-entry pres shell.
   bool ignoreExistingStyles = aElement->OwnerDoc()->GetBFCacheEntry();
 
   AutoClearStaleData guard(aElement);
   PreTraverseSync();
-  return ResolveStyleLazilyInternal(aElement, aPseudoType, aPseudoTag,
+  return ResolveStyleLazilyInternal(aElement, aPseudoType,
                                     aRuleInclusion,
                                     ignoreExistingStyles);
 }
 
 already_AddRefed<ServoStyleContext>
 ServoStyleSet::ResolveInheritingAnonymousBoxStyle(nsAtom* aPseudoTag,
                                                   ServoStyleContext* aParentContext)
 {
@@ -981,20 +980,22 @@ ServoStyleSet::StyleDocument(ServoTraver
       root->HasAnyOfFlags(Element::kAllServoDescendantBits | NODE_NEEDS_FRAME);
 
     if (parent) {
       MOZ_ASSERT(root == doc->GetServoRestyleRoot());
       if (parent->HasDirtyDescendantsForServo()) {
         // If any style invalidation was triggered in our siblings, then we may
         // need to post-traverse them, even if the root wasn't restyled after
         // all.
+        uint32_t existingBits = doc->GetServoRestyleRootDirtyBits();
+        // We need to propagate the existing bits to the parent.
+        parent->SetFlags(existingBits);
         doc->SetServoRestyleRoot(
             parent,
-            doc->GetServoRestyleRootDirtyBits() |
-            ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO);
+            existingBits | ELEMENT_HAS_DIRTY_DESCENDANTS_FOR_SERVO);
         postTraversalRequired = true;
       }
     }
   }
 
   // If there are still animation restyles needed, trigger a second traversal to
   // update CSS animations or transitions' styles.
   //
@@ -1215,17 +1216,16 @@ ServoStyleSet::GetAnimationValues(
                            mRawSet.get(),
                            &aAnimationValues);
 }
 
 already_AddRefed<ServoStyleContext>
 ServoStyleSet::GetBaseContextForElement(
   Element* aElement,
   nsPresContext* aPresContext,
-  nsAtom* aPseudoTag,
   CSSPseudoElementType aPseudoType,
   const ServoStyleContext* aStyle)
 {
   // Servo_StyleSet_GetBaseComputedValuesForElement below won't handle ignoring
   // existing element data for bfcached documents. (See comment in
   // ResolveStyleLazily about these bfcache issues.)
   MOZ_RELEASE_ASSERT(!aElement->OwnerDoc()->GetBFCacheEntry(),
              "GetBaseContextForElement does not support documents in the "
@@ -1329,17 +1329,16 @@ ServoStyleSet::ClearNonInheritingStyleCo
   for (RefPtr<ServoStyleContext>& ptr : mNonInheritingStyleContexts) {
     ptr = nullptr;
   }
 }
 
 already_AddRefed<ServoStyleContext>
 ServoStyleSet::ResolveStyleLazilyInternal(Element* aElement,
                                           CSSPseudoElementType aPseudoType,
-                                          nsAtom* aPseudoTag,
                                           StyleRuleInclusion aRuleInclusion,
                                           bool aIgnoreExistingStyles)
 {
   mPresContext->EffectCompositor()->PreTraverse(aElement, aPseudoType);
   MOZ_ASSERT(!StylistNeedsUpdate());
 
   AutoSetInServoTraversal guard(this);
 
--- a/layout/style/ServoStyleSet.h
+++ b/layout/style/ServoStyleSet.h
@@ -214,21 +214,20 @@ public:
   ResolvePseudoElementStyle(dom::Element* aOriginatingElement,
                             CSSPseudoElementType aType,
                             ServoStyleContext* aParentContext,
                             dom::Element* aPseudoElement);
 
   // Resolves style for a (possibly-pseudo) Element without assuming that the
   // style has been resolved. If the element was unstyled and a new style
   // context was resolved, it is not stored in the DOM. (That is, the element
-  // remains unstyled.) |aPeudoTag| and |aPseudoType| must match.
+  // remains unstyled.)
   already_AddRefed<ServoStyleContext>
   ResolveStyleLazily(dom::Element* aElement,
                      CSSPseudoElementType aPseudoType,
-                     nsAtom* aPseudoTag,
                      StyleRuleInclusion aRules =
                        StyleRuleInclusion::All);
 
   // Get a style context for an anonymous box.  aPseudoTag is the pseudo-tag to
   // use and must be non-null.  It must be an anon box, and must be one that
   // inherits style from the given aParentContext.
   already_AddRefed<ServoStyleContext>
   ResolveInheritingAnonymousBoxStyle(nsAtom* aPseudoTag,
@@ -385,17 +384,16 @@ public:
   nsCSSCounterStyleRule* CounterStyleRuleForName(nsAtom* aName);
 
   // Get all the currently-active font feature values set.
   already_AddRefed<gfxFontFeatureValueSet> BuildFontFeatureValueSet();
 
   already_AddRefed<ServoStyleContext>
   GetBaseContextForElement(dom::Element* aElement,
                            nsPresContext* aPresContext,
-                           nsAtom* aPseudoTag,
                            CSSPseudoElementType aPseudoType,
                            const ServoStyleContext* aStyle);
 
   /**
    * Resolve style for a given declaration block with/without the parent style.
    * If the parent style is not specified, the document default computed values
    * is used.
    */
@@ -553,17 +551,16 @@ private:
    *
    * This should only be called if StylistNeedsUpdate returns true.
    */
   void UpdateStylist();
 
   already_AddRefed<ServoStyleContext>
     ResolveStyleLazilyInternal(dom::Element* aElement,
                                CSSPseudoElementType aPseudoType,
-                               nsAtom* aPseudoTag,
                                StyleRuleInclusion aRules =
                                  StyleRuleInclusion::All,
                                bool aIgnoreExistingStyles = false);
 
   void RunPostTraversalTasks();
 
   void PrependSheetOfType(SheetType aType,
                           ServoStyleSheet* aSheet);
new file mode 100644
--- /dev/null
+++ b/layout/style/crashtests/1401825.html
@@ -0,0 +1,7 @@
+<!DOCTYPE html>
+<style>
+::-moz-list-bullet {
+  -moz-appearance: button;
+}
+</style>
+<li></li>
--- a/layout/style/crashtests/crashtests.list
+++ b/layout/style/crashtests/crashtests.list
@@ -225,16 +225,17 @@ load 1400035.html
 load 1400325.html
 load 1400926.html
 load 1400936-1.html
 load 1400936-2.html
 load 1401256.html
 load 1401692.html
 load 1401706.html
 load 1401801.html
+load 1401825.html
 load 1402218-1.html
 load 1402366.html
 load 1402419.html
 load 1402472.html
 load 1403028.html
 load 1403433.html
 load 1403465.html
 load 1403592.html
--- a/layout/style/nsComputedDOMStyle.cpp
+++ b/layout/style/nsComputedDOMStyle.cpp
@@ -710,17 +710,17 @@ nsComputedDOMStyle::DoGetStyleContextNoF
           if (presContext && presContext->StyleSet()->IsGecko()) {
             nsStyleSet* styleSet = presContext->StyleSet()->AsGecko();
             return styleSet->ResolveStyleByRemovingAnimation(
                      aElement, result->AsGecko(),
                      eRestyle_AllHintsWithAnimations);
           } else {
             return presContext->StyleSet()->AsServo()->
               GetBaseContextForElement(aElement, presContext,
-                                       aPseudo, pseudoType, result->AsServo());
+                                       pseudoType, result->AsServo());
           }
         }
 
         // this function returns an addrefed style context
         RefPtr<nsStyleContext> ret = result;
         return ret.forget();
       }
     }
@@ -737,23 +737,23 @@ nsComputedDOMStyle::DoGetStyleContextNoF
 
   // For Servo, compute the result directly without recursively building up
   // a throwaway style context chain.
   if (ServoStyleSet* servoSet = styleSet->GetAsServo()) {
     StyleRuleInclusion rules = aStyleType == eDefaultOnly
                                ? StyleRuleInclusion::DefaultOnly
                                : StyleRuleInclusion::All;
     RefPtr<ServoStyleContext> result =
-       servoSet->ResolveStyleLazily(aElement, pseudoType, aPseudo, rules);
+       servoSet->ResolveStyleLazily(aElement, pseudoType, rules);
     if (aAnimationFlag == eWithAnimation) {
       return result.forget();
     }
 
     return servoSet->GetBaseContextForElement(aElement, presContext,
-                                              aPseudo, pseudoType, result);
+                                              pseudoType, result);
   }
 
   RefPtr<GeckoStyleContext> parentContext;
   nsIContent* parent = aPseudo ? aElement : aElement->GetParent();
   // Don't resolve parent context for document fragments.
   if (parent && parent->IsElement()) {
     RefPtr<nsStyleContext> p =
       GetStyleContextNoFlush(parent->AsElement(), nullptr,
--- a/memory/build/mozjemalloc.cpp
+++ b/memory/build/mozjemalloc.cpp
@@ -502,27 +502,22 @@ static size_t recycled_size;
 
 /*
  * Mutexes based on spinlocks.  We can't use normal pthread spinlocks in all
  * places, because they require malloc()ed memory, which causes bootstrapping
  * issues in some cases.
  */
 #if defined(XP_WIN)
 #define malloc_mutex_t CRITICAL_SECTION
-#define malloc_spinlock_t CRITICAL_SECTION
 #elif defined(XP_DARWIN)
 struct malloc_mutex_t {
 	OSSpinLock	lock;
 };
-struct malloc_spinlock_t {
-	OSSpinLock	lock;
-};
 #else
 typedef pthread_mutex_t malloc_mutex_t;
-typedef pthread_mutex_t malloc_spinlock_t;
 #endif
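
With malloc_spinlock_t removed, every lock in the allocator goes through the single malloc_mutex_t abstraction, whose representation differs per platform (CRITICAL_SECTION on Windows, OSSpinLock on Darwin, pthread_mutex_t elsewhere). A stand-alone sketch of that pattern, for illustration only; the names and the spin count are hypothetical, not the mozjemalloc definitions, and the Darwin OSSpinLock branch is omitted for brevity:

    #if defined(_WIN32)
    #  include <windows.h>
    typedef CRITICAL_SECTION my_mutex_t;
    static bool my_mutex_init(my_mutex_t* m) {
      // Returns true on failure, mirroring the convention used in this file.
      return !InitializeCriticalSectionAndSpinCount(m, 4000);
    }
    static void my_mutex_lock(my_mutex_t* m) { EnterCriticalSection(m); }
    static void my_mutex_unlock(my_mutex_t* m) { LeaveCriticalSection(m); }
    #else
    #  include <pthread.h>
    typedef pthread_mutex_t my_mutex_t;
    static bool my_mutex_init(my_mutex_t* m) {
      return pthread_mutex_init(m, nullptr) != 0;
    }
    static void my_mutex_lock(my_mutex_t* m) { pthread_mutex_lock(m); }
    static void my_mutex_unlock(my_mutex_t* m) { pthread_mutex_unlock(m); }
    #endif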
 
 /* Set to true once the allocator has been initialized. */
 static bool malloc_initialized = false;
 
 #if defined(XP_WIN)
 /* No init lock for Windows. */
 #elif defined(XP_DARWIN)
@@ -668,17 +663,17 @@ class AddressRadixTree {
 #endif
   static const size_t kBitsPerLevel = kNodeSize2Pow - SIZEOF_PTR_2POW;
   static const size_t kBitsAtLevel1 =
     (Bits % kBitsPerLevel) ? Bits % kBitsPerLevel : kBitsPerLevel;
   static const size_t kHeight = (Bits + kBitsPerLevel - 1) / kBitsPerLevel;
   static_assert(kBitsAtLevel1 + (kHeight - 1) * kBitsPerLevel == Bits,
                 "AddressRadixTree parameters don't work out");
 
-  malloc_spinlock_t mLock;
+  malloc_mutex_t mLock;
   void** mRoot;
 
 public:
   bool Init();
 
   inline void* Get(void* aAddr);
 
   // Returns whether the value was properly set
@@ -925,17 +920,17 @@ struct arena_t {
 #  define ARENA_MAGIC 0x947d3d24
 #endif
 
   arena_id_t mId;
   /* Linkage for the tree of arenas by id. */
   RedBlackTreeNode<arena_t> mLink;
 
   /* All operations on this arena require that lock be locked. */
-  malloc_spinlock_t mLock;
+  malloc_mutex_t mLock;
 
   arena_stats_t mStats;
 
 private:
   /* Tree of dirty-page-containing chunks this arena manages. */
   RedBlackTree<arena_chunk_t, ArenaDirtyChunkTrait> mChunksDirty;
 
 #ifdef MALLOC_DOUBLE_PURGE
@@ -1119,17 +1114,17 @@ static size_t		base_committed;
  * Arenas.
  */
 
 // A tree of all available arenas, arranged by id.
 // TODO: Move into arena_t as a static member when rb_tree doesn't depend on
 // the type being defined anymore.
 static RedBlackTree<arena_t, ArenaTreeTrait> gArenaTree;
 static unsigned narenas;
-static malloc_spinlock_t arenas_lock; /* Protects arenas initialization. */
+static malloc_mutex_t arenas_lock; /* Protects arenas initialization. */
 
 /*
  * The arena associated with the current thread (per jemalloc_thread_local_arena)
  * On OSX, __thread/thread_local circles back calling malloc to allocate storage
  * on first access on each thread, which leads to an infinite loop, but
  * pthread-based TLS somehow doesn't have this problem.
  */
 #if !defined(XP_DARWIN)
@@ -1296,90 +1291,21 @@ malloc_mutex_unlock(malloc_mutex_t *mute
 	LeaveCriticalSection(mutex);
 #elif defined(XP_DARWIN)
 	OSSpinLockUnlock(&mutex->lock);
 #else
 	pthread_mutex_unlock(mutex);
 #endif
 }
 
-#if (defined(__GNUC__))
-__attribute__((unused))
-#  endif
-static bool
-malloc_spin_init(malloc_spinlock_t *lock)
-{
-#if defined(XP_WIN)
-	if (!InitializeCriticalSectionAndSpinCount(lock, _CRT_SPINCOUNT))
-			return (true);
-#elif defined(XP_DARWIN)
-	lock->lock = OS_SPINLOCK_INIT;
-#elif defined(XP_LINUX) && !defined(ANDROID)
-	pthread_mutexattr_t attr;
-	if (pthread_mutexattr_init(&attr) != 0)
-		return (true);
-	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
-	if (pthread_mutex_init(lock, &attr) != 0) {
-		pthread_mutexattr_destroy(&attr);
-		return (true);
-	}
-	pthread_mutexattr_destroy(&attr);
-#else
-	if (pthread_mutex_init(lock, nullptr) != 0)
-		return (true);
-#endif
-	return (false);
-}
-
-static inline void
-malloc_spin_lock(malloc_spinlock_t *lock)
-{
-
-#if defined(XP_WIN)
-	EnterCriticalSection(lock);
-#elif defined(XP_DARWIN)
-	OSSpinLockLock(&lock->lock);
-#else
-	pthread_mutex_lock(lock);
-#endif
-}
-
-static inline void
-malloc_spin_unlock(malloc_spinlock_t *lock)
-{
-#if defined(XP_WIN)
-	LeaveCriticalSection(lock);
-#elif defined(XP_DARWIN)
-	OSSpinLockUnlock(&lock->lock);
-#else
-	pthread_mutex_unlock(lock);
-#endif
-}
-
 /*
  * End mutex.
  */
 /******************************************************************************/
 /*
- * Begin spin lock.  Spin locks here are actually adaptive mutexes that block
- * after a period of spinning, because unbounded spinning would allow for
- * priority inversion.
- */
-
-#if !defined(XP_DARWIN)
-#  define	malloc_spin_init	malloc_mutex_init
-#  define	malloc_spin_lock	malloc_mutex_lock
-#  define	malloc_spin_unlock	malloc_mutex_unlock
-#endif
-
-/*
- * End spin lock.
- */
-/******************************************************************************/
-/*
  * Begin Utility functions/macros.
  */
 
 /* Return the chunk address for allocation address a. */
 #define	CHUNK_ADDR2BASE(a)						\
 	((void *)((uintptr_t)(a) & ~chunksize_mask))
 
 /* Return the chunk offset of address a. */
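
The removed malloc_spin_init above shows what "spin locks" meant here: on Linux they were adaptive mutexes (PTHREAD_MUTEX_ADAPTIVE_NP), which spin briefly before blocking so that unbounded spinning cannot cause priority inversion. A hedged sketch of initializing such a lock, assuming glibc (the adaptive type is a non-portable GNU extension); this is an illustration, not the replacement init used by the allocator:

    #include <pthread.h>

    // Sketch: create an adaptive mutex on glibc; fall back to a plain mutex
    // elsewhere. Returns true on failure, matching the convention above.
    static bool adaptive_mutex_init(pthread_mutex_t* lock) {
    #if defined(__linux__) && defined(__GLIBC__)
      pthread_mutexattr_t attr;
      if (pthread_mutexattr_init(&attr) != 0) {
        return true;
      }
      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
      bool failed = pthread_mutex_init(lock, &attr) != 0;
      pthread_mutexattr_destroy(&attr);
      return failed;
    #else
      return pthread_mutex_init(lock, nullptr) != 0;
    #endif
    }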
@@ -1748,17 +1674,17 @@ pages_copy(void *dest, const void *src, 
 	    (vm_address_t)dest);
 }
 #endif
 
 template <size_t Bits>
 bool
 AddressRadixTree<Bits>::Init()
 {
-  malloc_spin_init(&mLock);
+  malloc_mutex_init(&mLock);
 
   mRoot = (void**)base_calloc(1 << kBitsAtLevel1, sizeof(void*));
   return mRoot;
 }
 
 template <size_t Bits>
 void**
 AddressRadixTree<Bits>::GetSlot(void* aKey, bool aCreate)
@@ -1802,52 +1728,52 @@ AddressRadixTree<Bits>::Get(void* aKey)
   void* ret = nullptr;
 
   void** slot = GetSlot(aKey);
 
   if (slot) {
     ret = *slot;
   }
 #ifdef MOZ_DEBUG
-  malloc_spin_lock(&mlock);
+  malloc_mutex_lock(&mLock);
   /*
    * Suppose that it were possible for a jemalloc-allocated chunk to be
    * munmap()ped, followed by a different allocator in another thread re-using
    * overlapping virtual memory, all without invalidating the cached rtree
    * value.  The result would be a false positive (the rtree would claim that
    * jemalloc owns memory that it had actually discarded).  I don't think this
    * scenario is possible, but the following assertion is a prudent sanity
    * check.
    */
   if (!slot) {
     // In case a slot has been created in the meantime.
     slot = GetSlot(aKey);
   }
   if (slot) {
-    // The malloc_spin_lock call above should act as a memory barrier, forcing
+    // The malloc_mutex_lock call above should act as a memory barrier, forcing
     // the compiler to emit a new read instruction for *slot.
     MOZ_ASSERT(ret == *slot);
   } else {
     MOZ_ASSERT(ret == nullptr);
   }
-  malloc_spin_unlock(&mlock);
+  malloc_mutex_unlock(&mlock);
 #endif
   return ret;
 }
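
The debug block above checks an intentionally lock-free fast path: Get() reads the slot without taking mLock, then, in debug builds only, re-reads under the lock and asserts the two reads agree. A generic sketch of that "optimistic read, verify under lock" pattern, simplified to a fixed table of atomic slots (not the radix-tree layout itself):

    #include <atomic>
    #include <cassert>
    #include <mutex>

    // Sketch: slots are written only under mLock and never repurposed, so an
    // unlocked read can at worst lag behind ("not set yet"). In debug builds
    // we re-read under the lock, mirroring the check above.
    class SlotTable {
      static const size_t kSlots = 1024;
      std::atomic<void*> mSlots[kSlots]{};
      std::mutex mLock;

    public:
      void Set(size_t aIndex, void* aValue) {
        std::lock_guard<std::mutex> guard(mLock);
        mSlots[aIndex].store(aValue, std::memory_order_release);
      }

      void* Get(size_t aIndex) {
        void* ret = mSlots[aIndex].load(std::memory_order_acquire);
    #ifdef DEBUG
        std::lock_guard<std::mutex> guard(mLock);
        void* check = mSlots[aIndex].load(std::memory_order_relaxed);
        assert((ret == check || ret == nullptr) &&
               "unlocked read may lag behind, but must never diverge");
    #endif
        return ret;
      }
    };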
 
 template <size_t Bits>
 bool
 AddressRadixTree<Bits>::Set(void* aKey, void* aValue)
 {
-  malloc_spin_lock(&mLock);
+  malloc_mutex_lock(&mLock);
   void** slot = GetSlot(aKey, /* create */ true);
   if (slot) {
     *slot = aValue;
   }
-  malloc_spin_unlock(&mLock);
+  malloc_mutex_unlock(&mLock);
   return slot;
 }
 
 /* pages_trim, chunk_alloc_mmap_slow and chunk_alloc_mmap were cherry-picked
  * from upstream jemalloc 3.4.1 to fix Mozilla bug 956501. */
 
 /* Return the offset between a and the nearest aligned address at or below a. */
 #define        ALIGNMENT_ADDR2OFFSET(a, alignment)                                \
@@ -3189,30 +3115,30 @@ arena_t::MallocSmall(size_t aSize, bool 
   } else {
     /* Sub-page. */
     aSize = pow2_ceil(aSize);
     bin = &mBins[ntbins + nqbins
         + (ffs((int)(aSize >> SMALL_MAX_2POW_DEFAULT)) - 2)];
   }
   MOZ_DIAGNOSTIC_ASSERT(aSize == bin->reg_size);
 
-  malloc_spin_lock(&mLock);
+  malloc_mutex_lock(&mLock);
   if ((run = bin->runcur) && run->nfree > 0) {
     ret = MallocBinEasy(bin, run);
   } else {
     ret = MallocBinHard(bin);
   }
 
   if (!ret) {
-    malloc_spin_unlock(&mLock);
+    malloc_mutex_unlock(&mLock);
     return nullptr;
   }
 
   mStats.allocated_small += aSize;
-  malloc_spin_unlock(&mLock);
+  malloc_mutex_unlock(&mLock);
 
   if (aZero == false) {
     if (opt_junk) {
       memset(ret, kAllocJunk, aSize);
     } else if (opt_zero) {
       memset(ret, 0, aSize);
     }
   } else
@@ -3223,24 +3149,24 @@ arena_t::MallocSmall(size_t aSize, bool 
 
 void*
 arena_t::MallocLarge(size_t aSize, bool aZero)
 {
   void* ret;
 
   /* Large allocation. */
   aSize = PAGE_CEILING(aSize);
-  malloc_spin_lock(&mLock);
+  malloc_mutex_lock(&mLock);
   ret = AllocRun(nullptr, aSize, true, aZero);
   if (!ret) {
-    malloc_spin_unlock(&mLock);
+    malloc_mutex_unlock(&mLock);
     return nullptr;
   }
   mStats.allocated_large += aSize;
-  malloc_spin_unlock(&mLock);
+  malloc_mutex_unlock(&mLock);
 
   if (aZero == false) {
     if (opt_junk) {
       memset(ret, kAllocJunk, aSize);
     } else if (opt_zero) {
       memset(ret, 0, aSize);
     }
   }
@@ -3277,20 +3203,20 @@ arena_t::Palloc(size_t aAlignment, size_
 {
   void* ret;
   size_t offset;
   arena_chunk_t* chunk;
 
   MOZ_ASSERT((aSize & pagesize_mask) == 0);
   MOZ_ASSERT((aAlignment & pagesize_mask) == 0);
 
-  malloc_spin_lock(&mLock);
+  malloc_mutex_lock(&mLock);
   ret = AllocRun(nullptr, aAllocSize, true, false);
   if (!ret) {
-    malloc_spin_unlock(&mLock);
+    malloc_mutex_unlock(&mLock);
     return nullptr;
   }
 
   chunk = (arena_chunk_t*)CHUNK_ADDR2BASE(ret);
 
   offset = uintptr_t(ret) & (aAlignment - 1);
   MOZ_ASSERT((offset & pagesize_mask) == 0);
   MOZ_ASSERT(offset < aAllocSize);
@@ -3309,17 +3235,17 @@ arena_t::Palloc(size_t aAlignment, size_
     if (trailsize != 0) {
       /* Trim trailing space. */
       MOZ_ASSERT(trailsize < aAllocSize);
       TrimRunTail(chunk, (arena_run_t*)ret, aSize + trailsize, aSize, false);
     }
   }
 
   mStats.allocated_large += aSize;
-  malloc_spin_unlock(&mLock);
+  malloc_mutex_unlock(&mLock);
 
   if (opt_junk) {
     memset(ret, kAllocJunk, aSize);
   } else if (opt_zero) {
     memset(ret, 0, aSize);
   }
   return ret;
 }
@@ -3750,28 +3676,28 @@ arena_dalloc(void *ptr, size_t offset)
 	MOZ_ASSERT(offset != 0);
 	MOZ_ASSERT(CHUNK_ADDR2OFFSET(ptr) == offset);
 
 	chunk = (arena_chunk_t *) ((uintptr_t)ptr - offset);
 	arena = chunk->arena;
 	MOZ_ASSERT(arena);
 	MOZ_DIAGNOSTIC_ASSERT(arena->mMagic == ARENA_MAGIC);
 
-	malloc_spin_lock(&arena->mLock);
+	malloc_mutex_lock(&arena->mLock);
 	pageind = offset >> pagesize_2pow;
 	mapelm = &chunk->map[pageind];
 	MOZ_DIAGNOSTIC_ASSERT((mapelm->bits & CHUNK_MAP_ALLOCATED) != 0);
 	if ((mapelm->bits & CHUNK_MAP_LARGE) == 0) {
 		/* Small allocation. */
 		arena->DallocSmall(chunk, ptr, mapelm);
 	} else {
 		/* Large allocation. */
 		arena->DallocLarge(chunk, ptr);
 	}
-	malloc_spin_unlock(&arena->mLock);
+	malloc_mutex_unlock(&arena->mLock);
 }
 
 static inline void
 idalloc(void *ptr)
 {
 	size_t offset;
 
 	MOZ_ASSERT(ptr);
@@ -3788,30 +3714,30 @@ arena_t::RallocShrinkLarge(arena_chunk_t
                            size_t aOldSize)
 {
   MOZ_ASSERT(aSize < aOldSize);
 
   /*
    * Shrink the run, and make trailing pages available for other
    * allocations.
    */
-  malloc_spin_lock(&mLock);
+  malloc_mutex_lock(&mLock);
   TrimRunTail(aChunk, (arena_run_t*)aPtr, aOldSize, aSize, true);
   mStats.allocated_large -= aOldSize - aSize;
-  malloc_spin_unlock(&mLock);
+  malloc_mutex_unlock(&mLock);
 }
 
 bool
 arena_t::RallocGrowLarge(arena_chunk_t* aChunk, void* aPtr, size_t aSize,
                          size_t aOldSize)
 {
   size_t pageind = (uintptr_t(aPtr) - uintptr_t(aChunk)) >> pagesize_2pow;
   size_t npages = aOldSize >> pagesize_2pow;
 
-  malloc_spin_lock(&mLock);
+  malloc_mutex_lock(&mLock);
   MOZ_DIAGNOSTIC_ASSERT(aOldSize == (aChunk->map[pageind].bits & ~pagesize_mask));
 
   /* Try to extend the run. */
   MOZ_ASSERT(aSize > aOldSize);
   if (pageind + npages < chunk_npages && (aChunk->map[pageind+npages].bits
       & CHUNK_MAP_ALLOCATED) == 0 && (aChunk->map[pageind+npages].bits &
       ~pagesize_mask) >= aSize - aOldSize) {
     /*
@@ -3824,20 +3750,20 @@ arena_t::RallocGrowLarge(arena_chunk_t* 
         false);
 
     aChunk->map[pageind].bits = aSize | CHUNK_MAP_LARGE |
         CHUNK_MAP_ALLOCATED;
     aChunk->map[pageind+npages].bits = CHUNK_MAP_LARGE |
         CHUNK_MAP_ALLOCATED;
 
     mStats.allocated_large += aSize - aOldSize;
-    malloc_spin_unlock(&mLock);
+    malloc_mutex_unlock(&mLock);
     return false;
   }
-  malloc_spin_unlock(&mLock);
+  malloc_mutex_unlock(&mLock);
 
   return true;
 }
 
 /*
  * Try to resize a large allocation, in order to avoid copying.  This will
 * always fail if growing an object and the following run is already in use.
  */
@@ -3958,17 +3884,17 @@ iralloc(void* aPtr, size_t aSize, arena_
 
 bool
 arena_t::Init()
 {
   unsigned i;
   arena_bin_t* bin;
   size_t prev_run_size;
 
-  if (malloc_spin_init(&mLock))
+  if (malloc_mutex_init(&mLock))
     return true;
 
   memset(&mLink, 0, sizeof(mLink));
   memset(&mStats, 0, sizeof(arena_stats_t));
 
   /* Initialize chunks. */
   mChunksDirty.Init();
 #ifdef MALLOC_DOUBLE_PURGE
@@ -4057,23 +3983,23 @@ arenas_extend()
 
   /* Allocate enough space for trailing bins. */
   ret = (arena_t *)base_alloc(sizeof(arena_t)
       + (sizeof(arena_bin_t) * (ntbins + nqbins + nsbins - 1)));
   if (!ret || ret->Init()) {
     return arenas_fallback();
   }
 
-  malloc_spin_lock(&arenas_lock);
+  malloc_mutex_lock(&arenas_lock);
 
   // TODO: Use random Ids.
   ret->mId = narenas++;
   gArenaTree.Insert(ret);
 
-  malloc_spin_unlock(&arenas_lock);
+  malloc_mutex_unlock(&arenas_lock);
   return ret;
 }
 
 /*
  * End arena.
  */
 /******************************************************************************/
 /*
@@ -4468,17 +4394,17 @@ MALLOC_OUT:
   huge_mapped = 0;
 
   /* Initialize base allocation data structures. */
   base_mapped = 0;
   base_committed = 0;
   base_nodes = nullptr;
   malloc_mutex_init(&base_mtx);
 
-  malloc_spin_init(&arenas_lock);
+  malloc_mutex_init(&arenas_lock);
 
   /*
    * Initialize one arena here.
    */
   gArenaTree.Init();
   arenas_extend();
   gMainArena = gArenaTree.First();
   if (!gMainArena) {
@@ -4833,31 +4759,31 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
 
   /* Get base mapped/allocated. */
   malloc_mutex_lock(&base_mtx);
   non_arena_mapped += base_mapped;
   aStats->bookkeeping += base_committed;
   MOZ_ASSERT(base_mapped >= base_committed);
   malloc_mutex_unlock(&base_mtx);
 
-  malloc_spin_lock(&arenas_lock);
+  malloc_mutex_lock(&arenas_lock);
   /* Iterate over arenas. */
   for (auto arena : gArenaTree.iter()) {
     size_t arena_mapped, arena_allocated, arena_committed, arena_dirty, j,
            arena_unused, arena_headers;
     arena_run_t* run;
 
     if (!arena) {
       continue;
     }
 
     arena_headers = 0;
     arena_unused = 0;
 
-    malloc_spin_lock(&arena->mLock);
+    malloc_mutex_lock(&arena->mLock);
 
     arena_mapped = arena->mStats.mapped;
 
     /* "committed" counts dirty and allocated memory. */
     arena_committed = arena->mStats.committed << pagesize_2pow;
 
     arena_allocated = arena->mStats.allocated_small +
                       arena->mStats.allocated_large;
@@ -4876,32 +4802,32 @@ MozJemalloc::jemalloc_stats(jemalloc_sta
       if (bin->runcur) {
         bin_unused += bin->runcur->nfree * bin->reg_size;
       }
 
       arena_unused += bin_unused;
       arena_headers += bin->stats.curruns * bin->reg0_offset;
     }
 
-    malloc_spin_unlock(&arena->mLock);
+    malloc_mutex_unlock(&arena->mLock);
 
     MOZ_ASSERT(arena_mapped >= arena_committed);
     MOZ_ASSERT(arena_committed >= arena_allocated + arena_dirty);
 
     /* "waste" is committed memory that is neither dirty nor
      * allocated. */
     aStats->mapped += arena_mapped;
     aStats->allocated += arena_allocated;
     aStats->page_cache += arena_dirty;
     aStats->waste += arena_committed -
         arena_allocated - arena_dirty - arena_unused - arena_headers;
     aStats->bin_unused += arena_unused;
     aStats->bookkeeping += arena_headers;
   }
-  malloc_spin_unlock(&arenas_lock);
+  malloc_mutex_unlock(&arenas_lock);
 
   /* Account for arena chunk headers in bookkeeping rather than waste. */
   chunk_header_size =
       ((aStats->mapped / aStats->chunksize) * arena_chunk_header_npages) <<
       pagesize_2pow;
 
   aStats->mapped += non_arena_mapped;
   aStats->bookkeeping += chunk_header_size;
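
The arena loop above derives "waste" from the other counters, roughly: waste = committed − allocated − dirty (page_cache) − bin_unused − headers. Since that relationship is easy to lose in the field-by-field code, here is a toy restatement in C++ (field names mirror the stats, but this is illustrative, not the jemalloc_stats API):

    #include <cassert>
    #include <cstddef>

    // Toy restatement of the per-arena accounting used above.
    struct ArenaCounters {
      size_t committed;   // dirty + allocated + everything else still mapped
      size_t allocated;   // small + large allocations handed out
      size_t dirty;       // madvise-able page cache
      size_t bin_unused;  // free regions inside partially used runs
      size_t headers;     // run/bin bookkeeping
    };

    static size_t Waste(const ArenaCounters& c) {
      assert(c.committed >= c.allocated + c.dirty);
      return c.committed - c.allocated - c.dirty - c.bin_unused - c.headers;
    }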
@@ -4941,89 +4867,89 @@ hard_purge_chunk(arena_chunk_t *chunk)
 		i += npages;
 	}
 }
 
 /* Explicitly remove all of this arena's MADV_FREE'd pages from memory. */
 void
 arena_t::HardPurge()
 {
-  malloc_spin_lock(&mLock);
+  malloc_mutex_lock(&mLock);
 
   while (!mChunksMAdvised.isEmpty()) {
     arena_chunk_t* chunk = mChunksMAdvised.popFront();
     hard_purge_chunk(chunk);
   }
 
-  malloc_spin_unlock(&mLock);
+  malloc_mutex_unlock(&mLock);
 }
 
 template<> inline void
 MozJemalloc::jemalloc_purge_freed_pages()
 {
-  malloc_spin_lock(&arenas_lock);
+  malloc_mutex_lock(&arenas_lock);
   for (auto arena : gArenaTree.iter()) {
     arena->HardPurge();
   }
-  malloc_spin_unlock(&arenas_lock);
+  malloc_mutex_unlock(&arenas_lock);
 }
 
 #else /* !defined MALLOC_DOUBLE_PURGE */
 
 template<> inline void
 MozJemalloc::jemalloc_purge_freed_pages()
 {
   /* Do nothing. */
 }
 
 #endif /* defined MALLOC_DOUBLE_PURGE */
 
 
 template<> inline void
 MozJemalloc::jemalloc_free_dirty_pages(void)
 {
-  malloc_spin_lock(&arenas_lock);
+  malloc_mutex_lock(&arenas_lock);
   for (auto arena : gArenaTree.iter()) {
-    malloc_spin_lock(&arena->mLock);
+    malloc_mutex_lock(&arena->mLock);
     arena->Purge(true);
-    malloc_spin_unlock(&arena->mLock);
+    malloc_mutex_unlock(&arena->mLock);
   }
-  malloc_spin_unlock(&arenas_lock);
+  malloc_mutex_unlock(&arenas_lock);
 }
 
 inline arena_t*
 arena_t::GetById(arena_id_t aArenaId)
 {
   arena_t key;
   key.mId = aArenaId;
-  malloc_spin_lock(&arenas_lock);
+  malloc_mutex_lock(&arenas_lock);
   arena_t* result = gArenaTree.Search(&key);
-  malloc_spin_unlock(&arenas_lock);
+  malloc_mutex_unlock(&arenas_lock);
   MOZ_RELEASE_ASSERT(result);
   return result;
 }
 
 #ifdef NIGHTLY_BUILD
 template<> inline arena_id_t
 MozJemalloc::moz_create_arena()
 {
   arena_t* arena = arenas_extend();
   return arena->mId;
 }
 
 template<> inline void
 MozJemalloc::moz_dispose_arena(arena_id_t aArenaId)
 {
   arena_t* arena = arena_t::GetById(aArenaId);
-  malloc_spin_lock(&arenas_lock);
+  malloc_mutex_lock(&arenas_lock);
   gArenaTree.Remove(arena);
   // The arena is leaked, and remaining allocations in it still are alive
   // until they are freed. After that, the arena will be empty but still
  // have at least a chunk taking up address space. TODO: bug 1364359.
-  malloc_spin_unlock(&arenas_lock);
+  malloc_mutex_unlock(&arenas_lock);
 }
 
 #define MALLOC_DECL(name, return_type, ...) \
   template<> inline return_type \
   MozJemalloc::moz_arena_ ## name(arena_id_t aArenaId, ARGS_HELPER(TYPED_ARGS, ##__VA_ARGS__)) \
   { \
     BaseAllocator allocator(arena_t::GetById(aArenaId)); \
     return allocator.name(ARGS_HELPER(ARGS, ##__VA_ARGS__)); \
@@ -5058,20 +4984,20 @@ MozJemalloc::moz_dispose_arena(arena_id_
 #ifndef XP_DARWIN
 static
 #endif
 void
 _malloc_prefork(void)
 {
   /* Acquire all mutexes in a safe order. */
 
-  malloc_spin_lock(&arenas_lock);
+  malloc_mutex_lock(&arenas_lock);
 
   for (auto arena : gArenaTree.iter()) {
-    malloc_spin_lock(&arena->mLock);
+    malloc_mutex_lock(&arena->mLock);
   }
 
   malloc_mutex_lock(&base_mtx);
 
   malloc_mutex_lock(&huge_mtx);
 }
 
 #ifndef XP_DARWIN
@@ -5082,37 +5008,37 @@ void
 {
   /* Release all mutexes, now that fork() has completed. */
 
   malloc_mutex_unlock(&huge_mtx);
 
   malloc_mutex_unlock(&base_mtx);
 
   for (auto arena : gArenaTree.iter()) {
-    malloc_spin_unlock(&arena->mLock);
+    malloc_mutex_unlock(&arena->mLock);
   }
-  malloc_spin_unlock(&arenas_lock);
+  malloc_mutex_unlock(&arenas_lock);
 }
 
 #ifndef XP_DARWIN
 static
 #endif
 void
 _malloc_postfork_child(void)
 {
   /* Reinitialize all mutexes, now that fork() has completed. */
 
   malloc_mutex_init(&huge_mtx);
 
   malloc_mutex_init(&base_mtx);
 
   for (auto arena : gArenaTree.iter()) {
-    malloc_spin_init(&arena->mLock);
+    malloc_mutex_init(&arena->mLock);
   }
-  malloc_spin_init(&arenas_lock);
+  malloc_mutex_init(&arenas_lock);
 }
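
The three functions above implement the usual pthread_atfork protocol for an allocator: the parent takes every lock in a fixed order before fork() so the child never inherits a lock frozen in the locked state, the parent then releases them, and the child reinitializes them instead of unlocking. A minimal sketch of wiring such handlers up, with a single hypothetical lock standing in for the allocator's whole set (the real code registers its handlers elsewhere):

    #include <pthread.h>

    // Illustrative only: one global lock standing in for the allocator's set.
    static pthread_mutex_t gLock = PTHREAD_MUTEX_INITIALIZER;

    static void prefork() { pthread_mutex_lock(&gLock); }           // parent, before fork
    static void postfork_parent() { pthread_mutex_unlock(&gLock); } // parent, after fork
    static void postfork_child() {                                  // child: reinit, never unlock
      pthread_mutex_init(&gLock, nullptr);
    }

    static void install_fork_handlers() {
      // prepare runs before fork(); parent/child run after it in each process.
      pthread_atfork(prefork, postfork_parent, postfork_child);
    }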
 
 /*
  * End library-private functions.
  */
 /******************************************************************************/
 #ifdef MOZ_REPLACE_MALLOC
 
--- a/mobile/android/chrome/content/PermissionsHelper.js
+++ b/mobile/android/chrome/content/PermissionsHelper.js
@@ -139,21 +139,21 @@ var PermissionsHelper = {
    * @return A permission value defined in nsIPermissionManager.
    */
   getPermission: function getPermission(aURI, aType) {
     // Password saving isn't a nsIPermissionManager permission type, so handle
    // it separately.
     if (aType == "password") {
       // By default, login saving is enabled, so if it is disabled, the
       // user selected the never remember option
-      if (!Services.logins.getLoginSavingEnabled(aURI.prePath))
+      if (!Services.logins.getLoginSavingEnabled(aURI.displayPrePath))
         return Services.perms.DENY_ACTION;
 
       // Check to see if the user ever actually saved a login
-      if (Services.logins.countLogins(aURI.prePath, "", ""))
+      if (Services.logins.countLogins(aURI.displayPrePath, "", ""))
         return Services.perms.ALLOW_ACTION;
 
       return Services.perms.UNKNOWN_ACTION;
     }
 
     // Geolocation consumers use testExactPermission
     if (aType == "geolocation")
       return Services.perms.testExactPermission(aURI, aType);
@@ -168,22 +168,22 @@ var PermissionsHelper = {
    *        The permission type string stored in permission manager.
    *        e.g. "geolocation", "indexedDB", "popup"
    */
   clearPermission: function clearPermission(aURI, aType, aContext) {
     // Password saving isn't a nsIPermissionManager permission type, so handle
    // it separately.
     if (aType == "password") {
      // Get rid of existing stored logins
-      let logins = Services.logins.findLogins({}, aURI.prePath, "", "");
+      let logins = Services.logins.findLogins({}, aURI.displayPrePath, "", "");
       for (let i = 0; i < logins.length; i++) {
         Services.logins.removeLogin(logins[i]);
       }
       // Re-set login saving to enabled
-      Services.logins.setLoginSavingEnabled(aURI.prePath, true);
+      Services.logins.setLoginSavingEnabled(aURI.displayPrePath, true);
     } else {
       Services.perms.remove(aURI, aType);
       // Clear content prefs set in ContentPermissionPrompt.js
       Cc["@mozilla.org/content-pref/service;1"]
         .getService(Ci.nsIContentPrefService2)
         .removeByDomainAndName(aURI.spec, aType + ".request.remember", aContext);
     }
   }
--- a/mobile/android/chrome/content/browser.js
+++ b/mobile/android/chrome/content/browser.js
@@ -4124,17 +4124,17 @@ Tab.prototype = {
           WebsiteMetadata.parseAsynchronously(this.browser.contentDocument);
         }
 
         break;
       }
 
       case "DOMFormHasPassword": {
         // Send logins for this hostname to Java.
-        let hostname = aEvent.target.baseURIObject.prePath;
+        let hostname = aEvent.target.baseURIObject.displayPrePath;
         let foundLogins = Services.logins.findLogins({}, hostname, "", "");
         if (foundLogins.length > 0) {
           let displayHost = IdentityHandler.getEffectiveHost();
           let title = { text: displayHost, resource: hostname };
           let selectObj = { title: title, logins: foundLogins };
           GlobalEventDispatcher.sendRequest({
             type: "Doorhanger:Logins",
             data: selectObj
--- a/modules/libmar/verify/cryptox.c
+++ b/modules/libmar/verify/cryptox.c
@@ -106,17 +106,17 @@ NSS_VerifySignature(VFYContext * const *
  *
  * @param hash      The hash context that the signature should match.
  * @param pubKey    The public key to use on the signature.
  * @param signature The signature to check.
  * @param signatureLen The length of the signature.
  * @return CryptoX_Success on success, CryptoX_Error on error.
 */
 CryptoX_Result
-CyprtoAPI_VerifySignature(HCRYPTHASH *hash, 
+CryptoAPI_VerifySignature(HCRYPTHASH *hash,
                           HCRYPTKEY *pubKey,
                           const BYTE *signature, 
                           DWORD signatureLen)
 {
   DWORD i;
   BOOL result;
 /* Windows APIs expect the bytes in the signature to be in little-endian 
  * order, but we write the signature in big-endian order.  Other APIs like 
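
The comment above is the crux of this function: CryptVerifySignature expects the signature bytes little-endian, while the MAR format stores them big-endian, so they must be reversed into a temporary buffer before verification. A hedged sketch of that reversal (illustrative only, not the actual cryptox.c code):

    #include <cstddef>
    #include <vector>

    // Sketch: reverse a big-endian signature into the little-endian byte
    // order expected by the Windows CryptoAPI verification call.
    static std::vector<unsigned char> ReverseSignature(const unsigned char* aSig,
                                                       size_t aLen) {
      std::vector<unsigned char> out(aLen);
      for (size_t i = 0; i < aLen; i++) {
        out[i] = aSig[aLen - 1 - i];
      }
      return out;
    }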
--- a/modules/libmar/verify/cryptox.h
+++ b/modules/libmar/verify/cryptox.h
@@ -112,17 +112,17 @@ void CryptoMac_FreePublicKey(CryptoX_Pub
 CryptoX_Result CryptoAPI_InitCryptoContext(HCRYPTPROV *provider);
 CryptoX_Result CryptoAPI_LoadPublicKey(HCRYPTPROV hProv, 
                                        BYTE *certData,
                                        DWORD sizeOfCertData,
                                        HCRYPTKEY *publicKey);
 CryptoX_Result CryptoAPI_VerifyBegin(HCRYPTPROV provider, HCRYPTHASH* hash);
 CryptoX_Result CryptoAPI_VerifyUpdate(HCRYPTHASH* hash, 
                                       BYTE *buf, DWORD len);
-CryptoX_Result CyprtoAPI_VerifySignature(HCRYPTHASH *hash, 
+CryptoX_Result CryptoAPI_VerifySignature(HCRYPTHASH *hash,
                                          HCRYPTKEY *pubKey,
                                          const BYTE *signature, 
                                          DWORD signatureLen);
 
 #define CryptoX_InvalidHandleValue ((ULONG_PTR)NULL)
 #define CryptoX_ProviderHandle HCRYPTPROV
 #define CryptoX_SignatureHandle HCRYPTHASH
 #define CryptoX_PublicKey HCRYPTKEY
@@ -132,17 +132,17 @@ CryptoX_Result CyprtoAPI_VerifySignature
 #define CryptoX_VerifyBegin(CryptoHandle, SignatureHandle, PublicKey) \
   CryptoAPI_VerifyBegin(CryptoHandle, SignatureHandle)
 #define CryptoX_FreeSignatureHandle(SignatureHandle)
 #define CryptoX_VerifyUpdate(SignatureHandle, buf, len) \
   CryptoAPI_VerifyUpdate(SignatureHandle, (BYTE *)(buf), len)
 #define CryptoX_LoadPublicKey(CryptoHandle, certData, dataSize, publicKey) \
   CryptoAPI_LoadPublicKey(CryptoHandle, (BYTE*)(certData), dataSize, publicKey)
 #define CryptoX_VerifySignature(hash, publicKey, signedData, len) \
-  CyprtoAPI_VerifySignature(hash, publicKey, signedData, len)
+  CryptoAPI_VerifySignature(hash, publicKey, signedData, len)
 #define CryptoX_FreePublicKey(key) \
   CryptDestroyKey(*(key))
 #define CryptoX_FreeCertificate(cert) \
   CertCloseStore(*(cert), CERT_CLOSE_STORE_FORCE_FLAG);
 
 #else
 
 /* This default implementation is necessary because we don't want to
--- a/python/mozbuild/mozbuild/action/langpack_manifest.py
+++ b/python/mozbuild/mozbuild/action/langpack_manifest.py
@@ -300,18 +300,17 @@ def parse_chrome_manifest(path, base_pat
 #    defines        (dict) - A dictionary of defines entries
 #    chrome_entries (dict) - A dictionary of chrome registry entries
 #
 # Returns:
 #    (dict) - a web manifest
 #
 # Example:
 #    manifest = create_webmanifest(
-#      ['pl'],
-#      '{ec8030f7-c20a-464f-9b0e-13a3a9e97384}',
+#      'pl',
 #      '57.0',
 #      '57.0.*',
 #      'Firefox',
 #      '/var/vcs/l10n-central',
 #      {'MOZ_LANG_TITLE': 'Polski'},
 #      chrome_entries
 #    )
 #    manifest == {
@@ -351,17 +350,17 @@ def parse_chrome_manifest(path, base_pat
 ###
 def create_webmanifest(locstr, min_app_ver, max_app_ver, app_name,
                        l10n_basedir, defines, chrome_entries):
     locales = map(lambda loc: loc.strip(), locstr.split(','))
     main_locale = locales[0]
 
     author = build_author_string(
         defines['MOZ_LANGPACK_CREATOR'],
-        defines['MOZ_LANGPACK_CONTRIBUTORS']
+        defines['MOZ_LANGPACK_CONTRIBUTORS'] if 'MOZ_LANGPACK_CONTRIBUTORS' in defines else ""
     )
 
     manifest = {
         'langpack_id': main_locale,
         'manifest_version': 2,
         'applications': {
             'gecko': {
                 'id': 'langpack-{0}@firefox.mozilla.org'.format(main_locale),
new file mode 100644
--- /dev/null
+++ b/python/mozbuild/mozbuild/test/action/test_langpack_manifest.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+
+# Any copyright is dedicated to the Public Domain.
+# http://creativecommons.org/publicdomain/zero/1.0/
+
+import unittest
+import json
+
+import mozunit
+
+import mozbuild.action.langpack_manifest as langpack_manifest
+from mozbuild.preprocessor import Context
+
+
+class TestGenerateManifest(unittest.TestCase):
+    """
+    Unit tests for langpack_manifest.py.
+    """
+
+    def test_manifest(self):
+        ctx = Context()
+        ctx['MOZ_LANG_TITLE'] = 'Finnish'
+        ctx['MOZ_LANGPACK_CREATOR'] = 'Suomennosprojekti'
+        ctx['MOZ_LANGPACK_CONTRIBUTORS'] = """
+            <em:contributor>Joe Smith</em:contributor>
+            <em:contributor>Mary White</em:contributor>
+        """
+        manifest = langpack_manifest.create_webmanifest(
+            'fi',
+            '57.0',
+            '57.0.*',
+            'Firefox',
+            '/var/vcs/l10n-central',
+            ctx,
+            {},
+        )
+
+        data = json.loads(manifest)
+        self.assertEquals(data['name'], 'Finnish Language Pack')
+        self.assertEquals(
+            data['author'], 'Suomennosprojekti (contributors: Joe Smith, Mary White)')
+
+    def test_manifest_without_contributors(self):
+        ctx = Context()
+        ctx['MOZ_LANG_TITLE'] = 'Finnish'
+        ctx['MOZ_LANGPACK_CREATOR'] = 'Suomennosprojekti'
+        manifest = langpack_manifest.create_webmanifest(
+            'fi',
+            '57.0',
+            '57.0.*',
+            'Firefox',
+            '/var/vcs/l10n-central',
+            ctx,
+            {},
+        )
+
+        data = json.loads(manifest)
+        self.assertEquals(data['name'], 'Finnish Language Pack')
+        self.assertEquals(data['author'], 'Suomennosprojekti')
+
+
+if __name__ == '__main__':
+    mozunit.main()
--- a/python/mozbuild/mozbuild/test/python.ini
+++ b/python/mozbuild/mozbuild/test/python.ini
@@ -1,10 +1,11 @@
 [action/test_buildlist.py]
 [action/test_generate_browsersearch.py]
+[action/test_langpack_manifest.py]
 [action/test_package_fennec_apk.py]
 [backend/test_build.py]
 [backend/test_configenvironment.py]
 [backend/test_partialconfigenvironment.py]
 [backend/test_recursivemake.py]
 [backend/test_test_manifest.py]
 [backend/test_visualstudio.py]
 [codecoverage/test_lcov_rewrite.py]
--- a/services/sync/modules/engines/clients.js
+++ b/services/sync/modules/engines/clients.js
@@ -305,20 +305,37 @@ ClientEngine.prototype = {
     const allCommands = await this._readCommands();
     delete allCommands[clientId];
     await this._saveCommands(allCommands);
   },
 
   async updateKnownStaleClients() {
     this._log.debug("Updating the known stale clients");
     await this._refreshKnownStaleClients();
-    for (let client of Object.values(this._store._remoteClients)) {
-      if (client.fxaDeviceId && this._knownStaleFxADeviceIds.includes(client.fxaDeviceId)) {
+    let localFxADeviceId = await fxAccounts.getDeviceId();
+    // Process newer records first, so that if we hit a record with a device ID
+    // we've seen before, we can mark it stale immediately.
+    let clientList = Object.values(this._store._remoteClients).sort((a, b) =>
+      b.serverLastModified - a.serverLastModified);
+    let seenDeviceIds = new Set([localFxADeviceId]);
+    for (let client of clientList) {
+      // Clients might not have an `fxaDeviceId` if they fail the FxA
+      // registration process.
+      if (!client.fxaDeviceId) {
+        continue;
+      }
+      if (this._knownStaleFxADeviceIds.includes(client.fxaDeviceId)) {
         this._log.info(`Hiding stale client ${client.id} - in known stale clients list`);
         client.stale = true;
+      } else if (seenDeviceIds.has(client.fxaDeviceId)) {
+        this._log.info(`Hiding stale client ${client.id}` +
+                       ` - duplicate device id ${client.fxaDeviceId}`);
+        client.stale = true;
+      } else {
+        seenDeviceIds.add(client.fxaDeviceId);
       }
     }
   },
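
updateKnownStaleClients above relies on ordering: records are sorted newest-first by serverLastModified, the local device id is pre-seeded into the "seen" set, and any later (older) record that repeats an fxaDeviceId is marked stale. The same algorithm, sketched in C++ purely for illustration (field names mirror the JS, but this is not Sync code):

    #include <algorithm>
    #include <set>
    #include <string>
    #include <vector>

    struct ClientRecord {
      std::string id;
      std::string fxaDeviceId;       // may be empty if FxA registration failed
      double serverLastModified = 0;
      bool stale = false;
    };

    // Mark duplicate-device records stale, keeping only the newest per device id.
    void MarkDuplicateDevicesStale(std::vector<ClientRecord>& aClients,
                                   const std::string& aLocalDeviceId) {
      std::sort(aClients.begin(), aClients.end(),
                [](const ClientRecord& a, const ClientRecord& b) {
                  return a.serverLastModified > b.serverLastModified;  // newest first
                });
      std::set<std::string> seen{aLocalDeviceId};
      for (ClientRecord& client : aClients) {
        if (client.fxaDeviceId.empty()) {
          continue;  // never registered with FxA; nothing to dedupe on
        }
        if (!seen.insert(client.fxaDeviceId).second) {
          client.stale = true;  // an older record for a device already seen
        }
      }
    }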
 
   // We assume that clients not present in the FxA Device Manager list have been
   // disconnected and so are stale
   async _refreshKnownStaleClients() {
     this._log.debug("Refreshing the known stale clients list");
@@ -364,39 +381,54 @@ ClientEngine.prototype = {
       // never see them. We also do this to filter out stale clients from the
       // tabs collection, since showing their list of tabs is confusing.
       for (let id in this._store._remoteClients) {
         if (!this._incomingClients[id]) {
           this._log.info(`Removing local state for deleted client ${id}`);
           await this._removeRemoteClient(id);
         }
       }
+      let localFxADeviceId = await fxAccounts.getDeviceId();
       // Bug 1264498: Mobile clients don't remove themselves from the clients
       // collection when the user disconnects Sync, so we mark as stale clients
       // with the same name that haven't synced in over a week.
       // (Note we can't simply delete them, or we re-apply them next sync - see
       // bug 1287687)
       delete this._incomingClients[this.localID];
       let names = new Set([this.localName]);
-      for (let [id, serverLastModified] of Object.entries(this._incomingClients)) {
+      let seenDeviceIds = new Set([localFxADeviceId]);
+      let idToLastModifiedList = Object.entries(this._incomingClients)
+                                 .sort((a, b) => b[1] - a[1]);
+      for (let [id, serverLastModified] of idToLastModifiedList) {
         let record = this._store._remoteClients[id];
         // stash the server last-modified time on the record.
         record.serverLastModified = serverLastModified;
         if (record.fxaDeviceId && this._knownStaleFxADeviceIds.includes(record.fxaDeviceId)) {
           this._log.info(`Hiding stale client ${id} - in known stale clients list`);
           record.stale = true;
         }
         if (!names.has(record.name)) {
+          if (record.fxaDeviceId) {
+            seenDeviceIds.add(record.fxaDeviceId);
+          }
           names.add(record.name);
           continue;
         }
         let remoteAge = AsyncResource.serverTime - this._incomingClients[id];
         if (remoteAge > STALE_CLIENT_REMOTE_AGE) {
           this._log.info(`Hiding stale client ${id} with age ${remoteAge}`);
           record.stale = true;
+          continue;
+        }
+        if (record.fxaDeviceId && seenDeviceIds.has(record.fxaDeviceId)) {
+          this._log.info(`Hiding stale client ${record.id}` +
+                         ` - duplicate device id ${record.fxaDeviceId}`);
+          record.stale = true;
+        } else if (record.fxaDeviceId) {
+          seenDeviceIds.add(record.fxaDeviceId);
         }
       }
     } finally {
       this._incomingClients = null;
     }
   },
 
   async _uploadOutgoing() {
--- a/services/sync/tests/unit/test_clients_engine.js
+++ b/services/sync/tests/unit/test_clients_engine.js
@@ -865,16 +865,78 @@ add_task(async function test_clients_not
       let collection = server.getCollection("foo", "clients");
       collection.remove(remoteId);
     } finally {
       await promiseStopServer(server);
     }
   }
 });
 
+
+add_task(async function test_dupe_device_ids() {
+  _("Ensure that we mark devices with duplicate fxaDeviceIds but older lastModified as stale.");
+
+  await engine._store.wipe();
+  await generateNewKeys(Service.collectionKeys);
+
+  let server   = await serverForFoo(engine);
+  await SyncTestingInfrastructure(server);
+
+  let remoteId = Utils.makeGUID();
+  let remoteId2 = Utils.makeGUID();
+  let remoteDeviceId = Utils.makeGUID();
+
+  _("Create remote client records");
+  server.insertWBO("foo", "clients", new ServerWBO(remoteId, encryptPayload({
+    id: remoteId,
+    name: "Remote client",
+    type: "desktop",
+    commands: [],
+    version: "48",
+    fxaDeviceId: remoteDeviceId,
+    protocols: ["1.5"],
+  }), Date.now() / 1000 - 30000));
+  server.insertWBO("foo", "clients", new ServerWBO(remoteId2, encryptPayload({
+    id: remoteId2,
+    name: "Remote client",
+    type: "desktop",
+    commands: [],
+    version: "48",
+    fxaDeviceId: remoteDeviceId,
+    protocols: ["1.5"],
+  }), Date.now() / 1000));
+
+  let fxAccounts = engine.fxAccounts;
+  engine.fxAccounts = {
+    notifyDevices() { return Promise.resolve(true); },
+    getDeviceId() { return fxAccounts.getDeviceId(); },
+    getDeviceList() { return Promise.resolve([{ id: remoteDeviceId }]); }
+  };
+
+  try {
+    _("Syncing.");
+    await syncClientsEngine(server);
+
+    ok(engine._store._remoteClients[remoteId].stale);
+    ok(!engine._store._remoteClients[remoteId2].stale);
+
+  } finally {
+    engine.fxAccounts = fxAccounts;
+    await cleanup();
+
+    try {
+      let collection = server.getCollection("foo", "clients");
+      collection.remove(remoteId);
+    } finally {
+      await promiseStopServer(server);
+    }
+  }
+});
+
+
 add_task(async function test_send_uri_to_client_for_display() {
   _("Ensure sendURIToClientForDisplay() sends command properly.");
 
   let tracker = engine._tracker;
   let store = engine._store;
 
   let remoteId = Utils.makeGUID();
   let rec = new ClientsRec("clients", remoteId);
--- a/servo/Cargo.lock
+++ b/servo/Cargo.lock
@@ -174,21 +174,16 @@ dependencies = [
  "quasi 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "quasi_codegen 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "syntex_syntax 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "bitflags"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "bitflags"
 version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "bitflags"
 version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
@@ -2394,25 +2389,16 @@ dependencies = [
  "log 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "servo_config 0.0.1",
  "signpost 0.1.0 (git+https://github.com/pcwalton/signpost.git)",
  "time 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "pulldown-cmark"
-version = "0.0.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "bitflags 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "getopts 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
 name = "push-trait"
 version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "cfg-if 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "len-trait 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2643,17 +2629,17 @@ dependencies = [
  "servo_atoms 0.0.1",
  "servo_config 0.0.1",
  "servo_geometry 0.0.1",
  "servo_rand 0.0.1",
  "servo_url 0.0.1",
  "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "style 0.0.1",
  "style_traits 0.0.1",
- "swapper 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "swapper 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)",
  "tinyfiledialogs 2.5.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "unicode-segmentation 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "utf-8 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "uuid 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "webrender_api 0.52.0 (git+https://github.com/servo/webrender)",
  "webvr_traits 0.0.1",
@@ -3062,25 +3048,16 @@ name = "siphasher"
 version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "size_of_test"
 version = "0.0.1"
 
 [[package]]
-name = "skeptic"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "pulldown-cmark 0.0.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
 name = "slab"
 version = "0.3.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "smallbitvec"
 version = "1.0.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -3259,21 +3236,18 @@ dependencies = [
  "size_of_test 0.0.1",
  "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "style 0.0.1",
  "style_traits 0.0.1",
 ]
 
 [[package]]
 name = "swapper"
-version = "0.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "skeptic 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
+version = "0.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "syn"
 version = "0.11.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
  "synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3342,24 +3316,16 @@ dependencies = [
 [[package]]
 name = "task_info"
 version = "0.0.1"
 dependencies = [
  "gcc 0.3.47 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "tempdir"
-version = "0.3.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
 name = "tendril"
 version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "futf 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "mac 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "utf-8 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -3797,17 +3763,16 @@ dependencies = [
 "checksum audio-video-metadata 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f9a5fd5696539cf7a0c2f826be1d13f9a8673be2f9632c8b62f5b122f7e74416"
 "checksum azure 0.21.2 (git+https://github.com/servo/rust-azure)" = "<none>"
 "checksum backtrace 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "72f9b4182546f4b04ebc4ab7f84948953a118bd6021a1b6a6c909e3e94f6be76"
 "checksum backtrace-sys 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "3a0d842ea781ce92be2bf78a9b38883948542749640b8378b3b2f03d1fd9f1ff"
 "checksum base64 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "96434f987501f0ed4eb336a411e0631ecd1afa11574fe148587adc4ff96143c9"
 "checksum binary-space-partition 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "88ceb0d16c4fd0e42876e298d7d3ce3780dd9ebdcbe4199816a32c77e08597ff"
 "checksum bincode 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e103c8b299b28a9c6990458b7013dc4a8356a9b854c51b9883241f5866fac36e"
 "checksum bindgen 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0c338079dafc81bef7d581f494b906603d12359c4306979eae6ca081925a4984"
-"checksum bitflags 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4f67931368edf3a9a51d29886d245f1c3db2f1ef0dcc9e35ff70341b78c10d23"
 "checksum bitflags 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aad18937a628ec6abcd26d1489012cc0e18c21798210f491af69ded9b881106d"
 "checksum bitflags 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1370e9fc2a6ae53aea8b7a5110edbd08836ed87c88736dfabccade1c2b44bff4"
 "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"
 "checksum bitreader 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "80b13e2ab064ff3aa0bdbf1eff533f9822dc37899821f5f98c67f263eab51707"
 "checksum block 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0d8c1fef690941d3e7788d328517591fecc684c084084702d6ff1641e993699a"
 "checksum blurdroid 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "d7daba519d29beebfc7d302795af88a16b43f431b9b268586926ac61cc655a68"
 "checksum blurmac 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "72af3718b3f652fb2026bf9d9dd5f92332cd287884283c343f03fff16cbb0172"
 "checksum blurmock 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "68dd72da3a3bb40f3d3bdd366c4cf8e2b1d208c366304f382c80cef8126ca8da"
@@ -3975,17 +3940,16 @@ dependencies = [
 "checksum phf_codegen 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "d62594c0bb54c464f633175d502038177e90309daf2e0158be42ed5f023ce88f"
 "checksum phf_generator 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "6b07ffcc532ccc85e3afc45865469bf5d9e4ef5bfcf9622e3cfe80c2d275ec03"
 "checksum phf_shared 0.7.21 (registry+https://github.com/rust-lang/crates.io-index)" = "07e24b0ca9643bdecd0632f2b3da6b1b89bbb0030e0b992afc1113b23a7bc2f2"
 "checksum pkg-config 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "3a8b4c6b8165cd1a1cd4b9b120978131389f64bdaf456435caa41e630edba903"
 "checksum plane-split 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e57800a97ca52c556db6b6184a3201f05366ad5e11876f7d17e234589ca2fa26"
 "checksum png 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5b5c59debbb04e708775b004dce99d66984d5448625d63408ad502014d2880cd"
 "checksum precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
 "checksum procedural-masquerade 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c93cdc1fb30af9ddf3debc4afbdb0f35126cbd99daa229dd76cdd5349b41d989"
-"checksum pulldown-cmark 0.0.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1058d7bb927ca067656537eec4e02c2b4b70eaaa129664c5b90c111e20326f41"
 "checksum push-trait 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fdc13b1a53bc505b526086361221aaa612fefb9b0ecf2853f9d31f807764e004"
 "checksum quasi 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18c45c4854d6d1cf5d531db97c75880feb91c958b0720f4ec1057135fec358b3"
 "checksum quasi_codegen 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "51b9e25fa23c044c1803f43ca59c98dac608976dd04ce799411edd58ece776d4"
 "checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a"
 "checksum rand 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "022e0636ec2519ddae48154b028864bdce4eaf7d35226ab8e65c611be97b189d"
 "checksum rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b614fe08b6665cb9a231d07ac1364b0ef3cb3698f1239ee0c4c3a88a524f54c8"
 "checksum rayon-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e2c21a92a5dca958fb030787c1158446c6deb7f976399b72fa8074603f169e2a"
 "checksum redox_syscall 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)" = "29dbdfd4b9df8ab31dec47c6087b7b13cbf4a776f335e4de8efba8288dda075b"
@@ -4017,34 +3981,32 @@ dependencies = [
 "checksum servo-websocket 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)" = "8a1ff13c5d852c2793805226e688044309f2c1d8f063784805a13e99cb75b611"
 "checksum sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cc30b1e1e8c40c121ca33b86c23308a090d19974ef001b4bf6e61fd1a0fb095c"
 "checksum shared_library 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "fb04126b6fcfd2710fb5b6d18f4207b6c535f2850a7e1a43bcd526d44f30a79a"
 "checksum shell32-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "72f20b8f3c060374edb8046591ba28f62448c369ccbdc7b02075103fb3a9e38d"
 "checksum sig 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c6649e43c1a1e68d29ed56d0dc3b5b6cf3b901da77cf107c4066b9e3da036df5"
 "checksum signpost 0.1.0 (git+https://github.com/pcwalton/signpost.git)" = "<none>"
 "checksum simd 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a94d14a2ae1f1f110937de5fb69e494372560181c7e1739a097fcc2cee37ba0"
 "checksum siphasher 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "0df90a788073e8d0235a67e50441d47db7c8ad9debd91cbf43736a2a92d36537"
-"checksum skeptic 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dd7d8dc1315094150052d0ab767840376335a98ac66ef313ff911cdf439a5b69"
 "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23"
 "checksum smallbitvec 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "79b776f00dfe01df905fa3b2eaa1659522e99e3fc4a7b1334171622205c4bdcf"
 "checksum smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8fcd03faf178110ab0334d74ca9631d77f94c8c11cc77fcb59538abf0025695d"
 "checksum stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "15132e0e364248108c5e2c02e3ab539be8d6f5d52a01ca9bbf27ed657316f02b"
 "checksum string_cache 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "413fc7852aeeb5472f1986ef755f561ddf0c789d3d796e65f0b6fe293ecd4ef8"
 "checksum string_cache_codegen 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "479cde50c3539481f33906a387f2bd17c8e87cb848c35b6021d41fb81ff9b4d7"
 "checksum string_cache_shared 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b1884d1bc09741d466d9b14e6d37ac89d6909cbcac41dd9ae982d4d063bbedfc"
 "checksum strsim 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b4d15c810519a91cf877e7e36e63fe068815c678181439f2f29e2562147c3694"
-"checksum swapper 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3ca610b32bb8bfc5e7f705480c3a1edfeb70b6582495d343872c8bee0dcf758c"
+"checksum swapper 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e454d048db5527d000bfddb77bd072bbf3a1e2ae785f16d9bd116e07c2ab45eb"
 "checksum syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad"
 "checksum synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6"
 "checksum synstructure 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "cf318c34a2f8381a4f3d4db2c91b45bca2b1cd8cbe56caced900647be164800c"
 "checksum syntex 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a8f5e3aaa79319573d19938ea38d068056b826db9883a5d47f86c1cecc688f0e"
 "checksum syntex_errors 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)" = "867cc5c2d7140ae7eaad2ae9e8bf39cb18a67ca651b7834f88d46ca98faadb9c"
 "checksum syntex_pos 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)" = "13ad4762fe52abc9f4008e85c4fb1b1fe3aa91ccb99ff4826a439c7c598e1047"
 "checksum syntex_syntax 0.58.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6e0e4dbae163dd98989464c23dd503161b338790640e11537686f2ef0f25c791"
-"checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6"
 "checksum tendril 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9de21546595a0873061940d994bbbc5c35f024ae4fd61ec5c5b159115684f508"
 "checksum term 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d168af3930b369cfe245132550579d47dfd873d69470755a19c2c6568dbbd989"
 "checksum term_size 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "07b6c1ac5b3fffd75073276bca1ceed01f67a28537097a2a9539e116e50fb21a"
 "checksum thread-id 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8df7875b676fddfadffd96deea3b1124e5ede707d4884248931077518cf1f773"
 "checksum thread_local 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c85048c6260d17cf486ceae3282d9fb6b90be220bf5b28c400f5485ffc29f0c7"
 "checksum thread_profiler 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "5920e77802b177479ab5795767fa48e68f61b2f516c2ac0041e2978dd8efe483"
 "checksum threadpool 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "59f6d3eff89920113dac9db44dde461d71d01e88a5b57b258a0466c32b5d7fe1"
 "checksum time 0.1.37 (registry+https://github.com/rust-lang/crates.io-index)" = "ffd7ccbf969a892bf83f1e441126968a07a3941c24ff522a26af9f9f4585d1a3"
--- a/servo/components/hashglobe/src/diagnostic.rs
+++ b/servo/components/hashglobe/src/diagnostic.rs
@@ -1,25 +1,24 @@
 use hash_map::HashMap;
 use std::borrow::Borrow;
 use std::hash::{BuildHasher, Hash};
-use table::SafeHash;
 
 use FailedAllocationError;
 
 #[cfg(target_pointer_width = "32")]
 const CANARY: usize = 0x42cafe99;
 #[cfg(target_pointer_width = "64")]
 const CANARY: usize = 0x42cafe9942cafe99;
 
 #[derive(Clone, Debug)]
 enum JournalEntry {
-    Insert(SafeHash),
-    GetOrInsertWith(SafeHash),
-    Remove(SafeHash),
+    Insert(usize),
+    GOIW(usize),
+    Remove(usize),
     DidClear(usize),
 }
 
 #[derive(Clone, Debug)]
 pub struct DiagnosticHashMap<K, V, S>
     where K: Eq + Hash,
           S: BuildHasher
 {
@@ -32,27 +31,33 @@ impl<K: Hash + Eq, V, S: BuildHasher> Di
     where K: Eq + Hash,
           S: BuildHasher
 {
     #[inline(always)]
     pub fn inner(&self) -> &HashMap<K, (usize, V), S> {
         &self.map
     }
 
-    #[inline(always)]
+    #[inline(never)]
     pub fn begin_mutation(&mut self) {
+        self.map.verify();
         assert!(self.readonly);
         self.readonly = false;
+        self.verify();
     }
 
-    #[inline(always)]
+    #[inline(never)]
     pub fn end_mutation(&mut self) {
+        self.map.verify();
         assert!(!self.readonly);
         self.readonly = true;
+        self.verify();
+    }
 
+    fn verify(&self) {
         let mut position = 0;
         let mut bad_canary: Option<(usize, *const usize)> = None;
         for (_,v) in self.map.iter() {
             let canary_ref = &v.0;
             if *canary_ref == CANARY {
                 position += 1;
                 continue;
             }
@@ -100,72 +105,76 @@ impl<K: Hash + Eq, V, S: BuildHasher> Di
 
     #[inline(always)]
     pub fn try_get_or_insert_with<F: FnOnce() -> V>(
         &mut self,
         key: K,
         default: F
     ) -> Result<&mut V, FailedAllocationError> {
         assert!(!self.readonly);
-        self.journal.push(JournalEntry::GetOrInsertWith(self.map.make_hash(&key)));
+        self.journal.push(JournalEntry::GOIW(self.map.make_hash(&key).inspect()));
         let entry = self.map.try_entry(key)?;
         Ok(&mut entry.or_insert_with(|| (CANARY, default())).1)
     }
 
     #[inline(always)]
     pub fn try_insert(&mut self, k: K, v: V) -> Result<Option<V>, FailedAllocationError> {
         assert!(!self.readonly);
-        self.journal.push(JournalEntry::Insert(self.map.make_hash(&k)));
+        self.journal.push(JournalEntry::Insert(self.map.make_hash(&k).inspect()));
         let old = self.map.try_insert(k, (CANARY, v))?;
         Ok(old.map(|x| x.1))
     }
 
     #[inline(always)]
     pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
         where K: Borrow<Q>,
               Q: Hash + Eq
     {
         assert!(!self.readonly);
-        self.journal.push(JournalEntry::Remove(self.map.make_hash(k)));
+        self.journal.push(JournalEntry::Remove(self.map.make_hash(k).inspect()));
         self.map.remove(k).map(|x| x.1)
     }
 
     #[inline(always)]
     pub fn clear(&mut self) where K: 'static, V: 'static  {
         // We handle scoped mutations for the caller here, since callsites that
         // invoke clear() don't benefit from the coalescing we do around insertion.
         self.begin_mutation();
         self.journal.clear();
         self.journal.push(JournalEntry::DidClear(self.map.raw_capacity()));
         self.map.clear();
         self.end_mutation();
     }
 
     #[inline(never)]
     fn report_corruption(
-        &mut self,
+        &self,
         canary: usize,
         canary_addr: *const usize,
         position: usize
     ) {
+        use ::std::ffi::CString;
+        let key = b"HashMapJournal\0";
+        let value = CString::new(format!("{:?}", self.journal)).unwrap();
         unsafe {
-            Gecko_AddBufferToCrashReport(
-                self.journal.as_ptr() as *const _,
-                self.journal.len() * ::std::mem::size_of::<JournalEntry>(),
+            Gecko_AnnotateCrashReport(
+                key.as_ptr() as *const ::std::os::raw::c_char,
+                value.as_ptr(),
             );
         }
         panic!(
-            "HashMap Corruption (sz={}, cap={}, pairsz={}, cnry={:#x}, pos={}, base_addr={:?}, cnry_addr={:?})",
+            "HashMap Corruption (sz={}, cap={}, pairsz={}, cnry={:#x}, pos={}, base_addr={:?}, cnry_addr={:?}, jrnl_len={})",
             self.map.len(),
             self.map.raw_capacity(),
             ::std::mem::size_of::<(K, (usize, V))>(),
             canary,
             position,
             self.map.raw_buffer(),
             canary_addr,
+            self.journal.len(),
         );
     }
 }
 
 impl<K, V, S> PartialEq for DiagnosticHashMap<K, V, S>
     where K: Eq + Hash,
           V: PartialEq,
           S: BuildHasher
@@ -200,11 +209,11 @@ impl<K: Hash + Eq, V, S: BuildHasher> Dr
           S: BuildHasher
 {
     fn drop(&mut self) {
         debug_assert!(self.readonly, "Dropped while mutating");
     }
 }
 
 extern "C" {
-    pub fn Gecko_AddBufferToCrashReport(addr: *const ::std::os::raw::c_void,
-                                        bytes: usize);
+    pub fn Gecko_AnnotateCrashReport(key_str: *const ::std::os::raw::c_char,
+                                     value_str: *const ::std::os::raw::c_char);
 }
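
As a standalone illustration of the canary scheme DiagnosticHashMap relies on above (every value stored as a (CANARY, value) pair, with a verification pass at mutation boundaries), here is a minimal sketch against std::collections::HashMap. The CanaryMap type and its methods are invented for this example and are not part of hashglobe.

use std::collections::HashMap;
use std::hash::Hash;

const CANARY: usize = 0x42cafe99;

struct CanaryMap<K, V> {
    map: HashMap<K, (usize, V)>,
}

impl<K: Hash + Eq, V> CanaryMap<K, V> {
    fn new() -> Self {
        CanaryMap { map: HashMap::new() }
    }

    // Store every value next to a known constant, as DiagnosticHashMap does.
    fn insert(&mut self, k: K, v: V) -> Option<V> {
        self.map.insert(k, (CANARY, v)).map(|(_, old)| old)
    }

    // Walk every entry; a clobbered canary means something scribbled over the
    // table, and `position` says how far into the iteration the damage sits.
    fn verify(&self) {
        for (position, (_key, (canary, _value))) in self.map.iter().enumerate() {
            assert!(
                *canary == CANARY,
                "HashMap Corruption - entry {} has canary {:#x}",
                position, canary
            );
        }
    }
}

fn main() {
    let mut m = CanaryMap::new();
    m.insert("a", 1);
    m.insert("b", 2);
    m.verify(); // passes while the entries are intact
}
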
--- a/servo/components/hashglobe/src/hash_map.rs
+++ b/servo/components/hashglobe/src/hash_map.rs
@@ -689,16 +689,22 @@ impl<K, V, S> HashMap<K, V, S>
 
     /// Returns a raw pointer to the table's buffer.
     #[inline]
     pub fn raw_buffer(&self) -> *const u8 {
         assert!(self.len() != 0);
         self.table.raw_buffer()
     }
 
+    /// Verify that the table metadata is internally consistent.
+    #[inline]
+    pub fn verify(&self) {
+        self.table.verify();
+    }
+
     /// Reserves capacity for at least `additional` more elements to be inserted
     /// in the `HashMap`. The collection may reserve more space to avoid
     /// frequent reallocations.
     ///
     /// # Panics
     ///
     /// Panics if the new allocation size overflows [`usize`].
     ///
--- a/servo/components/hashglobe/src/table.rs
+++ b/servo/components/hashglobe/src/table.rs
@@ -242,23 +242,31 @@ impl<K, V> RawBucket<K, V> {
     }
     unsafe fn pair(&self) -> *mut (K, V) {
         self.pair_start.offset(self.idx as isize) as *mut (K, V)
     }
     unsafe fn hash_pair(&self) -> (*mut HashUint, *mut (K, V)) {
         (self.hash(), self.pair())
     }
 
-    fn assert_bounds(&self, bytes_allocated: usize) {
+    fn assert_bounds(&self, bytes_allocated: usize, size: Option<usize>) {
         let base = self.hash_start as *mut u8;
         let (h, p) = unsafe { self.hash_pair() };
         assert!((h as *mut u8) < (p as *mut u8), "HashMap Corruption - hash offset not below pair offset");
         let end = unsafe { p.offset(1) } as *mut u8;
-        assert!(end > base, "HashMap Corruption - end={:?}, base={:?}", end, base);
-        assert!(end <= unsafe { base.offset(bytes_allocated as isize) }, "HashMap Corruption - end={:?}, base={:?}", end, base);
+        assert!(end > base, "HashMap Corruption - end={:?}, base={:?}, idx={}, alloc={}, size={:?}", end, base, self.idx, bytes_allocated, size);
+        assert!(
+            end <= unsafe { base.offset(bytes_allocated as isize) },
+            "HashMap Corruption - end={:?}, base={:?}, idx={}, alloc={}, size={:?}",
+            end,
+            base,
+            self.idx,
+            bytes_allocated,
+            size,
+        );
     }
 }
 
 // Buckets hold references to the table.
 impl<K, V, M> FullBucket<K, V, M> {
     /// Borrow a reference to the table.
     pub fn table(&self) -> &M {
         &self.table
@@ -426,23 +434,23 @@ impl<K, V, M: Deref<Target = RawTable<K,
                 })
             }
         }
     }
 
     /// Modifies the bucket in place to make it point to the next slot.
     pub fn next(&mut self) {
         self.raw.idx = self.raw.idx.wrapping_add(1) & self.table.capacity_mask;
-        self.raw.assert_bounds(self.table.bytes_allocated);
+        self.raw.assert_bounds(self.table.bytes_allocated, None);
     }
 
     /// Modifies the bucket in place to make it point to the previous slot.
     pub fn prev(&mut self) {
         self.raw.idx = self.raw.idx.wrapping_sub(1) & self.table.capacity_mask;
-        self.raw.assert_bounds(self.table.bytes_allocated);
+        self.raw.assert_bounds(self.table.bytes_allocated, None);
     }
 }
 
 impl<K, V, M: Deref<Target = RawTable<K, V>>> EmptyBucket<K, V, M> {
     #[inline]
     pub fn next(self) -> Bucket<K, V, M> {
         let mut bucket = self.into_bucket();
         bucket.next();
@@ -808,16 +816,17 @@ impl<K, V> RawTable<K, V> {
             size: 0,
             hashes: TaggedHashUintPtr::new(hashes),
             bytes_allocated: size,
             marker: marker::PhantomData,
         })
     }
 
     fn raw_bucket_at(&self, index: usize) -> RawBucket<K, V> {
+        self.verify();
         let hashes_size = self.capacity() * size_of::<HashUint>();
         let pairs_size = self.capacity() * size_of::<(K, V)>();
 
         let (pairs_offset, _, oflo) =
             calculate_offsets(hashes_size, pairs_size, align_of::<(K, V)>());
         assert!(!oflo, "HashMap Corruption - capacity overflow");
         assert!(pairs_offset as isize > 0, "HashMap Corruption - pairs offset={}", pairs_offset);
         assert!(index as isize >= 0, "HashMap Corruption - index={}", index);
@@ -828,26 +837,40 @@ impl<K, V> RawTable<K, V> {
             RawBucket {
                 hash_start: buffer as *mut HashUint,
                 pair_start: buffer.offset(pairs_offset as isize) as *const (K, V),
                 idx: index,
                 _marker: marker::PhantomData,
             }
         };
 
-        bucket.assert_bounds(self.bytes_allocated);
+        bucket.assert_bounds(self.bytes_allocated, Some(self.size));
         bucket
     }
 
     /// Returns a raw pointer to the table's buffer.
     #[inline]
     pub fn raw_buffer(&self) -> *const u8 {
         self.hashes.ptr() as *const u8
     }
 
+    /// Verify that the table metadata is internally consistent.
+    #[inline]
+    pub fn verify(&self) {
+        assert!(
+            self.capacity() == 0 || self.capacity().is_power_of_two(),
+            "HashMap Corruption: mask={}, sz={}, alloc={}", self.capacity_mask, self.size, self.bytes_allocated,
+        );
+        assert_eq!(
+            self.capacity() * (size_of::<usize>() + size_of::<(K, V)>()),
+            self.bytes_allocated,
+            "HashMap Corruption: mask={}, sz={}, alloc={}", self.capacity_mask, self.size, self.bytes_allocated,
+        );
+    }
+
     /// Creates a new raw table from a given capacity. All buckets are
     /// initially empty.
     pub fn new(capacity: usize) -> Result<RawTable<K, V>, FailedAllocationError> {
         unsafe {
             let ret = RawTable::try_new_uninitialized(capacity)?;
             ptr::write_bytes(ret.hashes.ptr(), 0, capacity);
             Ok(ret)
         }
@@ -928,17 +951,17 @@ impl<K, V> RawTable<K, V> {
             if *raw.hash() != EMPTY_BUCKET {
                 ptr::drop_in_place(raw.pair());
                 elems_left = elems_left.checked_sub(1).unwrap();
                 if elems_left == 0 {
                     return;
                 }
             }
             raw.idx = raw.idx.checked_sub(1).unwrap();
-            raw.assert_bounds(self.bytes_allocated);
+            raw.assert_bounds(self.bytes_allocated, Some(self.size));
         }
     }
 
     /// Set the table tag
     pub fn set_tag(&mut self, value: bool) {
         self.hashes.set_tag(value)
     }
 
@@ -988,22 +1011,22 @@ impl<'a, K, V> Iterator for RawBuckets<'
 
         loop {
             unsafe {
                 let item = self.raw.unwrap();
                 if *item.hash() != EMPTY_BUCKET {
                     self.elems_left = self.elems_left.checked_sub(1).unwrap();
                     if self.elems_left != 0 {
                         self.raw.as_mut().unwrap().idx += 1;
-                        self.raw.as_ref().unwrap().assert_bounds(self.bytes_allocated);
+                        self.raw.as_ref().unwrap().assert_bounds(self.bytes_allocated, None);
                     }
                     return Some(item);
                 }
                 self.raw.as_mut().unwrap().idx += 1;
-                self.raw.as_ref().unwrap().assert_bounds(self.bytes_allocated);
+                self.raw.as_ref().unwrap().assert_bounds(self.bytes_allocated, None);
             }
         }
     }
 
     fn size_hint(&self) -> (usize, Option<usize>) {
         (self.elems_left, Some(self.elems_left))
     }
 }
@@ -1202,19 +1225,19 @@ impl<K: Clone, V: Clone> Clone for RawTa
                     ptr::write(new_buckets.pair(), kv);
                 }
 
                 if buckets.idx == cap - 1 {
                     break;
                 }
 
                 buckets.idx += 1;
-                buckets.assert_bounds(self.bytes_allocated);
+                buckets.assert_bounds(self.bytes_allocated, None);
                 new_buckets.idx += 1;
-                new_buckets.assert_bounds(new_ht.bytes_allocated);
+                new_buckets.assert_bounds(new_ht.bytes_allocated, None);
             }
 
             new_ht.size = self.size();
 
             new_ht
         }
     }
 }
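
The new RawTable::verify() boils down to two invariants: the capacity is zero or a power of two, and bytes_allocated equals capacity times the per-bucket footprint (one hash word plus one (K, V) pair). A rough standalone restatement follows; the function name and the call in main are invented for illustration.

use std::mem::size_of;

// Two invariants, restated outside the table type: the bucket count must be a
// power of two (or zero), and the recorded allocation must equal
// capacity * (hash word + key/value pair).
fn verify_table_invariants<K, V>(capacity: usize, bytes_allocated: usize) {
    assert!(
        capacity == 0 || capacity.is_power_of_two(),
        "HashMap Corruption: capacity={} is not a power of two", capacity
    );
    assert_eq!(
        capacity * (size_of::<usize>() + size_of::<(K, V)>()),
        bytes_allocated,
        "HashMap Corruption: capacity={}, alloc={}", capacity, bytes_allocated
    );
}

fn main() {
    // A consistent 8-bucket table holding (u64, u32) pairs.
    let ok = 8 * (size_of::<usize>() + size_of::<(u64, u32)>());
    verify_table_invariants::<u64, u32>(8, ok);
}
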
--- a/servo/components/script/Cargo.toml
+++ b/servo/components/script/Cargo.toml
@@ -79,17 +79,17 @@ servo_arc = {path = "../servo_arc"}
 servo_atoms = {path = "../atoms"}
 servo_config = {path = "../config"}
 servo_geometry = {path = "../geometry" }
 servo_rand = {path = "../rand"}
 servo_url = {path = "../url"}
 smallvec = "0.4"
 style = {path = "../style"}
 style_traits = {path = "../style_traits"}
-swapper = "0.0.4"
+swapper = "0.1"
 time = "0.1.12"
 unicode-segmentation = "1.1.0"
 url = {version = "1.2", features = ["heap_size", "query_encoding"]}
 utf-8 = "0.7"
 uuid = {version = "0.5", features = ["v4"]}
 xml5ever = {version = "0.10"}
 webrender_api = {git = "https://github.com/servo/webrender", features = ["ipc"]}
 webvr_traits = {path = "../webvr_traits"}
--- a/servo/components/servo_arc/lib.rs
+++ b/servo/components/servo_arc/lib.rs
@@ -384,21 +384,21 @@ impl<T: ?Sized> Drop for Arc<T> {
         unsafe {
             self.drop_slow();
         }
     }
 }
 
 impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
     fn eq(&self, other: &Arc<T>) -> bool {
-        *(*self) == *(*other)
+        Self::ptr_eq(self, other) || *(*self) == *(*other)
     }
 
     fn ne(&self, other: &Arc<T>) -> bool {
-        *(*self) != *(*other)
+        !Self::ptr_eq(self, other) && *(*self) != *(*other)
     }
 }
 impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
     fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
         (**self).partial_cmp(&**other)
     }
 
     fn lt(&self, other: &Arc<T>) -> bool {
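
The PartialEq change above adds a pointer-equality fast path: two Arcs backed by the same allocation are equal by definition, so the potentially deep comparison only runs when the pointers differ. A small sketch of the same idea, written against std::sync::Arc rather than servo_arc (the patch itself calls Self::ptr_eq):

use std::sync::Arc;

// Equality with a pointer fast path: same allocation implies equal contents,
// so the element-wise comparison only runs for distinct allocations.
fn arcs_equal<T: PartialEq>(a: &Arc<T>, b: &Arc<T>) -> bool {
    Arc::ptr_eq(a, b) || **a == **b
}

fn main() {
    let a = Arc::new(vec![1, 2, 3]);
    let b = Arc::clone(&a);          // same allocation: short-circuits on ptr_eq
    let c = Arc::new(vec![1, 2, 3]); // different allocation: falls back to ==
    assert!(arcs_equal(&a, &b));
    assert!(arcs_equal(&a, &c));
}
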
--- a/servo/components/style/gecko/generated/bindings.rs
+++ b/servo/components/style/gecko/generated/bindings.rs
@@ -2939,17 +2939,19 @@ extern "C" {
                                     set: RawServoStyleSetBorrowed)
      -> ServoStyleContextStrong;
 }
 extern "C" {
     pub fn Servo_SetExplicitStyle(element: RawGeckoElementBorrowed,
                                   primary_style: ServoStyleContextBorrowed);
 }
 extern "C" {
-    pub fn Servo_HasAuthorSpecifiedRules(element: RawGeckoElementBorrowed,
+    pub fn Servo_HasAuthorSpecifiedRules(style: ServoStyleContextBorrowed,
+                                         element: RawGeckoElementBorrowed,
+                                         pseudo_type: CSSPseudoElementType,
                                          rule_type_mask: u32,
                                          author_colors_allowed: bool) -> bool;
 }
 extern "C" {
     pub fn Servo_ResolveStyleLazily(element: RawGeckoElementBorrowed,
                                     pseudo_type: CSSPseudoElementType,
                                     rule_inclusion: StyleRuleInclusion,
                                     snapshots:
--- a/servo/components/style/rule_tree/mod.rs
+++ b/servo/components/style/rule_tree/mod.rs
@@ -2,16 +2,18 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #![allow(unsafe_code)]
 
 //! The rule tree.
 
 use applicable_declarations::ApplicableDeclarationList;
+#[cfg(feature = "gecko")]
+use gecko::selector_parser::PseudoElement;
 #[cfg(feature = "servo")]
 use heapsize::HeapSizeOf;
 #[cfg(feature = "gecko")]
 use malloc_size_of::{MallocSizeOf, MallocSizeOfOps};
 use properties::{Importance, LonghandIdSet, PropertyDeclarationBlock};
 use servo_arc::{Arc, ArcBorrow, NonZeroPtrMut};
 use shared_lock::{Locked, StylesheetGuards, SharedRwLockReadGuard};
 use smallvec::SmallVec;
@@ -1072,16 +1074,17 @@ impl StrongRuleNode {
     /// Implementation of `nsRuleNode::HasAuthorSpecifiedRules` for Servo rule
     /// nodes.
     ///
     /// Returns true if any properties specified by `rule_type_mask` was set by
     /// an author rule.
     #[cfg(feature = "gecko")]
     pub fn has_author_specified_rules<E>(&self,
                                          mut element: E,
+                                         mut pseudo: Option<PseudoElement>,
                                          guards: &StylesheetGuards,
                                          rule_type_mask: u32,
                                          author_colors_allowed: bool)
         -> bool
         where E: ::dom::TElement
     {
         use gecko_bindings::structs::{NS_AUTHOR_SPECIFIED_BACKGROUND, NS_AUTHOR_SPECIFIED_BORDER};
         use gecko_bindings::structs::{NS_AUTHOR_SPECIFIED_PADDING, NS_AUTHOR_SPECIFIED_TEXT_SHADOW};
@@ -1286,24 +1289,30 @@ impl StrongRuleNode {
                         }
                     }
                 }
             }
 
             if !have_explicit_ua_inherit { break }
 
             // Continue to the parent element and search for the inherited properties.
-            element = match element.inheritance_parent() {
-                Some(parent) => parent,
-                None => break
-            };
+            if let Some(pseudo) = pseudo.take() {
+                if pseudo.inherits_from_default_values() {
+                    break;
+                }
+            } else {
+                element = match element.inheritance_parent() {
+                    Some(parent) => parent,
+                    None => break
+                };
 
-            let parent_data = element.mutate_data().unwrap();
-            let parent_rule_node = parent_data.styles.primary().rules().clone();
-            element_rule_node = Cow::Owned(parent_rule_node);
+                let parent_data = element.mutate_data().unwrap();
+                let parent_rule_node = parent_data.styles.primary().rules().clone();
+                element_rule_node = Cow::Owned(parent_rule_node);
+            }
 
             properties = inherited_properties;
         }
 
         false
     }
 
     /// Returns true if there is either animation or transition level rule.
--- a/servo/components/style/values/specified/calc.rs
+++ b/servo/components/style/values/specified/calc.rs
@@ -368,17 +368,17 @@ impl CalcNode {
                         );
                     }
                     NoCalcLength::FontRelative(rel) => {
                         match rel {
                             FontRelativeLength::Em(em) => {
                                 ret.em = Some(ret.em.unwrap_or(0.) + em * factor);
                             }
                             FontRelativeLength::Ex(ex) => {
-                                ret.ex = Some(ret.em.unwrap_or(0.) + ex * factor);
+                                ret.ex = Some(ret.ex.unwrap_or(0.) + ex * factor);
                             }
                             FontRelativeLength::Ch(ch) => {
                                 ret.ch = Some(ret.ch.unwrap_or(0.) + ch * factor);
                             }
                             FontRelativeLength::Rem(rem) => {
                                 ret.rem = Some(ret.rem.unwrap_or(0.) + rem * factor);
                             }
                         }
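
The calc.rs fix corrects a copy-paste slip: the Ex branch seeded its accumulator from ret.em instead of ret.ex, so mixing em and ex terms in one calc() expression could inflate the ex total. A toy sketch of the corrected per-unit accumulation; CalcLengths and its methods are made up for illustration and are not the style crate's types.

// Each font-relative unit accumulates into its own Option<f32> bucket; the bug
// was reading the em bucket while writing the ex one.
#[derive(Default, Debug, PartialEq)]
struct CalcLengths {
    em: Option<f32>,
    ex: Option<f32>,
}

impl CalcLengths {
    fn add_em(&mut self, em: f32, factor: f32) {
        self.em = Some(self.em.unwrap_or(0.) + em * factor);
    }
    fn add_ex(&mut self, ex: f32, factor: f32) {
        // Fixed: seed from the existing ex value, not from em.
        self.ex = Some(self.ex.unwrap_or(0.) + ex * factor);
    }
}

fn main() {
    let mut ret = CalcLengths::default();
    ret.add_em(2.0, 1.0);
    ret.add_ex(3.0, 1.0);
    assert_eq!(ret.ex, Some(3.0)); // would have been Some(5.0) with the old code
    println!("{:?}", ret);
}
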
--- a/servo/ports/geckolib/glue.rs
+++ b/servo/ports/geckolib/glue.rs
@@ -1858,36 +1858,34 @@ pub extern "C" fn Servo_SetExplicitStyle
     // We only support this API for initial styling. There's no reason it couldn't
     // work for other things, we just haven't had a reason to do so.
     debug_assert!(element.get_data().is_none());
     let mut data = unsafe { element.ensure_data() };
     data.styles.primary = Some(unsafe { ArcBorrow::from_ref(style) }.clone_arc());
 }
 
 #[no_mangle]
-pub extern "C" fn Servo_HasAuthorSpecifiedRules(element: RawGeckoElementBorrowed,
+pub extern "C" fn Servo_HasAuthorSpecifiedRules(style: ServoStyleContextBorrowed,
+                                                element: RawGeckoElementBorrowed,
+                                                pseudo_type: CSSPseudoElementType,
                                                 rule_type_mask: u32,
                                                 author_colors_allowed: bool)
     -> bool
 {
     let element = GeckoElement(element);
-
-    let data =
-        element.borrow_data()
-        .expect("calling Servo_HasAuthorSpecifiedRules on an unstyled element");
-
-    let primary_style = data.styles.primary();
+    let pseudo = PseudoElement::from_pseudo_type(pseudo_type);
 
     let guard = (*GLOBAL_STYLE_DATA).shared_lock.read();
     let guards = StylesheetGuards::same(&guard);
 
-    primary_style.rules().has_author_specified_rules(element,
-                                                     &guards,
-                                                     rule_type_mask,
-                                                     author_colors_allowed)
+    style.rules().has_author_specified_rules(element,
+                                             pseudo,
+                                             &guards,
+                                             rule_type_mask,
+                                             author_colors_allowed)
 }
 
 fn get_pseudo_style(
     guard: &SharedRwLockReadGuard,
     element: GeckoElement,
     pseudo: &PseudoElement,
     rule_inclusion: RuleInclusion,
     styles: &ElementStyles,
--- a/taskcluster/ci/test/test-sets.yml
+++ b/taskcluster/ci/test/test-sets.yml
@@ -61,16 +61,18 @@ talos:
     - talos-g3
     - talos-g4
     - talos-g5
     - talos-other
     - talos-svgr
     - talos-tp5o
     - talos-perf-reftest
     - talos-perf-reftest-singletons
+    - talos-tp6
+    - talos-tp6-stylo-threads
 
 awsy:
     - awsy
 
 awsy-stylo-disabled:
     - awsy-stylo-disabled
 
 awsy-stylo-sequential:
@@ -94,19 +96,19 @@ stylo-disabled-tests:
     - mochitest-gpu
     - mochitest-media
     - mochitest-webgl
 
 reftest-stylo:
     - reftest-stylo
 
 qr-talos:
-    # - talos-chrome # fails with layers-free
+    - talos-chrome
     - talos-dromaeojs
-    # - talos-g1 # fails with layers-free
+    - talos-g1
     # - talos-g2 # doesn't work with QR yet
     - talos-g3
     - talos-g4
     - talos-g5
     # - talos-other # fails with layers-free
     # - talos-svgr # fails with layers-free
     - talos-tp5o
     - talos-perf-reftest
@@ -139,16 +141,17 @@ linux-talos-stylo-disabled:
     - talos-g3-stylo-disabled
     - talos-g4-stylo-disabled
     - talos-g5-stylo-disabled
     - talos-other-stylo-disabled
     - talos-svgr-stylo-disabled
     - talos-tp5o-stylo-disabled
     - talos-perf-reftest-stylo-disabled
     - talos-perf-reftest-singletons-stylo-disabled
+    - talos-tp6-stylo-disabled
 
 windows-reftest-gpu:
     - reftest-gpu
 
 windows-tests:
     - cppunit
     - crashtest
     - firefox-ui-functional-local
--- a/taskcluster/ci/test/tests.yml
+++ b/taskcluster/ci/test/tests.yml
@@ -1020,41 +1020,29 @@ reftest:
             linux64-qr/.*: 1
             windows10-64-asan.*: 3
             default: default
 
 reftest-gpu:
     description: "Reftest GPU run"
     suite: reftest/reftest-gpu
     treeherder-symbol: tc-R(Rg)
-    chunks:
-        by-test-platform:
-            # Remove special casing when debug isn't using BBB anymore
-            windows7-32.*/debug: 1
-            default: 8
+    chunks: 8
     run-on-projects:
         by-test-platform:
             windows10.*: []
             windows8-64.*: []
             default: built-projects
-    worker-type:
-        by-test-platform:
-            windows7-32.*/debug: buildbot-bridge/buildbot-bridge
-            default: null
     instance-size: default
     virtualization: virtual-with-gpu
     max-run-time: 3600
     mozharness:
         script: desktop_unittest.py
         no-read-buildbot-config: true
-        chunked:
-            # Remove special casing when debug isn't using BBB anymore
-            by-test-platform:
-                windows7-32.*/debug: false
-                default: true
+        chunked: true
         config:
             by-test-platform:
                 windows.*:
                     - unittests/win_taskcluster_unittest.py
                 macosx.*:
                     - unittests/mac_unittest.py
                 linux.*:
                     - unittests/linux_unittest.py
@@ -1752,19 +1740,19 @@ talos-tp5o-stylo-disabled:
 
 talos-tp6:
     description: "Talos Tp6"
     suite: talos
     try-name: tp6
     treeherder-symbol: tc-T(tp6)
     run-on-projects:
         by-test-platform:
-            windows.*: ['mozilla-beta', 'mozilla-central', 'mozilla-inbound', 'autoland', 'try', 'date']
+            windows.*: ['mozilla-beta', 'mozilla-central', 'mozilla-inbound', 'autoland', 'try']
             macosx.*: ['mozilla-beta', 'autoland', 'try']
-            default: []
+            default: ['mozilla-beta', 'mozilla-central', 'mozilla-inbound', 'autoland', 'try']
     max-run-time: 3600
     mozharness:
         script: talos_script.py
         no-read-buildbot-config: true
         config:
             by-test-platform:
                 macosx.*:
                     - talos/mac_config.py
@@ -1782,17 +1770,17 @@ talos-tp6-stylo-disabled:
     suite: talos
     try-name: tp6-stylo-disabled
     treeherder-symbol: tc-Tsd(tp6)
     virtualization: hardware
     run-on-projects:
         by-test-platform:
             windows.*: ['mozilla-beta', 'mozilla-central', 'try']
             macosx.*: ['mozilla-beta', 'mozilla-central', 'try']
-            default: []
+            default: ['mozilla-beta', 'mozilla-central', 'try']
     max-run-time: 3600
     mozharness:
         script: talos_script.py
         no-read-buildbot-config: true
         config:
             by-test-platform:
                 macosx.*:
                     - talos/mac_config.py
@@ -1808,17 +1796,17 @@ talos-tp6-stylo-threads:
     suite: talos
     try-name: tp6-stylo-threads
     treeherder-symbol: tc-Tss(tp6)
     virtualization: hardware
     run-on-projects:
         by-test-platform:
             windows.*: ['mozilla-beta', 'mozilla-central', 'mozilla-inbound', 'autoland', 'try']
             macosx.*: ['mozilla-beta', 'autoland', 'try']
-            default: []
+            default: ['mozilla-beta', 'mozilla-central', 'mozilla-inbound', 'autoland', 'try']
     max-run-time: 3600
     mozharness:
         script: talos_script.py
         no-read-buildbot-config: true
         config:
             by-test-platform:
                 macosx.*:
                     - talos/mac_config.py
@@ -1831,17 +1819,17 @@ talos-tp6-stylo-threads:
 
 talos-xperf:
     description: "Talos xperf"
     suite: talos
     try-name: xperf
     treeherder-symbol: tc-T(x)
     run-on-projects:
         by-test-platform:
-            windows7-32.*: ['mozilla-beta', 'mozilla-central', 'mozilla-inbound', 'autoland', 'try', 'date']
+            windows7-32.*: ['mozilla-beta', 'mozilla-central', 'mozilla-inbound', 'autoland', 'try']
             default: []
     max-run-time: 3600
     mozharness:
         script: talos_script.py
         no-read-buildbot-config: true
         config:
             by-test-platform:
                 macosx.*:
--- a/testing/marionette/atom.js
+++ b/testing/marionette/atom.js
@@ -1,9 +1,9 @@
-// Copyright 2011-2014 Software Freedom Conservancy
+// Copyright 2011-2017 Software Freedom Conservancy
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
--- a/testing/mozharness/mozharness/mozilla/testing/talos.py
+++ b/testing/mozharness/mozharness/mozilla/testing/talos.py
@@ -412,22 +412,19 @@ class Talos(TestingMixin, MercurialScrip
     def setup_mitmproxy(self):
         """Some talos tests require the use of mitmproxy to playback the pages,
         set it up here.
         """
         if not self.query_mitmproxy_recording_set():
             self.info("Skipping: mitmproxy is not required")
             return
 
-        # tp6 is supported in production only on win and macosx
         os_name = self.platform_name()
-        if 'win' not in os_name and os_name != 'macosx':
-            self.fatal("Aborting: this test is not supported on this platform.")
 
-        # on windows we need to install a pytyon 3 virtual env; on macosx we
+        # on windows we need to install a python 3 virtual env; on macosx and linux we
         # use a mitmdump pre-built binary that doesn't need an external python 3
         if 'win' in os_name:
             # setup python 3.x virtualenv
             self.setup_py3_virtualenv()
 
         # install mitmproxy
         self.install_mitmproxy()
 
@@ -452,26 +449,30 @@ class Talos(TestingMixin, MercurialScrip
 
     def install_mitmproxy(self):
         """Install the mitmproxy tool into the Python 3.x env"""
         if 'win' in self.platform_name():
             self.info("Installing mitmproxy")
             self.py3_install_modules(modules=['mitmproxy'])
             self.mitmdump = os.path.join(self.py3_path_to_executables(), 'mitmdump')
         else:
-            # on macosx we use a prebuilt mitmproxy release binary
+            # on macosx and linux64 we use a prebuilt mitmproxy release binary
             mitmproxy_path = os.path.join(self.talos_path, 'talos', 'mitmproxy')
             self.mitmdump = os.path.join(mitmproxy_path, 'mitmdump')
             if not os.path.exists(self.mitmdump):
                 # download the mitmproxy release binary; skipped if --no-download is specified
                 if '--no-download' not in self.config['talos_extra_options']:
-                    self.query_mitmproxy_rel_bin('osx')
+                    if 'osx' in self.platform_name():
+                        _platform = 'osx'
+                    else:
+                        _platform = 'linux64'
+                    self.query_mitmproxy_rel_bin(_platform)
                     if self.mitmproxy_rel_bin is None:
                         self.fatal("Aborting: mitmproxy_release_bin_osx not found in talos.json")
-                    self.download_mitmproxy_binary('osx')
+                    self.download_mitmproxy_binary(_platform)
                 else:
                     self.info("Not downloading mitmproxy rel binary because no-download was specified")
             self.info('The mitmdump macosx binary is found at: %s' % self.mitmdump)
         self.run_command([self.mitmdump, '--version'], env=self.query_env())
 
     def query_mitmproxy_rel_bin(self, platform):
         """Mitmproxy requires external playback archives to be downloaded and extracted"""
         if self.mitmproxy_rel_bin:
@@ -482,25 +483,28 @@ class Talos(TestingMixin, MercurialScrip
             return self.mitmproxy_rel_bin
 
     def download_mitmproxy_binary(self, platform):
         """Download the mitmproxy release binary from tooltool"""
         self.info("Downloading the mitmproxy release binary using tooltool")
         dest = os.path.join(self.talos_path, 'talos', 'mitmproxy')
         _manifest = "mitmproxy-rel-bin-%s.manifest" % platform
         manifest_file = os.path.join(self.talos_path, 'talos', 'mitmproxy', _manifest)
-        self.tooltool_fetch(
-            manifest_file,
-            output_dir=dest,
-            cache=self.config.get('tooltool_cache')
-        )
-        archive = os.path.join(dest, self.mitmproxy_rel_bin)
-        tar = self.query_exe('tar')
-        unzip_cmd = [tar, '-xvzf', archive, '-C', dest]
-        self.run_command(unzip_cmd, halt_on_failure=True)
+
+        if platform in ['osx', 'linux64']:
+            self.tooltool_fetch(
+                manifest_file,
+                output_dir=dest,
+                cache=self.config.get('tooltool_cache')
+            )
+
+            archive = os.path.join(dest, self.mitmproxy_rel_bin)
+            tar = self.query_exe('tar')
+            unzip_cmd = [tar, '-xvzf', archive, '-C', dest]
+            self.run_command(unzip_cmd, halt_on_failure=True)
 
     def query_mitmproxy_recording_set(self):
         """Mitmproxy requires external playback archives to be downloaded and extracted"""
         if self.mitmproxy_recording_set:
             return self.mitmproxy_recording_set
         if self.query_talos_json_config() and self.suite is not None:
             self.mitmproxy_recording_set = self.talos_json_config['suites'][self.suite].get('mitmproxy_recording_set', False)
             return self.mitmproxy_recording_set
--- a/testing/talos/talos.json
+++ b/testing/talos/talos.json
@@ -107,37 +107,40 @@
                 "--disable-stylo",
                 "--xperf_path",
                 "\"c:/Program Files/Microsoft Windows Performance Toolkit/xperf.exe\""
             ]
         },
         "tp6-e10s": {
             "tests": ["tp6_google", "tp6_youtube", "tp6_amazon", "tp6_facebook"],
             "mitmproxy_release_bin_osx": "mitmproxy-2.0.2-osx.tar.gz",
+            "mitmproxy_release_bin_linux64": "mitmproxy-2.0.2-linux.tar.gz",
             "mitmproxy_recording_set": "mitmproxy-recording-set-win10.zip",
             "talos_options": [
                 "--mitmproxy",
                 "mitmproxy-recording-google.mp mitmproxy-recording-youtube.mp mitmproxy-recording-amazon.mp mitmproxy-recording-facebook.mp",
                 "--firstNonBlankPaint"
             ]
         },
         "tp6-stylo-disabled-e10s": {
             "tests": ["tp6_google", "tp6_youtube", "tp6_amazon", "tp6_facebook"],
             "mitmproxy_release_bin_osx": "mitmproxy-2.0.2-osx.tar.gz",
+            "mitmproxy_release_bin_linux64": "mitmproxy-2.0.2-linux.tar.gz",
             "mitmproxy_recording_set": "mitmproxy-recording-set-win10.zip",
             "talos_options": [
                 "--disable-stylo",
                 "--mitmproxy",
                 "mitmproxy-recording-google.mp mitmproxy-recording-youtube.mp mitmproxy-recording-amazon.mp mitmproxy-recording-facebook.mp",
                 "--firstNonBlankPaint"
             ]
         },
         "tp6-stylo-threads-e10s": {
             "tests": ["tp6_google", "tp6_youtube", "tp6_amazon", "tp6_facebook"],
             "mitmproxy_release_bin_osx": "mitmproxy-2.0.2-osx.tar.gz",
+            "mitmproxy_release_bin_linux64": "mitmproxy-2.0.2-linux.tar.gz",
             "mitmproxy_recording_set": "mitmproxy-recording-set-win10.zip",
             "talos_options": [
                 "--stylo-threads=1",
                 "--mitmproxy",
                 "mitmproxy-recording-google.mp mitmproxy-recording-youtube.mp mitmproxy-recording-amazon.mp mitmproxy-recording-facebook.mp",
                 "--firstNonBlankPaint"
             ]
         }
new file mode 100644
--- /dev/null
+++ b/testing/talos/talos/mitmproxy/mitmproxy-rel-bin-linux64.manifest
@@ -0,0 +1,9 @@
+[
+    {
+        "filename": "mitmproxy-2.0.2-linux.tar.gz",
+        "size": 48997542,
+        "digest": "b032e04b8763206a19f80b78062efa59dc901ad32fd8d6cf2d20e22744711352da61e75d93a0d93d645179153534f72a154f73432837db415c9b0cd9d981f012",
+        "algorithm": "sha512",
+        "unpack": false
+    }
+]
--- a/testing/talos/talos/mitmproxy/mitmproxy.py
+++ b/testing/talos/talos/mitmproxy/mitmproxy.py
@@ -127,23 +127,20 @@ def start_mitmproxy_playback(mitmdump_pa
 
     # this part is platform-specific
     if mozinfo.os == 'win':
         param2 = '""' + param.replace('\\', '\\\\\\') + ' ' + \
                  ' '.join(mitmproxy_recordings).replace('\\', '\\\\\\') + '""'
         sys.path.insert(1, mitmdump_path)
         # mitmproxy needs some DLL's that are a part of Firefox itself, so add to path
         env["PATH"] = os.path.dirname(browser_path) + ";" + env["PATH"]
-    elif mozinfo.os == 'mac':
+    else:
+        # mac and linux
         param2 = param + ' ' + ' '.join(mitmproxy_recordings)
         env["PATH"] = os.path.dirname(browser_path)
-    else:
-        # TODO: support other platforms, Bug 1366355
-        LOG.error('Aborting: talos mitmproxy is currently only supported on Windows and Mac')
-        sys.exit()
 
     command = [mitmdump_path, '-k', '-s', param2]
 
     LOG.info("Starting mitmproxy playback using env path: %s" % env["PATH"])
     LOG.info("Starting mitmproxy playback using command: %s" % ' '.join(command))
     # to turn off mitmproxy log output, use these params for Popen:
     # Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
     mitmproxy_proc = subprocess.Popen(command, env=env)
@@ -155,20 +152,20 @@ def start_mitmproxy_playback(mitmdump_pa
     # cannot continue as we won't be able to playback the pages
     LOG.error('Aborting: mitmproxy playback process failed to start, poll returned: %s' % data)
     sys.exit()
 
 
 def stop_mitmproxy_playback(mitmproxy_proc):
     """Stop the mitproxy server playback"""
     LOG.info("Stopping mitmproxy playback, klling process %d" % mitmproxy_proc.pid)
-    if mozinfo.os == 'mac':
+    if mozinfo.os == 'win':
+        mitmproxy_proc.kill()
+    else:
         mitmproxy_proc.terminate()
-    else:
-        mitmproxy_proc.kill()
     time.sleep(10)
     if mitmproxy_proc.pid in psutil.pids():
         # I *think* we can still continue, as process will be automatically
         # killed anyway when mozharness is done (?) if not, we won't be able
         # to start up mitmproxy next time if it is already running
         LOG.error("Failed to kill the mitmproxy playback process")
     else:
         LOG.info("Successfully killed the mitmproxy playback process")
--- a/toolkit/components/passwordmgr/LoginManagerContent.jsm
+++ b/toolkit/components/passwordmgr/LoginManagerContent.jsm
@@ -79,17 +79,17 @@ var observer = {
   // nsIWebProgressListener
   onLocationChange(aWebProgress, aRequest, aLocation, aFlags) {
     // Only handle pushState/replaceState here.
     if (!(aFlags & Ci.nsIWebProgressListener.LOCATION_CHANGE_SAME_DOCUMENT) ||
         !(aWebProgress.loadType & Ci.nsIDocShell.LOAD_CMD_PUSHSTATE)) {
       return;
     }
 
-    log("onLocationChange handled:", aLocation.spec, aWebProgress.DOMWindow.document);
+    log("onLocationChange handled:", aLocation.displaySpec, aWebProgress.DOMWindow.document);
 
     LoginManagerContent._onNavigation(aWebProgress.DOMWindow.document);
   },
 
   onStateChange(aWebProgress, aRequest, aState, aStatus) {
     if (!(aState & Ci.nsIWebProgressListener.STATE_START)) {
       return;
     }
@@ -1382,17 +1382,17 @@ var LoginUtils = {
     var realm = "";
     try {
       var uri = Services.io.newURI(uriString);
 
       if (allowJS && uri.scheme == "javascript")
         return "javascript:";
 
       // Build this manually instead of using prePath to avoid including the userPass portion.
-      realm = uri.scheme + "://" + uri.hostPort;
+      realm = uri.scheme + "://" + uri.displayHostPort;
     } catch (e) {
       // bug 159484 - disallow url types that don't support a hostPort.
       // (although we handle "javascript:..." as a special case above.)
       log("Couldn't parse origin for", uriString, e);
       realm = null;
     }
 
     return realm;
--- a/toolkit/components/passwordmgr/LoginManagerContextMenu.jsm
+++ b/toolkit/components/passwordmgr/LoginManagerContextMenu.jsm
@@ -88,25 +88,25 @@ var LoginManagerContextMenu = {
    *        URI object with the hostname of the logins we want to find.
    *        This isn't the same as the browser's top-level document URI
    *        when subframes are involved.
    *
    * @returns {nsILoginInfo[]} a login list
    */
   _findLogins(documentURI) {
     let searchParams = {
-      hostname: documentURI.prePath,
+      hostname: documentURI.displayPrePath,
       schemeUpgrades: LoginHelper.schemeUpgrades,
     };
     let logins = LoginHelper.searchLoginsWithObject(searchParams);
     let resolveBy = [
       "scheme",
       "timePasswordChanged",
     ];
-    logins = LoginHelper.dedupeLogins(logins, ["username", "password"], resolveBy, documentURI.prePath);
+    logins = LoginHelper.dedupeLogins(logins, ["username", "password"], resolveBy, documentURI.displayPrePath);
 
     // Sort logins in alphabetical order and by date.
     logins.sort((loginA, loginB) => {
       // Sort alphabetically
       let result = loginA.username.localeCompare(loginB.username);
       if (result) {
         // Forces empty logins to be at the end
         if (!loginA.username) {
@@ -157,17 +157,17 @@ var LoginManagerContextMenu = {
    * @param {nsIURI} documentURI
    *        URI of the document owning the form we want to fill.
    *        This isn't the same as the browser's top-level
    *        document URI when subframes are involved.
    */
   _fillTargetField(login, inputElement, browser, documentURI) {
     LoginManagerParent.fillForm({
       browser,
-      loginFormOrigin: documentURI.prePath,
+      loginFormOrigin: documentURI.displayPrePath,
       login,
       inputElement,
     }).catch(Cu.reportError);
   },
 
   /**
    * @param {string} key
    *        The localized string key
--- a/toolkit/components/passwordmgr/nsLoginManager.js
+++ b/toolkit/components/passwordmgr/nsLoginManager.js
@@ -357,17 +357,17 @@ LoginManager.prototype = {
     log.debug("Getting a list of all disabled origins");
 
     let disabledHosts = [];
     let enumerator = Services.perms.enumerator;
 
     while (enumerator.hasMoreElements()) {
       let perm = enumerator.getNext();
       if (perm.type == PERMISSION_SAVE_LOGINS && perm.capability == Services.perms.DENY_ACTION) {
-        disabledHosts.push(perm.principal.URI.prePath);
+        disabledHosts.push(perm.principal.URI.displayPrePath);
       }
     }
 
     if (count)
       count.value = disabledHosts.length; // needed for XPCOM
 
     log.debug("getAllDisabledHosts: returning", disabledHosts.length, "disabled hosts.");
     return disabledHosts;
--- a/toolkit/components/passwordmgr/nsLoginManagerPrompter.js
+++ b/toolkit/components/passwordmgr/nsLoginManagerPrompter.js
@@ -1573,17 +1573,17 @@ LoginManagerPrompter.prototype = {
   _getFormattedHostname(aURI) {
     let uri;
     if (aURI instanceof Ci.nsIURI) {
       uri = aURI;
     } else {
       uri = Services.io.newURI(aURI);
     }
 
-    return uri.scheme + "://" + uri.hostPort;
+    return uri.scheme + "://" + uri.displayHostPort;
   },
 
 
   /**
    * Converts a login's hostname field (a URL) to a short string for
    * prompting purposes. Eg, "http://foo.com" --> "foo.com", or
    * "ftp://www.site.co.uk" --> "site.co.uk".
    */
--- a/toolkit/components/passwordmgr/storage-mozStorage.js
+++ b/toolkit/components/passwordmgr/storage-mozStorage.js
@@ -462,17 +462,17 @@ LoginManagerStorage_mozStorage.prototype
           if (value != null) {
             condition += `${field} = :${field}`;
             params[field] = value;
             let valueURI;
             try {
               if (aOptions.schemeUpgrades && (valueURI = Services.io.newURI(value)) &&
                   valueURI.scheme == "https") {
                 condition += ` OR ${field} = :http${field}`;
-                params["http" + field] = "http://" + valueURI.hostPort;
+                params["http" + field] = "http://" + valueURI.displayHostPort;
               }
             } catch (ex) {
               // newURI will throw for some values (e.g. chrome://FirefoxAccounts)
               // but those URLs wouldn't support upgrades anyways.
             }
             break;
           }
           // Fall through
--- a/toolkit/components/passwordmgr/test/unit/test_disabled_hosts.js
+++ b/toolkit/components/passwordmgr/test/unit/test_disabled_hosts.js
@@ -149,26 +149,26 @@ add_task(async function test_storage_set
   let hostname = "http://大.net";
   let encoding = "http://xn--pss.net";
 
   // Test adding disabled host with nonascii URL (http://大.net).
   Services.logins.setLoginSavingEnabled(hostname, false);
   await LoginTestUtils.reloadData();
   Assert.equal(Services.logins.getLoginSavingEnabled(hostname), false);
   Assert.equal(Services.logins.getLoginSavingEnabled(encoding), false);
-  LoginTestUtils.assertDisabledHostsEqual(Services.logins.getAllDisabledHosts(), [encoding]);
+  LoginTestUtils.assertDisabledHostsEqual(Services.logins.getAllDisabledHosts(), [hostname]);
 
   LoginTestUtils.clearData();
 
   // Test adding disabled host with IDN ("http://xn--pss.net").
   Services.logins.setLoginSavingEnabled(encoding, false);
   await LoginTestUtils.reloadData();
   Assert.equal(Services.logins.getLoginSavingEnabled(hostname), false);
   Assert.equal(Services.logins.getLoginSavingEnabled(encoding), false);
-  LoginTestUtils.assertDisabledHostsEqual(Services.logins.getAllDisabledHosts(), [encoding]);
+  LoginTestUtils.assertDisabledHostsEqual(Services.logins.getAllDisabledHosts(), [hostname]);
 
   LoginTestUtils.clearData();
 });
 
 /**
  * Tests storing disabled hosts with non-ASCII characters where IDN is not supported.
  */
 add_task(async function test_storage_setLoginSavingEnabled_nonascii_IDN_not_supported()
--- a/toolkit/components/places/PlacesUtils.jsm
+++ b/toolkit/components/places/PlacesUtils.jsm
@@ -130,16 +130,19 @@ async function notifyKeywordChange(url, 
  *        Whether the node represents a livemark.
  */
 function serializeNode(aNode, aIsLivemark) {
   let data = {};
 
   data.title = aNode.title;
   data.id = aNode.itemId;
   data.livemark = aIsLivemark;
+  // Add an instanceId so we can tell which instance of an FF session the data
+  // is coming from.
+  data.instanceId = PlacesUtils.instanceId;
 
   let guid = aNode.bookmarkGuid;
   if (guid) {
     data.itemGuid = guid;
     if (aNode.parent)
       data.parent = aNode.parent.itemId;
     let grandParent = aNode.parent && aNode.parent.parent;
     if (grandParent)
@@ -2010,16 +2013,21 @@ XPCOMUtils.defineLazyGetter(PlacesUtils,
 
 XPCOMUtils.defineLazyGetter(this, "bundle", function() {
   const PLACES_STRING_BUNDLE_URI = "chrome://places/locale/places.properties";
   return Cc["@mozilla.org/intl/stringbundle;1"].
          getService(Ci.nsIStringBundleService).
          createBundle(PLACES_STRING_BUNDLE_URI);
 });
 
+// This is just used as a reasonably-random value for copy & paste / drag operations.
+XPCOMUtils.defineLazyGetter(PlacesUtils, "instanceId", () => {
+  return PlacesUtils.history.makeGuid();
+});
+
 /**
  * Setup internal databases for closing properly during shutdown.
  *
  * 1. Places initiates shutdown.
  * 2. Before places can move to the step where it closes the low-level connection,
  *   we need to make sure that we have closed `conn`.
  * 3. Before we can close `conn`, we need to make sure that all external clients
  *   have stopped using `conn`.
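
Conceptually, the instanceId added above acts as a same-session gate: data copied or dragged out of Places carries the session's randomly generated GUID, so a consumer can tell whether the data came from its own session before trusting itemGuid. A rough sketch of that gate, written in Rust for brevity; SerializedBookmark and is_same_session_item are illustrative names, not the Places API.

#[derive(Clone, Debug)]
struct SerializedBookmark {
    item_guid: Option<String>,
    instance_id: String,
    url: String,
}

// Trust itemGuid only when the data was produced by this session.
fn is_same_session_item(data: &SerializedBookmark, my_instance_id: &str) -> bool {
    data.item_guid.is_some() && data.instance_id == my_instance_id
}

fn main() {
    let my_id = String::from("guid-of-this-session");
    let local = SerializedBookmark {
        item_guid: Some("bookmarkGuid".into()),
        instance_id: my_id.clone(),
        url: "http://example.com/".into(),
    };
    let foreign = SerializedBookmark {
        instance_id: "guid-of-another-session".into(),
        ..local.clone()
    };
    assert!(is_same_session_item(&local, &my_id));    // in-session: treat as a move
    assert!(!is_same_session_item(&foreign, &my_id)); // foreign: fall back to a copy
}
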
--- a/toolkit/components/thumbnails/BackgroundPageThumbs.jsm
+++ b/toolkit/components/thumbnails/BackgroundPageThumbs.jsm
@@ -100,16 +100,23 @@ const BackgroundPageThumbs = {
    * @param url      The URL to capture.
    * @param options  An optional object that configures the capture.  See
    *                 capture() for description.
    *   unloadingPromise This option is resolved when the calling context is
    *                    unloading, so things can be cleaned up to avoid leak.
    * @return {Promise} A Promise that resolves when this task completes
    */
   async captureIfMissing(url, options = {}) {
+    // Short circuit this function if thumbnails are disabled by pref, or else we leak observers.
+    // See Bug 1400562
+    if (!PageThumbs._prefEnabled()) {
+      if (options.onDone)
+        options.onDone(url);
+      return url;
+    }
     // The fileExistsForURL call is an optimization, potentially but unlikely
     // incorrect, and no big deal when it is.  After the capture is done, we
     // atomically test whether the file exists before writing it.
     let exists = await PageThumbsStorage.fileExistsForURL(url);
     if (exists) {
       if (options.onDone) {
         options.onDone(url);
       }
--- a/toolkit/components/url-classifier/nsUrlClassifierHashCompleter.js
+++ b/toolkit/components/url-classifier/nsUrlClassifierHashCompleter.js
@@ -430,26 +430,30 @@ HashCompleterRequest.prototype = {
     }
   },
 
   // Creates an nsIChannel for the request and fills the body.
   openChannel: function HCR_openChannel() {
     let loadFlags = Ci.nsIChannel.INHIBIT_CACHING |
                     Ci.nsIChannel.LOAD_BYPASS_CACHE;
 
-    this.actualGethashUrl = this.gethashUrl;
+    this.request = {
+      url: this.gethashUrl,
+      body: ""
+    };
+
     if (this.isV4) {
       // As per spec, we add the request payload to the gethash url.
-      this.actualGethashUrl += "&$req=" + this.buildRequestV4();
+      this.request.url += "&$req=" + this.buildRequestV4();
     }
 
-    log("actualGethashUrl: " + this.actualGethashUrl);
+    log("actualGethashUrl: " + this.request.url);
 
     let channel = NetUtil.newChannel({
-      uri: this.actualGethashUrl,
+      uri: this.request.url,
       loadUsingSystemPrincipal: true
     });
     channel.loadFlags = loadFlags;
     channel.loadInfo.originAttributes = {
       // The firstPartyDomain value should sync with NECKO_SAFEBROWSING_FIRST_PARTY_DOMAIN
       // defined in nsNetUtil.h.
       firstPartyDomain: "safebrowsing.86868755-6b82-4842-b301-72671a0db32e.mozilla"
     };
@@ -778,17 +782,17 @@ HashCompleterRequest.prototype = {
     let success = Components.isSuccessCode(aStatusCode);
     log("Received a " + httpStatus + " status code from the " + this.provider +
         " gethash server (success=" + success + ").");
 
     Services.telemetry.getKeyedHistogramById("URLCLASSIFIER_COMPLETE_REMOTE_STATUS2").
       add(this.telemetryProvider, httpStatusToBucket(httpStatus));
     if (httpStatus == 400) {
       dump("Safe Browsing server returned a 400 during completion: request= " +
-           this.actualGethashUrl + "\n");
+           this.request.url + ",payload= " + this.request.body + "\n");
     }
 
     Services.telemetry.getKeyedHistogramById("URLCLASSIFIER_COMPLETE_TIMEOUT2").
       add(this.telemetryProvider, 0);
 
     // Notify the RequestBackoff once a response is received.
     this._completer.finishRequest(this.gethashUrl, httpStatus);
 
--- a/toolkit/components/url-classifier/nsUrlClassifierStreamUpdater.cpp
+++ b/toolkit/components/url-classifier/nsUrlClassifierStreamUpdater.cpp
@@ -108,19 +108,17 @@ NS_IMPL_ISUPPORTS(nsUrlClassifierStreamU
 void
 nsUrlClassifierStreamUpdater::DownloadDone()
 {
   LOG(("nsUrlClassifierStreamUpdater::DownloadDone [this=%p]", this));
   mIsUpdating = false;
 
   mPendingUpdates.Clear();
   mDownloadError = false;
-  mSuccessCallback = nullptr;
-  mUpdateErrorCallback = nullptr;
-  mDownloadErrorCallback = nullptr;
+  mCurrentRequest = nullptr;
 }
 
 ///////////////////////////////////////////////////////////////////////////////
 // nsIUrlClassifierStreamUpdater implementation
 
 nsresult
 nsUrlClassifierStreamUpdater::FetchUpdate(nsIURI *aUpdateUrl,
                                           const nsACString & aRequestPayload,
@@ -281,27 +279,23 @@ nsUrlClassifierStreamUpdater::DownloadUp
   NS_ENSURE_ARG(aSuccessCallback);
   NS_ENSURE_ARG(aUpdateErrorCallback);
   NS_ENSURE_ARG(aDownloadErrorCallback);
 
   if (mIsUpdating) {
     LOG(("Already updating, queueing update %s from %s", aRequestPayload.Data(),
          aUpdateUrl.Data()));
     *_retval = false;
-    PendingRequest *request = mPendingRequests.AppendElement(fallible);
+    UpdateRequest *request = mPendingRequests.AppendElement(fallible);
     if (!request) {
       return NS_ERROR_OUT_OF_MEMORY;
     }
-    request->mTables = aRequestTables;
-    request->mRequestPayload = aRequestPayload;
-    request->mIsPostRequest = aIsPostRequest;
-    request->mUrl = aUpdateUrl;
-    request->mSuccessCallback = aSuccessCallback;
-    request->mUpdateErrorCallback = aUpdateErrorCallback;
-    request->mDownloadErrorCallback = aDownloadErrorCallback;
+    BuildUpdateRequest(aRequestTables, aRequestPayload, aIsPostRequest, aUpdateUrl,
+                       aSuccessCallback, aUpdateErrorCallback, aDownloadErrorCallback,
+                       request);
     return NS_OK;
   }
 
   if (aUpdateUrl.IsEmpty()) {
     NS_ERROR("updateUrl not set");
     return NS_ERROR_NOT_INITIALIZED;
   }
 
@@ -324,27 +318,23 @@ nsUrlClassifierStreamUpdater::DownloadUp
     mInitialized = true;
   }
 
   rv = mDBService->BeginUpdate(this, aRequestTables);
   if (rv == NS_ERROR_NOT_AVAILABLE) {
     LOG(("Service busy, already updating, queuing update %s from %s",
          aRequestPayload.Data(), aUpdateUrl.Data()));
     *_retval = false;
-    PendingRequest *request = mPendingRequests.AppendElement(fallible);
+    UpdateRequest *request = mPendingRequests.AppendElement(fallible);
     if (!request) {
       return NS_ERROR_OUT_OF_MEMORY;
     }
-    request->mTables = aRequestTables;
-    request->mRequestPayload = aRequestPayload;
-    request->mIsPostRequest = aIsPostRequest;
-    request->mUrl = aUpdateUrl;
-    request->mSuccessCallback = aSuccessCallback;
-    request->mUpdateErrorCallback = aUpdateErrorCallback;
-    request->mDownloadErrorCallback = aDownloadErrorCallback;
+    BuildUpdateRequest(aRequestTables, aRequestPayload, aIsPostRequest, aUpdateUrl,
+                       aSuccessCallback, aUpdateErrorCallback, aDownloadErrorCallback,
+                       request);
 
     // We cannot guarantee that we will be notified when DBService is done
     // processing the current update, so we fire a retry timer on our own.
     nsresult rv;
     mFetchNextRequestTimer = do_CreateInstance("@mozilla.org/timer;1", &rv);
     if (NS_SUCCEEDED(rv)) {
       rv = mFetchNextRequestTimer->InitWithCallback(this,
                                                     FETCH_NEXT_REQUEST_RETRY_DELAY_MS,
@@ -361,24 +351,25 @@ nsUrlClassifierStreamUpdater::DownloadUp
   nsCOMPtr<nsIUrlClassifierUtils> urlUtil =
     do_GetService(NS_URLCLASSIFIERUTILS_CONTRACTID);
 
   nsTArray<nsCString> tables;
   mozilla::safebrowsing::Classifier::SplitTables(aRequestTables, tables);
   urlUtil->GetTelemetryProvider(tables.SafeElementAt(0, EmptyCString()),
                                 mTelemetryProvider);
 
-  mSuccessCallback = aSuccessCallback;
-  mUpdateErrorCallback = aUpdateErrorCallback;
-  mDownloadErrorCallback = aDownloadErrorCallback;
+  mCurrentRequest = MakeUnique<UpdateRequest>();
+  BuildUpdateRequest(aRequestTables, aRequestPayload, aIsPostRequest, aUpdateUrl,
+                     aSuccessCallback, aUpdateErrorCallback, aDownloadErrorCallback,
+                     mCurrentRequest.get());
 
   mIsUpdating = true;
   *_retval = true;
 
-  LOG(("FetchUpdate: %s", aUpdateUrl.Data()));
+  LOG(("FetchUpdate: %s", mCurrentRequest->mUrl.Data()));
 
   return FetchUpdate(aUpdateUrl, aRequestPayload, aIsPostRequest, EmptyCString());
 }
 
 ///////////////////////////////////////////////////////////////////////////////
 // nsIUrlClassifierUpdateObserver implementation
 
 NS_IMETHODIMP
@@ -441,33 +432,55 @@ nsUrlClassifierStreamUpdater::FetchNext(
 nsresult
 nsUrlClassifierStreamUpdater::FetchNextRequest()
 {
   if (mPendingRequests.Length() == 0) {
     LOG(("No more requests, returning"));
     return NS_OK;
   }
 
-  PendingRequest request = mPendingRequests[0];
+  UpdateRequest request = mPendingRequests[0];
   mPendingRequests.RemoveElementAt(0);
   LOG(("Stream updater: fetching next request: %s, %s",
        request.mTables.get(), request.mUrl.get()));
   bool dummy;
   DownloadUpdates(
     request.mTables,
     request.mRequestPayload,
     request.mIsPostRequest,
     request.mUrl,
     request.mSuccessCallback,
     request.mUpdateErrorCallback,
     request.mDownloadErrorCallback,
     &dummy);
   return NS_OK;
 }
 
+void
+nsUrlClassifierStreamUpdater::BuildUpdateRequest(
+  const nsACString &aRequestTables,
+  const nsACString &aRequestPayload,
+  bool aIsPostRequest,
+  const nsACString &aUpdateUrl,
+  nsIUrlClassifierCallback *aSuccessCallback,
+  nsIUrlClassifierCallback *aUpdateErrorCallback,
+  nsIUrlClassifierCallback *aDownloadErrorCallback,
+  UpdateRequest* aRequest)
+{
+  MOZ_ASSERT(aRequest);
+
+  aRequest->mTables = aRequestTables;
+  aRequest->mRequestPayload = aRequestPayload;
+  aRequest->mIsPostRequest = aIsPostRequest;
+  aRequest->mUrl = aUpdateUrl;
+  aRequest->mSuccessCallback = aSuccessCallback;
+  aRequest->mUpdateErrorCallback = aUpdateErrorCallback;
+  aRequest->mDownloadErrorCallback = aDownloadErrorCallback;
+}
+
 NS_IMETHODIMP
 nsUrlClassifierStreamUpdater::StreamFinished(nsresult status,
                                              uint32_t requestedDelay)
 {
   // We are a service and may not be reset with Init between calls, so reset
   // mBeganStream manually.
   mBeganStream = false;
   LOG(("nsUrlClassifierStreamUpdater::StreamFinished [%" PRIx32 ", %d]",
@@ -502,18 +515,21 @@ NS_IMETHODIMP
 nsUrlClassifierStreamUpdater::UpdateSuccess(uint32_t requestedTimeout)
 {
   LOG(("nsUrlClassifierStreamUpdater::UpdateSuccess [this=%p]", this));
   if (mPendingUpdates.Length() != 0) {
     NS_WARNING("Didn't fetch all safebrowsing update redirects");
   }
 
-  // DownloadDone() clears mSuccessCallback, so we save it off here.
+  // DownloadDone() clears mCurrentRequest, so we save the callbacks off here.
-  nsCOMPtr<nsIUrlClassifierCallback> successCallback = mDownloadError ? nullptr : mSuccessCallback.get();
-  nsCOMPtr<nsIUrlClassifierCallback> downloadErrorCallback = mDownloadError ? mDownloadErrorCallback.get() : nullptr;
+  nsCOMPtr<nsIUrlClassifierCallback> successCallback =
+    mDownloadError ? nullptr : mCurrentRequest->mSuccessCallback.get();
+  nsCOMPtr<nsIUrlClassifierCallback> downloadErrorCallback =
+    mDownloadError ? mCurrentRequest->mDownloadErrorCallback.get() : nullptr;
+
   DownloadDone();
 
   nsAutoCString strTimeout;
   strTimeout.AppendInt(requestedTimeout);
   if (successCallback) {
     LOG(("nsUrlClassifierStreamUpdater::UpdateSuccess callback [this=%p]",
          this));
     successCallback->HandleEvent(strTimeout);
@@ -530,18 +546,20 @@ nsUrlClassifierStreamUpdater::UpdateSucc
 }
 
 NS_IMETHODIMP
 nsUrlClassifierStreamUpdater::UpdateError(nsresult result)
 {
   LOG(("nsUrlClassifierStreamUpdater::UpdateError [this=%p]", this));
 
-  // DownloadDone() clears mUpdateErrorCallback, so we save it off here.
+  // DownloadDone() clears mCurrentRequest, so we save the callbacks off here.
-  nsCOMPtr<nsIUrlClassifierCallback> errorCallback = mDownloadError ? nullptr : mUpdateErrorCallback.get();
-  nsCOMPtr<nsIUrlClassifierCallback> downloadErrorCallback = mDownloadError ? mDownloadErrorCallback.get() : nullptr;
+  nsCOMPtr<nsIUrlClassifierCallback> errorCallback =
+    mDownloadError ? nullptr : mCurrentRequest->mUpdateErrorCallback.get();
+  nsCOMPtr<nsIUrlClassifierCallback> downloadErrorCallback =
+    mDownloadError ? mCurrentRequest->mDownloadErrorCallback.get() : nullptr;
   DownloadDone();
 
   nsAutoCString strResult;
   strResult.AppendInt(static_cast<uint32_t>(result));
   if (errorCallback) {
     errorCallback->HandleEvent(strResult);
   } else if (downloadErrorCallback) {
     LOG(("Notify download error callback in UpdateError [this=%p]", this));
@@ -637,24 +655,19 @@ nsUrlClassifierStreamUpdater::OnStartReq
       NS_ENSURE_SUCCESS(rv, rv);
 
       uint32_t requestStatus;
       rv = httpChannel->GetResponseStatus(&requestStatus);
       NS_ENSURE_SUCCESS(rv, rv);
       mozilla::Telemetry::Accumulate(mozilla::Telemetry::URLCLASSIFIER_UPDATE_REMOTE_STATUS2,
                                      mTelemetryProvider, HTTPStatusToBucket(requestStatus));
       if (requestStatus == 400) {
-        nsCOMPtr<nsIURI> uri;
-        nsAutoCString spec;
-        rv = httpChannel->GetURI(getter_AddRefs(uri));
-        if (NS_SUCCEEDED(rv) && uri) {
-          uri->GetAsciiSpec(spec);
-        }
-        printf_stderr("Safe Browsing server returned a 400 during update: request = %s \n",
-                      spec.get());
+        printf_stderr("Safe Browsing server returned a 400 during update: "
+                      "request url = %s, payload = %s\n",
+                      mCurrentRequest->mUrl.get(), mCurrentRequest->mRequestPayload.get());
       }
 
       LOG(("nsUrlClassifierStreamUpdater::OnStartRequest %s (%d)", succeeded ?
            "succeeded" : "failed", requestStatus));
       if (!succeeded) {
         // 404 or other error, pass error status back
         strStatus.AppendInt(requestStatus);
         downloadError = true;
@@ -663,17 +676,17 @@ nsUrlClassifierStreamUpdater::OnStartReq
   }
 
   if (downloadError) {
     LOG(("nsUrlClassifierStreamUpdater::Download error [this=%p]", this));
     mDownloadError = true;
     mDownloadErrorStatusStr = strStatus;
     status = NS_ERROR_ABORT;
   } else if (NS_SUCCEEDED(status)) {
-    MOZ_ASSERT(mDownloadErrorCallback);
+    MOZ_ASSERT(mCurrentRequest->mDownloadErrorCallback);
     mBeganStream = true;
     LOG(("nsUrlClassifierStreamUpdater::Beginning stream [this=%p]", this));
     rv = mDBService->BeginStream(mStreamTable);
     NS_ENSURE_SUCCESS(rv, rv);
   }
 
   mStreamTable.Truncate();
 
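Both UpdateSuccess and UpdateError above copy the callbacks into strong local references before calling DownloadDone(), because DownloadDone() tears down the per-request state that now lives in mCurrentRequest. The ordering matters; below is a minimal sketch of the same pattern, using std::function and std::unique_ptr as stand-ins for the nsCOMPtr and UniquePtr members (an illustration only, not the Gecko code).

#include <functional>
#include <iostream>
#include <memory>
#include <string>

using Callback = std::function<void(const std::string&)>;

struct Request {
  Callback onSuccess;
  Callback onDownloadError;
};

class Updater {
public:
  explicit Updater(Request request)
    : mCurrent(std::make_unique<Request>(std::move(request))) {}

  void UpdateSuccess(bool downloadError, const std::string& status) {
    // Copy the callbacks out while the request is still alive;
    // DownloadDone() resets mCurrent.
    Callback success = downloadError ? Callback() : mCurrent->onSuccess;
    Callback failure = downloadError ? mCurrent->onDownloadError : Callback();
    DownloadDone();
    if (success) {
      success(status);
    } else if (failure) {
      failure(status);
    }
  }

private:
  void DownloadDone() { mCurrent.reset(); }  // drops the per-request state

  std::unique_ptr<Request> mCurrent;
};

int main() {
  Updater updater({[](const std::string& s) { std::cout << "success: " << s << "\n"; },
                   [](const std::string& s) { std::cout << "download error: " << s << "\n"; }});
  updater.UpdateSuccess(/* downloadError = */ false, "1200");
  return 0;
}

The same reasoning applies to the downloadErrorCallback branch: whichever callback survives the mDownloadError check must be copied out before the request object goes away.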
--- a/toolkit/components/url-classifier/nsUrlClassifierStreamUpdater.h
+++ b/toolkit/components/url-classifier/nsUrlClassifierStreamUpdater.h
@@ -66,16 +66,36 @@ private:
                        bool aIsPostRequest,
                        const nsACString &aTable);
 
   // Fetches the next table, from mPendingUpdates.
   nsresult FetchNext();
   // Fetches the next request, from mPendingRequests
   nsresult FetchNextRequest();
 
+  struct UpdateRequest {
+    nsCString mTables;
+    nsCString mRequestPayload;
+    bool mIsPostRequest;
+    nsCString mUrl;
+    nsCOMPtr<nsIUrlClassifierCallback> mSuccessCallback;
+    nsCOMPtr<nsIUrlClassifierCallback> mUpdateErrorCallback;
+    nsCOMPtr<nsIUrlClassifierCallback> mDownloadErrorCallback;
+  };
+  // Utility function to fill in an update request.
+  void
+  BuildUpdateRequest(const nsACString &aRequestTables,
+                     const nsACString &aRequestPayload,
+                     bool aIsPostRequest,
+                     const nsACString &aUpdateUrl,
+                     nsIUrlClassifierCallback *aSuccessCallback,
+                     nsIUrlClassifierCallback *aUpdateErrorCallback,
+                     nsIUrlClassifierCallback *aDownloadErrorCallback,
+                     UpdateRequest* aRequest);
+
   bool mIsUpdating;
   bool mInitialized;
   bool mDownloadError;
   bool mBeganStream;
 
   nsCString mDownloadErrorStatusStr;
 
   // Note that mStreamTable is only used by v2, it is empty for v4 update.
@@ -94,36 +114,25 @@ private:
   nsCOMPtr<nsITimer> mFetchNextRequestTimer;
 
   // Timer to abort the download if the server takes too long to respond.
   nsCOMPtr<nsITimer> mResponseTimeoutTimer;
 
   // Timer to abort the download if it takes too long.
   nsCOMPtr<nsITimer> mTimeoutTimer;
 
-  struct PendingRequest {
-    nsCString mTables;
-    nsCString mRequestPayload;
-    bool mIsPostRequest;
-    nsCString mUrl;
-    nsCOMPtr<nsIUrlClassifierCallback> mSuccessCallback;
-    nsCOMPtr<nsIUrlClassifierCallback> mUpdateErrorCallback;
-    nsCOMPtr<nsIUrlClassifierCallback> mDownloadErrorCallback;
-  };
-  nsTArray<PendingRequest> mPendingRequests;
+  mozilla::UniquePtr<UpdateRequest> mCurrentRequest;
+  nsTArray<UpdateRequest> mPendingRequests;
 
   struct PendingUpdate {
     nsCString mUrl;
     nsCString mTable;
   };
   nsTArray<PendingUpdate> mPendingUpdates;
 
-  nsCOMPtr<nsIUrlClassifierCallback> mSuccessCallback;
-  nsCOMPtr<nsIUrlClassifierCallback> mUpdateErrorCallback;
-  nsCOMPtr<nsIUrlClassifierCallback> mDownloadErrorCallback;
-
   // The provider for current update request and should be only used by telemetry
   // since it would show up as "other" for any other providers.
   nsCString mTelemetryProvider;
   PRIntervalTime mTelemetryClockStart;
 };
 
 #endif // nsUrlClassifierStreamUpdater_h_
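Taken together, the header and .cpp changes replace the old PendingRequest struct plus three loose callback members with a single UpdateRequest that serves both the pending queue and the in-flight request, filled by one BuildUpdateRequest helper instead of seven repeated assignments per call site. A rough sketch of that shape follows, with standard-library containers standing in for nsTArray, UniquePtr and nsCOMPtr (names, URLs and table values below are illustrative assumptions only).

#include <memory>
#include <string>
#include <utility>
#include <vector>

// Stand-in for nsIUrlClassifierCallback.
struct Callback {
  virtual void HandleEvent(const std::string& status) = 0;
  virtual ~Callback() = default;
};

struct UpdateRequest {
  std::string tables;
  std::string payload;
  bool isPostRequest = false;
  std::string url;
  std::shared_ptr<Callback> successCallback;
  std::shared_ptr<Callback> updateErrorCallback;
  std::shared_ptr<Callback> downloadErrorCallback;
};

class StreamUpdater {
public:
  // Mirrors the two outcomes in DownloadUpdates(): queue the request when an
  // update is already running, otherwise make it the current request.
  bool Download(UpdateRequest request) {
    if (mIsUpdating) {
      mPendingRequests.push_back(std::move(request));
      return false;
    }
    mCurrentRequest = std::make_unique<UpdateRequest>(std::move(request));
    mIsUpdating = true;
    return true;
  }

  // Mirrors FetchNextRequest(): pop the oldest queued request and resubmit it.
  bool FetchNextRequest() {
    if (mPendingRequests.empty()) {
      return false;
    }
    UpdateRequest next = std::move(mPendingRequests.front());
    mPendingRequests.erase(mPendingRequests.begin());
    return Download(std::move(next));
  }

  // Rough equivalent of DownloadDone() (not shown in the hunks above):
  // drop the per-request state once the callbacks have run.
  void DownloadDone() {
    mCurrentRequest.reset();
    mIsUpdating = false;
  }

private:
  bool mIsUpdating = false;
  std::unique_ptr<UpdateRequest> mCurrentRequest;
  std::vector<UpdateRequest> mPendingRequests;
};

int main() {
  StreamUpdater updater;
  updater.Download({"goog-phish-proto", "", true, "https://example.invalid/u1"});
  updater.Download({"test-malware-simple", "", true, "https://example.invalid/u2"});  // queued
  updater.DownloadDone();                      // first update finished
  return updater.FetchNextRequest() ? 0 : 1;   // resubmits the queued request
}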
--- a/toolkit/mozapps/update/updater/archivereader.cpp
+++ b/toolkit/mozapps/update/updater/archivereader.cpp
@@ -33,21 +33,16 @@
 #undef UPDATER_NO_STRING_GLUE_STL
 
 #if defined(XP_UNIX)
 # include <sys/types.h>
 #elif defined(XP_WIN)
 # include <io.h>
 #endif
 
-static size_t inbuf_size  = 262144;
-static size_t outbuf_size = 262144;
-static uint8_t *inbuf  = nullptr;
-static uint8_t *outbuf = nullptr;
-
 /**
  * Performs a verification on the opened MAR file with the passed in
  * certificate name ID and type ID.
  *
  * @param  archive   The MAR file to verify the signature on.
  * @param  certData  The certificate data.
  * @return OK on success, CERT_VERIFY_ERROR on failure.
 */
@@ -178,34 +173,34 @@ ArchiveReader::VerifyProductInformation(
 }
 
 int
 ArchiveReader::Open(const NS_tchar *path)
 {
   if (mArchive)
     Close();
 
-  if (!inbuf) {
-    inbuf = (uint8_t *)malloc(inbuf_size);
-    if (!inbuf) {
+  if (!mInBuf) {
+    mInBuf = (uint8_t *)malloc(mInBufSize);
+    if (!mInBuf) {
       // Try again with a smaller buffer.
-      inbuf_size = 1024;
-      inbuf = (uint8_t *)malloc(inbuf_size);
-      if (!inbuf)
+      mInBufSize = 1024;
+      mInBuf = (uint8_t *)malloc(mInBufSize);
+      if (!mInBuf)
         return ARCHIVE_READER_MEM_ERROR;
     }
   }
 
-  if (!outbuf) {
-    outbuf = (uint8_t *)malloc(outbuf_size);
-    if (!outbuf) {
+  if (!mOutBuf) {
+    mOutBuf = (uint8_t *)malloc(mOutBufSize);
+    if (!mOutBuf) {
       // Try again with a smaller buffer.
-      outbuf_size = 1024;
-      outbuf = (uint8_t *)malloc(outbuf_size);
-      if (!outbuf)
+      mOutBufSize = 1024;
+      mOutBuf = (uint8_t *)malloc(mOutBufSize);
+      if (!mOutBuf)
         return ARCHIVE_READER_MEM_ERROR;
     }
   }
 
 #ifdef XP_WIN
   mArchive = mar_wopen(path);
 #else
   mArchive = mar_open(path);
@@ -222,24 +217,24 @@ ArchiveReader::Open(const NS_tchar *path
 void
 ArchiveReader::Close()
 {
   if (mArchive) {
     mar_close(mArchive);
     mArchive = nullptr;
   }
 
-  if (inbuf) {
-    free(inbuf);
-    inbuf = nullptr;
+  if (mInBuf) {
+    free(mInBuf);
+    mInBuf = nullptr;
   }
 
-  if (outbuf) {
-    free(outbuf);
-    outbuf = nullptr;
+  if (mOutBuf) {
+    free(mOutBuf);
+    mOutBuf = nullptr;
   }
 }
 
 int
 ArchiveReader::ExtractFile(const char *name, const NS_tchar *dest)
 {
   const MarItem *item = mar_find_item(mArchive, name);
   if (!item)
@@ -282,45 +277,45 @@ ArchiveReader::ExtractItemToStream(const
   struct xz_buf strm = { 0 };
   enum xz_ret xz_rv = XZ_OK;
 
   struct xz_dec * dec = xz_dec_init(XZ_DYNALLOC, 64 * 1024 * 1024);
   if (!dec) {
     return UNEXPECTED_XZ_ERROR;
   }
 
-  strm.in = inbuf;
+  strm.in = mInBuf;
   strm.in_pos = 0;
   strm.in_size = 0;
-  strm.out = outbuf;
+  strm.out = mOutBuf;
   strm.out_pos = 0;
-  strm.out_size = outbuf_size;
+  strm.out_size = mOutBufSize;
 
   offset = 0;
   for (;;) {
     if (!item->length) {
       ret = UNEXPECTED_MAR_ERROR;
       break;
     }
 
     if (offset < (int) item->length && strm.in_pos == strm.in_size) {
-      inlen = mar_read(mArchive, item, offset, inbuf, inbuf_size);
+      inlen = mar_read(mArchive, item, offset, mInBuf, mInBufSize);
       if (inlen <= 0) {
         ret = READ_ERROR;
         break;
       }
       offset += inlen;
       strm.in_size = inlen;
       strm.in_pos = 0;
     }
 
     xz_rv = xz_dec_run(dec, &strm);
 
-    if (strm.out_pos == outbuf_size) {
-      if (fwrite(outbuf, 1, strm.out_pos, fp) != strm.out_pos) {
+    if (strm.out_pos == mOutBufSize) {
+      if (fwrite(mOutBuf, 1, strm.out_pos, fp) != strm.out_pos) {
         ret = WRITE_ERROR_EXTRACT;
         break;
       }
 
       strm.out_pos = 0;
     }
 
     if (xz_rv == XZ_OK) {
@@ -333,17 +328,17 @@ ArchiveReader::ExtractItemToStream(const
     if (xz_rv != XZ_STREAM_END) {
       ret = UNEXPECTED_XZ_ERROR;
       break;
     }
 
     // Write out the remainder of the decompressed data. In the case of
     // strm.out_pos == 0 this is needed to create empty files included in the
     // mar file.
-    if (fwrite(outbuf, 1, strm.out_pos, fp) != strm.out_pos) {
+    if (fwrite(mOutBuf, 1, strm.out_pos, fp) != strm.out_pos) {
       ret = WRITE_ERROR_EXTRACT;
     }
     break;
   }
 
   xz_dec_end(dec);
   return ret;
 }
--- a/toolkit/mozapps/update/updater/archivereader.h
+++ b/toolkit/mozapps/update/updater/archivereader.h
@@ -15,27 +15,31 @@
 #else
   typedef char NS_tchar;
 #endif
 
 // This class provides an API to extract files from an update archive.
 class ArchiveReader
 {
 public:
-  ArchiveReader() : mArchive(nullptr) {}
+  ArchiveReader() {}
   ~ArchiveReader() { Close(); }
 
   int Open(const NS_tchar *path);
   int VerifySignature();
   int VerifyProductInformation(const char *MARChannelID,
                                const char *appVersion);
   void Close();
 
   int ExtractFile(const char *item, const NS_tchar *destination);
   int ExtractFileToStream(const char *item, FILE *fp);
 
 private:
   int ExtractItemToStream(const MarItem *item, FILE *fp);
 
-  MarFile *mArchive;
+  MarFile *mArchive = nullptr;
+  uint8_t *mInBuf = nullptr;
+  uint8_t *mOutBuf = nullptr;
+  size_t mInBufSize  = 262144;
+  size_t mOutBufSize = 262144;
 };
 
 #endif  // ArchiveReader_h__
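The buffers and their sizes move from file-scope statics in archivereader.cpp to per-instance members with C++11 default member initializers, so each ArchiveReader owns its own scratch space and the constructor's init list can go. A minimal sketch of the same pattern with simplified names (an illustration under those assumptions, not the updater's actual code):

#include <cstdint>
#include <cstdlib>

class Reader {
public:
  ~Reader() { Close(); }

  // Allocate the scratch buffer lazily and fall back to a small one on
  // failure, as ArchiveReader::Open does for mInBuf and mOutBuf.
  bool Open() {
    if (!mInBuf) {
      mInBuf = static_cast<uint8_t*>(malloc(mInBufSize));
      if (!mInBuf) {
        mInBufSize = 1024;
        mInBuf = static_cast<uint8_t*>(malloc(mInBufSize));
      }
    }
    return mInBuf != nullptr;
  }

  void Close() {
    if (mInBuf) {  // free(nullptr) would also be a no-op; the check mirrors ArchiveReader::Close
      free(mInBuf);
      mInBuf = nullptr;
    }
  }

private:
  // Per-instance state; previously these were shared file-scope statics.
  uint8_t* mInBuf = nullptr;
  size_t mInBufSize = 262144;
};

int main() {
  Reader reader;
  return reader.Open() ? 0 : 1;
}

The 262144-byte default and the 1024-byte fallback match the values visible in the hunks above.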