Merge mozilla-central to inbound. a=merge CLOSED TREE
author Oana Pop Rus <opoprus@mozilla.com>
Wed, 16 Jan 2019 11:40:36 +0200
changeset 514072 2d1497d9bb0402cb5873759ff4e038352dda2d6e
parent 514071 1230184adda1d000a3b339a599416c015eea4119 (current diff)
parent 514043 e56cc5e7b57a5d18ab72207f7f246a9b8c610c1c (diff)
child 514073 ed01030dd8adf3973f2c5ee1620e8e268e7ad07c
push id 1953
push user ffxbld-merge
push date Mon, 11 Mar 2019 12:10:20 +0000
treeherder mozilla-release@9c35dcbaa899
reviewers merge
milestone 66.0a1
toolkit/pluginproblem/content/pluginProblemBinding.css
toolkit/pluginproblem/pluginGlue.manifest
--- a/browser/base/content/test/static/browser_parsable_css.js
+++ b/browser/base/content/test/static/browser_parsable_css.js
@@ -27,17 +27,17 @@ let whitelist = [
   {sourceName: /highlighters\.css$/i,
    errorMessage: /Unknown pseudo-class.*moz-native-anonymous/i,
    isFromDevTools: true},
   // UA-only media features.
   {sourceName: /\b(autocomplete-item|svg)\.css$/,
    errorMessage: /Expected media feature name but found \u2018-moz.*/i,
    isFromDevTools: false},
 
-  {sourceName: /\b(contenteditable|EditorOverride|svg|forms|html|mathml|ua)\.css$/i,
+  {sourceName: /\b(contenteditable|EditorOverride|svg|forms|html|mathml|ua|pluginproblem)\.css$/i,
    errorMessage: /Unknown pseudo-class.*-moz-/i,
    isFromDevTools: false},
   {sourceName: /\b(html|mathml|ua)\.css$/i,
    errorMessage: /Unknown property.*-moz-/i,
    isFromDevTools: false},
   // Reserved to UA sheets unless layout.css.overflow-clip-box.enabled flipped to true.
   {sourceName: /(?:res|gre-resources)\/forms\.css$/i,
    errorMessage: /Unknown property.*overflow-clip-box/i,
--- a/browser/components/preferences/in-content/privacy.js
+++ b/browser/components/preferences/in-content/privacy.js
@@ -467,19 +467,21 @@ var gPrivacyPane = {
 
     this.highlightCBCategory();
     this.readBlockCookies();
 
     let link = document.getElementById("contentBlockingLearnMore");
     let contentBlockingUrl = Services.urlFormatter.formatURLPref("app.support.baseURL") + "content-blocking";
     link.setAttribute("href", contentBlockingUrl);
 
+    let contentBlockingTour = Services.urlFormatter.formatURLPref("privacy.trackingprotection.introURL")
+      + `?step=3&newtab=true`;
     let warningLinks = document.getElementsByClassName("content-blocking-warning-learn-how");
     for (let warningLink of warningLinks) {
-      warningLink.setAttribute("href", contentBlockingUrl);
+      warningLink.setAttribute("href", contentBlockingTour);
     }
   },
 
   highlightCBCategory() {
     let value = document.getElementById("contentBlockingCategoryRadio").value;
     let standardEl = document.getElementById("contentBlockingOptionStandard");
     let strictEl = document.getElementById("contentBlockingOptionStrict");
     let customEl = document.getElementById("contentBlockingOptionCustom");
--- a/browser/installer/package-manifest.in
+++ b/browser/installer/package-manifest.in
@@ -230,17 +230,16 @@
 @RESPATH@/components/nsUpdateService.manifest
 @RESPATH@/components/nsUpdateService.js
 @RESPATH@/components/nsUpdateServiceStub.js
 #endif
 @RESPATH@/components/nsUpdateTimerManager.manifest
 @RESPATH@/components/nsUpdateTimerManager.js
 @RESPATH@/components/utils.manifest
 @RESPATH@/components/simpleServices.js
-@RESPATH@/components/pluginGlue.manifest
 @RESPATH@/components/ProcessSingleton.manifest
 @RESPATH@/components/MainProcessSingleton.js
 @RESPATH@/components/ContentProcessSingleton.js
 @RESPATH@/components/nsURLFormatter.manifest
 @RESPATH@/components/nsURLFormatter.js
 @RESPATH@/components/toolkitplaces.manifest
 @RESPATH@/components/nsTaggingService.js
 @RESPATH@/components/UnifiedComplete.js
--- a/browser/installer/windows/nsis/defines.nsi.in
+++ b/browser/installer/windows/nsis/defines.nsi.in
@@ -74,16 +74,25 @@
 !else if "@MOZ_UPDATE_CHANNEL@" == "beta"
 !define AccessibleHandlerCLSID "{21E9F98D-A6C9-4CB5-B288-AE2FD2A96C58}"
 !else if "@MOZ_UPDATE_CHANNEL@" == "release"
 !define AccessibleHandlerCLSID "{1BAA303D-B4B9-45E5-9CCB-E3FCA3E274B6}"
 !else
 !define AccessibleHandlerCLSID "{4A195748-DCA2-45FB-9295-0A139E76A9E7}"
 !endif
 
+#ifdef MOZ_LAUNCHER_PROCESS
+!define MOZ_LAUNCHER_PROCESS
+!define MOZ_LAUNCHER_SUBKEY "Software\Mozilla\${AppName}\Launcher"
+#endif
+
+#ifdef RELEASE_OR_BETA
+!define RELEASE_OR_BETA
+#endif
+
 # Due to official and beta using the same branding this is needed to
 differentiate between the url used by the stub for downloading.
 !if "@MOZ_UPDATE_CHANNEL@" == "beta"
 !define BETA_UPDATE_CHANNEL
 !endif
 
 !define BaseURLStubPing "http://download-stats.mozilla.org/stub"
 
--- a/browser/installer/windows/nsis/installer.nsi
+++ b/browser/installer/windows/nsis/installer.nsi
@@ -484,16 +484,22 @@ Section "-Application" APP_IDX
   StrCpy $0 "Software\Microsoft\MediaPlayer\ShimInclusionList\plugin-container.exe"
   ${CreateRegKey} "$TmpVal" "$0" 0
 
   ${If} $TmpVal == "HKLM"
     ; Set the permitted LSP Categories
     ${SetAppLSPCategories} ${LSP_CATEGORIES}
   ${EndIf}
 
+!ifdef MOZ_LAUNCHER_PROCESS
+!ifdef RELEASE_OR_BETA
+  ${DisableLauncherProcessByDefault}
+!endif
+!endif
+
   ; Create shortcuts
   ${LogHeader} "Adding Shortcuts"
 
   ; Remove the start menu shortcuts and directory if the SMPROGRAMS section
   ; exists in the shortcuts_log.ini and the SMPROGRAMS. The installer's shortcut
   ; creation code will create the shortcut in the root of the Start Menu
   ; Programs directory.
   ${RemoveStartMenuDir}
--- a/browser/installer/windows/nsis/shared.nsh
+++ b/browser/installer/windows/nsis/shared.nsh
@@ -157,16 +157,23 @@
       ; based on the above checks, so attempt to just run the EXE.
       ; In the worst case, in case there is some edge case with the
       ; IsAdmin check and the permissions check, the maintenance service
       ; will just fail to be attempted to be installed.
       nsExec::Exec "$\"$INSTDIR\maintenanceservice_installer.exe$\""
     ${EndIf}
   ${EndIf}
 !endif
+
+!ifdef MOZ_LAUNCHER_PROCESS
+!ifdef RELEASE_OR_BETA
+  ${DisableLauncherProcessByDefault}
+!endif
+!endif
+
 !macroend
 !define PostUpdate "!insertmacro PostUpdate"
 
 ; Update the last modified time on the Start Menu shortcut, so that its icon
 ; gets refreshed. Should be called on Win8+ after MigrateStartMenuShortcut
 ; and UpdateShortcutBranding.
 !macro TouchStartMenuShortcut
   ${If} ${FileExists} "$SMPROGRAMS\${BrandShortName}.lnk"
@@ -1602,8 +1609,27 @@ Function SetAsDefaultAppUser
   ${Else}
     GetFunctionAddress $0 SetAsDefaultAppUserHKCU
     UAC::ExecCodeSegment $0
   ${EndIf}
 FunctionEnd
 !define SetAsDefaultAppUser "Call SetAsDefaultAppUser"
 
 !endif ; NO_LOG
+
+!ifdef MOZ_LAUNCHER_PROCESS
+!ifdef RELEASE_OR_BETA
+!macro DisableLauncherProcessByDefault
+  ClearErrors
+  ${ReadRegQWORD} $0 HKCU ${MOZ_LAUNCHER_SUBKEY} "$INSTDIR\${FileMainEXE}|Launcher"
+  ${If} ${Errors}
+    ClearErrors
+    ${ReadRegQWORD} $0 HKCU ${MOZ_LAUNCHER_SUBKEY} "$INSTDIR\${FileMainEXE}|Browser"
+    ${If} ${Errors}
+      ClearErrors
+      ; New install that hasn't seen this yet; disable by default
+      ${WriteRegQWORD} HKCU ${MOZ_LAUNCHER_SUBKEY} "$INSTDIR\${FileMainEXE}|Browser" 0
+    ${EndIf}
+  ${EndIf}
+!macroend
+!define DisableLauncherProcessByDefault "!insertmacro DisableLauncherProcessByDefault"
+!endif
+!endif
--- a/browser/installer/windows/nsis/uninstaller.nsi
+++ b/browser/installer/windows/nsis/uninstaller.nsi
@@ -406,16 +406,22 @@ Section "Uninstall"
   ${EndIf}
 
   ; Only unregister the dll if the registration points to this installation
   ReadRegStr $R1 HKCR "CLSID\${AccessibleHandlerCLSID}\InprocHandler32" ""
   ${If} "$INSTDIR\AccessibleHandler.dll" == "$R1"
     ${UnregisterDLL} "$INSTDIR\AccessibleHandler.dll"
   ${EndIf}
 
+!ifdef MOZ_LAUNCHER_PROCESS
+  DeleteRegValue HKCU ${MOZ_LAUNCHER_SUBKEY} "$INSTDIR\${FileMainEXE}|Launcher"
+  DeleteRegValue HKCU ${MOZ_LAUNCHER_SUBKEY} "$INSTDIR\${FileMainEXE}|Browser"
+  DeleteRegValue HKCU ${MOZ_LAUNCHER_SUBKEY} "$INSTDIR\${FileMainEXE}|Image"
+!endif
+
   ${un.RemovePrecompleteEntries} "false"
 
   ${If} ${FileExists} "$INSTDIR\defaults\pref\channel-prefs.js"
     Delete /REBOOTOK "$INSTDIR\defaults\pref\channel-prefs.js"
   ${EndIf}
   ${If} ${FileExists} "$INSTDIR\defaults\pref"
     RmDir /REBOOTOK "$INSTDIR\defaults\pref"
   ${EndIf}
--- a/dom/media/tests/mochitest/test_getUserMedia_permission.html
+++ b/dom/media/tests/mochitest/test_getUserMedia_permission.html
@@ -50,17 +50,17 @@ runTest(async () => {
       setTimeout(() => { throw e; });
     }
   }
 
   const source = `<html\><script\>(${sourceFn.toString()})()</script\></html\>`;
 
   // Test gUM in sandboxed vs. regular iframe.
 
-  for (const origin of ["http://mochi.test:8888", "https://example.com", "http://test1.mochi.test:8888"]) {
+  for (const origin of ["http://mochi.test:8888", "http://test1.mochi.test:8888"]) {
     const src = origin + path;
     is(await iframeGum({ src, sandbox: "allow-scripts" }),
        "NotAllowedError", "gUM fails in sandboxed iframe " + origin);
     is(await iframeGum({ src, sandbox: "allow-scripts allow-same-origin " + origin }),
        "success", "gUM works in regular iframe");
   }
 
   // Test gUM in sandboxed vs regular srcdoc iframe
--- a/dom/prio/test/gtest/TestPrioEncoder.cpp
+++ b/dom/prio/test/gtest/TestPrioEncoder.cpp
@@ -108,17 +108,17 @@ TEST(PrioEncoder, VerifyFull) {
   // Number of different boolean data fields we collect.
   const int ndata = 3;
 
   unsigned char batchIDStr[32];
   memset(batchIDStr, 0, sizeof batchIDStr);
   snprintf((char*)batchIDStr, sizeof batchIDStr, "%d", rand());
 
   bool dataItems[ndata];
-  unsigned long output[ndata];
+  unsigned long long output[ndata];
 
   // The client's data submission is an arbitrary boolean vector.
   for (int i = 0; i < ndata; i++) {
     // Arbitrary data
     dataItems[i] = rand() % 2;
   }
 
   // Initialize NSS random number generator.
@@ -128,22 +128,23 @@ TEST(PrioEncoder, VerifyFull) {
   // Generate keypairs for servers
   prioRv = Keypair_new(&skA, &pkA);
   ASSERT_TRUE(prioRv == SECSuccess);
 
   prioRv = Keypair_new(&skB, &pkB);
   ASSERT_TRUE(prioRv == SECSuccess);
 
   // Export public keys to hex and print to stdout
-  unsigned char pkHexA[CURVE25519_KEY_LEN_HEX + 1];
-  unsigned char pkHexB[CURVE25519_KEY_LEN_HEX + 1];
-  prioRv = PublicKey_export_hex(pkA, pkHexA);
+  const int keyLength = CURVE25519_KEY_LEN_HEX + 1;
+  unsigned char pkHexA[keyLength];
+  unsigned char pkHexB[keyLength];
+  prioRv = PublicKey_export_hex(pkA, pkHexA, keyLength);
   ASSERT_TRUE(prioRv == SECSuccess);
 
-  prioRv = PublicKey_export_hex(pkB, pkHexB);
+  prioRv = PublicKey_export_hex(pkB, pkHexB, keyLength);
   ASSERT_TRUE(prioRv == SECSuccess);
 
   // Use the default configuration parameters.
   cfg = PrioConfig_new(ndata, pkA, pkB, batchIDStr, strlen((char*)batchIDStr));
   ASSERT_TRUE(cfg != nullptr);
 
   PrioPRGSeed serverSecret;
   prioRv = PrioPRGSeed_randomize(&serverSecret);
--- a/gfx/layers/ipc/LayerTransactionParent.cpp
+++ b/gfx/layers/ipc/LayerTransactionParent.cpp
@@ -52,17 +52,16 @@ LayerTransactionParent::LayerTransaction
     TimeDuration aVsyncRate)
     : mLayerManager(aManager),
       mCompositorBridge(aBridge),
       mAnimStorage(aAnimStorage),
       mId(aId),
       mChildEpoch{0},
       mParentEpoch{0},
       mVsyncRate(aVsyncRate),
-      mPendingTransaction{0},
       mDestroyed(false),
       mIPCOpen(false),
       mUpdateHitTestingTree(false) {
   MOZ_ASSERT(mId.IsValid());
 }
 
 LayerTransactionParent::~LayerTransactionParent() {}
 
@@ -879,49 +878,62 @@ void LayerTransactionParent::DeallocShme
   }
   PLayerTransactionParent::DeallocShmem(aShmem);
 }
 
 bool LayerTransactionParent::IsSameProcess() const {
   return OtherPid() == base::GetCurrentProcId();
 }
 
+void LayerTransactionParent::SetPendingTransactionId(
+    TransactionId aId, const VsyncId& aVsyncId,
+    const TimeStamp& aVsyncStartTime, const TimeStamp& aRefreshStartTime,
+    const TimeStamp& aTxnStartTime, const TimeStamp& aTxnEndTime, bool aContainsSVG,
+    const nsCString& aURL, const TimeStamp& aFwdTime) {
+  mPendingTransactions.AppendElement(
+      PendingTransaction{aId, aVsyncId, aVsyncStartTime, aRefreshStartTime,
+                         aTxnStartTime, aTxnEndTime, aFwdTime, aURL, aContainsSVG});
+}
+
 TransactionId LayerTransactionParent::FlushTransactionId(
-    const VsyncId& aId, TimeStamp& aCompositeEnd) {
-  if (mId.IsValid() && mPendingTransaction.IsValid() && !mVsyncRate.IsZero()) {
-    RecordContentFrameTime(mTxnVsyncId, mVsyncStartTime, mTxnStartTime, aId,
-                           aCompositeEnd, mTxnEndTime - mTxnStartTime,
-                           mVsyncRate, mContainsSVG, false);
-  }
+    const VsyncId& aCompositeId, TimeStamp& aCompositeEnd) {
+  TransactionId id;
+  for (auto& transaction : mPendingTransactions) {
+    id = transaction.mId;
+    if (mId.IsValid() && transaction.mId.IsValid() && !mVsyncRate.IsZero()) {
+      RecordContentFrameTime(
+          transaction.mTxnVsyncId, transaction.mVsyncStartTime,
+          transaction.mTxnStartTime, aCompositeId, aCompositeEnd,
+          transaction.mTxnEndTime - transaction.mTxnStartTime, mVsyncRate,
+          transaction.mContainsSVG, false);
+    }
 
 #if defined(ENABLE_FRAME_LATENCY_LOG)
-  if (mPendingTransaction.IsValid()) {
-    if (mRefreshStartTime) {
-      int32_t latencyMs =
-          lround((aCompositeEnd - mRefreshStartTime).ToMilliseconds());
-      printf_stderr(
-          "From transaction start to end of generate frame latencyMs %d this "
-          "%p\n",
-          latencyMs, this);
+    if (transaction.mId.IsValid()) {
+      if (transaction.mRefreshStartTime) {
+        int32_t latencyMs = lround(
+            (aCompositeEnd - transaction.mRefreshStartTime).ToMilliseconds());
+        printf_stderr(
+            "From transaction start to end of generate frame latencyMs %d this "
+            "%p\n",
+            latencyMs, this);
+      }
+      if (transaction.mFwdTime) {
+        int32_t latencyMs =
+            lround((aCompositeEnd - transaction.mFwdTime).ToMilliseconds());
+        printf_stderr(
+            "From forwarding transaction to end of generate frame latencyMs %d "
+            "this %p\n",
+            latencyMs, this);
+      }
     }
-    if (mFwdTime) {
-      int32_t latencyMs = lround((aCompositeEnd - mFwdTime).ToMilliseconds());
-      printf_stderr(
-          "From forwarding transaction to end of generate frame latencyMs %d "
-          "this %p\n",
-          latencyMs, this);
-    }
+#endif
   }
-#endif
 
-  mRefreshStartTime = TimeStamp();
-  mTxnStartTime = TimeStamp();
-  mFwdTime = TimeStamp();
-  TransactionId id = mPendingTransaction;
-  mPendingTransaction = TransactionId{0};
+  mPendingTransactions.Clear();
   return id;
 }
 
 void LayerTransactionParent::SendAsyncMessage(
     const InfallibleTArray<AsyncParentMessageData>& aMessage) {
   MOZ_ASSERT_UNREACHABLE("unexpected to be called");
 }
 
--- a/gfx/layers/ipc/LayerTransactionParent.h
+++ b/gfx/layers/ipc/LayerTransactionParent.h
@@ -66,34 +66,23 @@ class LayerTransactionParent final : pub
 
   bool AllocUnsafeShmem(size_t aSize, ipc::SharedMemory::SharedMemoryType aType,
                         ipc::Shmem* aShmem) override;
 
   void DeallocShmem(ipc::Shmem& aShmem) override;
 
   bool IsSameProcess() const override;
 
-  const TransactionId& GetPendingTransactionId() { return mPendingTransaction; }
   void SetPendingTransactionId(TransactionId aId, const VsyncId& aVsyncId,
                                const TimeStamp& aVsyncStartTime,
                                const TimeStamp& aRefreshStartTime,
                                const TimeStamp& aTxnStartTime,
                                const TimeStamp& aTxnEndTime, bool aContainsSVG,
                                const nsCString& aURL,
-                               const TimeStamp& aFwdTime) {
-    mPendingTransaction = aId;
-    mTxnVsyncId = aVsyncId;
-    mVsyncStartTime = aVsyncStartTime;
-    mRefreshStartTime = aRefreshStartTime;
-    mTxnStartTime = aTxnStartTime;
-    mTxnEndTime = aTxnEndTime;
-    mContainsSVG = aContainsSVG;
-    mTxnURL = aURL;
-    mFwdTime = aFwdTime;
-  }
+                               const TimeStamp& aFwdTime);
   TransactionId FlushTransactionId(const VsyncId& aId,
                                    TimeStamp& aCompositeEnd);
 
   // CompositableParentManager
   void SendAsyncMessage(
       const InfallibleTArray<AsyncParentMessageData>& aMessage) override;
 
   void SendPendingAsyncMessages() override;
@@ -203,25 +192,28 @@ class LayerTransactionParent final : pub
   // parent. mChildEpoch is the latest epoch value received from the child.
   // mParentEpoch is the latest epoch value that we have told TabParent about
   // (via ObserveLayerUpdate).
   LayersObserverEpoch mChildEpoch;
   LayersObserverEpoch mParentEpoch;
 
   TimeDuration mVsyncRate;
 
-  TransactionId mPendingTransaction;
-  VsyncId mTxnVsyncId;
-  TimeStamp mVsyncStartTime;
-  TimeStamp mRefreshStartTime;
-  TimeStamp mTxnStartTime;
-  TimeStamp mTxnEndTime;
-  TimeStamp mFwdTime;
-  nsCString mTxnURL;
-  bool mContainsSVG;
+  struct PendingTransaction {
+    TransactionId mId;
+    VsyncId mTxnVsyncId;
+    TimeStamp mVsyncStartTime;
+    TimeStamp mRefreshStartTime;
+    TimeStamp mTxnStartTime;
+    TimeStamp mTxnEndTime;
+    TimeStamp mFwdTime;
+    nsCString mTxnURL;
+    bool mContainsSVG;
+  };
+  AutoTArray<PendingTransaction, 2> mPendingTransactions;
 
   // When the widget/frame/browser stuff in this process begins its
   // destruction process, we need to Disconnect() all the currently
   // live shadow layers, because some of them might be orphaned from
   // the layer tree.  This happens in Destroy() above.  After we
   // Destroy() ourself, there's a window in which that information
   // hasn't yet propagated back to the child side and it might still
   // send us layer transactions.  We want to ignore those transactions
--- a/gfx/wr/webrender/src/display_list_flattener.rs
+++ b/gfx/wr/webrender/src/display_list_flattener.rs
@@ -34,16 +34,17 @@ use prim_store::picture::{Picture, Pictu
 use prim_store::text_run::TextRun;
 use render_backend::{DocumentView};
 use resource_cache::{FontInstanceMap, ImageRequest};
 use scene::{Scene, ScenePipeline, StackingContextHelpers};
 use scene_builder::{DocumentResources, InternerMut};
 use spatial_node::{StickyFrameInfo, ScrollFrameKind, SpatialNodeType};
 use std::{f32, mem, usize};
 use std::collections::vec_deque::VecDeque;
+use std::sync::Arc;
 use tiling::{CompositeOps};
 use util::{MaxRect, VecHelper};
 
 #[derive(Debug, Copy, Clone)]
 struct ClipNode {
     id: ClipChainId,
     count: usize,
 }
@@ -2358,17 +2359,17 @@ impl<'a> DisplayListFlattener<'a> {
                     GlyphInstance {
                         index: glyph.index,
                         point: glyph.point - prim_offset,
                     }
                 })
                 .collect();
 
             TextRun {
-                glyphs,
+                glyphs: Arc::new(glyphs),
                 font,
                 offset,
                 shadow: false,
             }
         };
 
         self.add_primitive(
             clip_and_scroll,
--- a/gfx/wr/webrender/src/intern.rs
+++ b/gfx/wr/webrender/src/intern.rs
@@ -1,72 +1,62 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
+//! The interning module provides a generic data structure
+//! interning container. It is similar in concept to a
+//! traditional string interning container, but it is
+//! specialized to the WR thread model.
+//!
+//! There is an Interner structure, that lives in the
+//! scene builder thread, and a DataStore structure
+//! that lives in the frame builder thread.
+//!
+//! Hashing, interning and handle creation is done by
+//! the interner structure during scene building.
+//!
+//! Delta changes for the interner are pushed during
+//! a transaction to the frame builder. The frame builder
+//! is then able to access the content of the interned
+//! handles quickly, via array indexing.
+//!
+//! Epoch tracking ensures that the garbage collection
+//! step which the interner uses to remove items is
+//! only invoked on items that the frame builder thread
+//! is no longer referencing.
+//!
+//! Items in the data store are stored in a traditional
+//! free-list structure, for content access and memory
+//! usage efficiency.
+//!
+//! The epoch is incremented each time a scene is
+//! built. The most recently used scene epoch is
+//! stored inside each handle. This is then used for
+//! cache invalidation.
+
 use api::{LayoutPrimitiveInfo};
 use internal_types::FastHashMap;
 use malloc_size_of::MallocSizeOf;
 use profiler::ResourceProfileCounter;
 use std::fmt::Debug;
 use std::hash::Hash;
 use std::marker::PhantomData;
 use std::{mem, ops, u64};
 use std::sync::atomic::{AtomicUsize, Ordering};
 use util::VecHelper;
 
-/*
-
- The interning module provides a generic data structure
- interning container. It is similar in concept to a
- traditional string interning container, but it is
- specialized to the WR thread model.
-
- There is an Interner structure, that lives in the
- scene builder thread, and a DataStore structure
- that lives in the frame builder thread.
-
- Hashing, interning and handle creation is done by
- the interner structure during scene building.
-
- Delta changes for the interner are pushed during
- a transaction to the frame builder. The frame builder
- is then able to access the content of the interned
- handles quickly, via array indexing.
-
- Epoch tracking ensures that the garbage collection
- step which the interner uses to remove items is
- only invoked on items that the frame builder thread
- is no longer referencing.
-
- Items in the data store are stored in a traditional
- free-list structure, for content access and memory
- usage efficiency.
-
- */
-
-/// The epoch is incremented each time a scene is
-/// built. The most recently used scene epoch is
-/// stored inside each item and handle. This is
-/// then used for cache invalidation (item) and
-/// correctness validation (handle).
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
 #[derive(Debug, Copy, Clone, MallocSizeOf, PartialEq)]
 struct Epoch(u64);
 
-impl Epoch {
-    pub const INVALID: Self = Epoch(u64::MAX);
-}
-
 /// A list of updates to be applied to the data store,
 /// provided by the interning structure.
 pub struct UpdateList<S> {
-    /// The current epoch of the scene builder.
-    epoch: Epoch,
     /// The additions and removals to apply.
     updates: Vec<Update>,
     /// Actual new data to insert.
     data: Vec<S>,
 }
 
 lazy_static! {
     static ref NEXT_UID: AtomicUsize = AtomicUsize::new(0);
@@ -104,48 +94,37 @@ impl <M> Handle<M> where M: Copy {
 }
 
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
 #[derive(MallocSizeOf)]
 pub enum UpdateKind {
     Insert,
     Remove,
-    UpdateEpoch,
 }
 
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
 #[derive(MallocSizeOf)]
 pub struct Update {
     index: usize,
     kind: UpdateKind,
 }
 
-/// The data item is stored with an epoch, for validating
-/// correct access patterns.
-#[cfg_attr(feature = "capture", derive(Serialize))]
-#[cfg_attr(feature = "replay", derive(Deserialize))]
-#[derive(MallocSizeOf)]
-struct Item<T: MallocSizeOf> {
-    epoch: Epoch,
-    data: T,
-}
-
 pub trait InternDebug {
     fn on_interned(&self, _uid: ItemUid) {}
 }
 
 /// The data store lives in the frame builder thread. It
 /// contains a free-list of items for fast access.
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
 #[derive(MallocSizeOf)]
 pub struct DataStore<S, T: MallocSizeOf, M> {
-    items: Vec<Item<T>>,
+    items: Vec<Option<T>>,
     _source: PhantomData<S>,
     _marker: PhantomData<M>,
 }
 
 impl<S, T, M> ::std::default::Default for DataStore<S, T, M>
 where
     S: Debug + MallocSizeOf,
     T: From<S> + MallocSizeOf,
@@ -172,26 +151,21 @@ where
         &mut self,
         update_list: UpdateList<S>,
         profile_counter: &mut ResourceProfileCounter,
     ) {
         let mut data_iter = update_list.data.into_iter();
         for update in update_list.updates {
             match update.kind {
                 UpdateKind::Insert => {
-                    self.items.entry(update.index).set(Item {
-                        data: T::from(data_iter.next().unwrap()),
-                        epoch: update_list.epoch,
-                    });
+                    self.items.entry(update.index).
+                        set(Some(T::from(data_iter.next().unwrap())));
                 }
                 UpdateKind::Remove => {
-                    self.items[update.index].epoch = Epoch::INVALID;
-                }
-                UpdateKind::UpdateEpoch => {
-                    self.items[update.index].epoch = update_list.epoch;
+                    self.items[update.index] = None;
                 }
             }
         }
 
         let per_item_size = mem::size_of::<S>() + mem::size_of::<T>();
         profile_counter.set(self.items.len(), per_item_size * self.items.len());
 
         debug_assert!(data_iter.next().is_none());
@@ -202,34 +176,30 @@ where
 impl<S, T, M> ops::Index<Handle<M>> for DataStore<S, T, M>
 where
     S: MallocSizeOf,
     T: MallocSizeOf,
     M: Copy
 {
     type Output = T;
     fn index(&self, handle: Handle<M>) -> &T {
-        let item = &self.items[handle.index as usize];
-        assert_eq!(item.epoch, handle.epoch);
-        &item.data
+        self.items[handle.index as usize].as_ref().expect("Bad datastore lookup")
     }
 }
 
 /// Retrieve a mutable item from the store via handle
 /// Retrieve an item from the store via handle
 impl<S, T, M> ops::IndexMut<Handle<M>> for DataStore<S, T, M>
 where
     S: MallocSizeOf,
     T: MallocSizeOf,
     M: Copy
 {
     fn index_mut(&mut self, handle: Handle<M>) -> &mut T {
-        let item = &mut self.items[handle.index as usize];
-        assert_eq!(item.epoch, handle.epoch);
-        &mut item.data
+        self.items[handle.index as usize].as_mut().expect("Bad datastore lookup")
     }
 }
 
 /// The main interning data structure. This lives in the
 /// scene builder thread, and handles hashing and interning
 /// unique data structures. It also manages a free-list for
 /// the items in the data store, which is synchronized via
 /// an update list of additions / removals.
@@ -249,17 +219,17 @@ where
     /// Pending list of updates that need to be applied.
     updates: Vec<Update>,
     /// Pending new data to insert.
     update_data: Vec<S>,
     /// The current epoch for the interner.
     current_epoch: Epoch,
     /// The information associated with each interned
     /// item that can be accessed by the interner.
-    local_data: Vec<Item<D>>,
+    local_data: Vec<D>,
 }
 
 impl<S, D, M> ::std::default::Default for Interner<S, D, M>
 where
     S: Eq + Hash + Clone + Debug + MallocSizeOf,
     D: MallocSizeOf,
     M: Copy + Debug + MallocSizeOf,
 {
@@ -292,27 +262,16 @@ where
         &mut self,
         data: &S,
         f: F,
     ) -> Handle<M> where F: FnOnce() -> D {
         // Use get_mut rather than entry here to avoid
         // cloning the (sometimes large) key in the common
         // case, where the data already exists in the interner.
         if let Some(handle) = self.map.get_mut(data) {
-            // Update the epoch in the data store. This
-            // is not strictly needed for correctness, but
-            // is used to ensure items are only accessed
-            // via valid handles.
-            if handle.epoch != self.current_epoch {
-                self.updates.push(Update {
-                    index: handle.index as usize,
-                    kind: UpdateKind::UpdateEpoch,
-                });
-                self.local_data[handle.index as usize].epoch = self.current_epoch;
-            }
             handle.epoch = self.current_epoch;
             return *handle;
         }
 
         // We need to intern a new data item. First, find out
         // if there is a spare slot in the free-list that we
         // can use. Otherwise, append to the end of the list.
         let index = match self.free_list.pop() {
@@ -339,20 +298,17 @@ where
         data.on_interned(handle.uid);
 
         // Store this handle so the next time it is
         // interned, it gets re-used.
         self.map.insert(data.clone(), handle);
 
         // Create the local data for this item that is
         // being interned.
-        self.local_data.entry(index).set(Item {
-            epoch: self.current_epoch,
-            data: f(),
-        });
+        self.local_data.entry(index).set(f());
 
         handle
     }
 
     /// Retrieve the pending list of updates for an interner
     /// that need to be applied to the data store. Also run
     /// a GC step that removes old entries.
     pub fn end_frame_and_get_pending_updates(&mut self) -> UpdateList<S> {
@@ -384,17 +340,16 @@ where
             }
 
             true
         });
 
         let updates = UpdateList {
             updates,
             data,
-            epoch: self.current_epoch,
         };
 
         // Begin the next epoch
         self.current_epoch = Epoch(self.current_epoch.0 + 1);
 
         updates
     }
 }
@@ -403,19 +358,17 @@ where
 impl<S, D, M> ops::Index<Handle<M>> for Interner<S, D, M>
 where
     S: Eq + Clone + Hash + Debug + MallocSizeOf,
     D: MallocSizeOf,
     M: Copy + Debug + MallocSizeOf
 {
     type Output = D;
     fn index(&self, handle: Handle<M>) -> &D {
-        let item = &self.local_data[handle.index as usize];
-        assert_eq!(item.epoch, handle.epoch);
-        &item.data
+        &self.local_data[handle.index as usize]
     }
 }
 
 /// Implement `Internable` for a type that wants participate in interning.
 ///
 /// see DisplayListFlattener::add_interned_primitive<P>
 pub trait Internable {
     type Marker: Copy + Debug + MallocSizeOf;
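
The new module-level doc comment above describes the Interner (scene builder thread) / DataStore (frame builder thread) split, and this patch drops the per-item epoch tracking from the store in favour of plain Option slots. As a rough, self-contained sketch of that simplified storage shape (the types and names here are illustrative only, not WebRender's actual generics over S/T/M):

    // Minimal sketch of an Option-based free-list store: removed slots become
    // None, and a lookup through a stale handle fails the expect() instead of
    // an epoch assertion.
    struct Handle { index: usize }

    struct DataStore<T> { items: Vec<Option<T>> }

    impl<T> DataStore<T> {
        fn new() -> Self { DataStore { items: Vec::new() } }

        // Insert at a slot index chosen by the interner's free list.
        fn insert(&mut self, index: usize, value: T) {
            while self.items.len() <= index { self.items.push(None); }
            self.items[index] = Some(value);
        }

        // Remove just clears the slot; no epoch bookkeeping remains.
        fn remove(&mut self, index: usize) { self.items[index] = None; }

        fn get(&self, handle: &Handle) -> &T {
            self.items[handle.index].as_ref().expect("Bad datastore lookup")
        }
    }

    fn main() {
        let mut store = DataStore::new();
        store.insert(0, "interned item");
        let handle = Handle { index: 0 };
        assert_eq!(*store.get(&handle), "interned item");
        store.remove(0);
    }
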
--- a/gfx/wr/webrender/src/picture.rs
+++ b/gfx/wr/webrender/src/picture.rs
@@ -31,16 +31,17 @@ use render_backend::FrameResources;
 use render_task::{ClearMode, RenderTask, RenderTaskCacheEntryHandle, TileBlit};
 use render_task::{RenderTaskCacheKey, RenderTaskCacheKeyKind, RenderTaskId, RenderTaskLocation};
 use resource_cache::ResourceCache;
 use scene::{FilterOpHelpers, SceneProperties};
 use scene_builder::DocumentResources;
 use smallvec::SmallVec;
 use surface::{SurfaceDescriptor};
 use std::{mem, u16};
+use std::sync::atomic::{AtomicUsize, ATOMIC_USIZE_INIT, Ordering};
 use texture_cache::{Eviction, TextureCacheHandle};
 use tiling::RenderTargetKind;
 use util::{ComparableVec, TransformedRectKind, MatrixHelpers, MaxRect};
 
 /*
  A picture represents a dynamically rendered image. It consists of:
 
  * A number of primitives that are drawn onto the picture.
@@ -73,16 +74,17 @@ impl RetainedTiles {
         RetainedTiles {
             tiles: Vec::new(),
             ref_prims: FastHashMap::default(),
         }
     }
 
     /// Merge items from one retained tiles into another.
     pub fn merge(&mut self, other: RetainedTiles) {
+        assert!(self.tiles.is_empty() || other.tiles.is_empty());
         self.tiles.extend(other.tiles);
         self.ref_prims.extend(other.ref_prims);
     }
 }
 
 /// Unit for tile coordinates.
 #[derive(Hash, Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
 pub struct TileCoordinate;
@@ -105,22 +107,23 @@ const FRAMES_BEFORE_CACHING: usize = 2;
 //           profiling / telemetry to see when it makes sense
 //           to cache a picture.
 const MAX_CACHE_SIZE: f32 = 2048.0;
 /// The maximum size per axis of a surface,
 ///  in WorldPixel coordinates.
 const MAX_SURFACE_SIZE: f32 = 4096.0;
 
 
-/// The number of primitives to search for, trying to correlate
-/// the offset between one display list and another.
-const MAX_PRIMS_TO_CORRELATE: usize = 64;
-/// The minmum number of primitives we need to correlate in
-/// order to consider it a success.
-const MIN_PRIMS_TO_CORRELATE: usize = MAX_PRIMS_TO_CORRELATE / 4;
+/// The maximum number of primitives to look for in a display
+/// list, trying to find unique primitives.
+const MAX_PRIMS_TO_SEARCH: usize = 128;
+
+/// Used to get unique tile IDs, even when the tile cache is
+/// destroyed between display lists / scenes.
+static NEXT_TILE_ID: AtomicUsize = ATOMIC_USIZE_INIT;
 
 /// Information about the state of an opacity binding.
 #[derive(Debug)]
 pub struct OpacityBindingInfo {
     /// The current value retrieved from dynamic scene properties.
     value: f32,
     /// True if it was changed (or is new) since the last frame build.
     changed: bool,
@@ -208,27 +211,33 @@ impl Tile {
 
     /// Clear the dependencies for a tile.
     fn clear(&mut self) {
         self.transforms.clear();
         self.descriptor.clear();
         self.potential_clips.clear();
     }
 
-    /// Update state related to whether a tile has the same
-    /// content and is valid to use.
-    fn update_validity(&mut self, tile_bounding_rect: &WorldRect) {
+    /// Invalidate a tile based on change in content. This
+    /// must be called even if the tile is not currently
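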
+    /// visible on screen. We might be able to improve this
+    /// later by changing how ComparableVec is used.
+    fn update_content_validity(&mut self) {
         // Check if the contents of the primitives, clips, and
         // other dependencies are the same.
         self.is_same_content &= self.descriptor.is_same_content();
-
+        self.is_valid &= self.is_same_content;
+    }
+
+    /// Update state related to whether a tile has a valid rect that
+    /// covers the required visible part of the tile.
+    fn update_rect_validity(&mut self, tile_bounding_rect: &WorldRect) {
         // The tile is only valid if:
         // - The content is the same *and*
         // - The valid part of the tile includes the needed part.
-        self.is_valid &= self.is_same_content;
         self.is_valid &= self.valid_rect.contains_rect(tile_bounding_rect);
 
         // Update count of how many times this tile has had the same content.
         if !self.is_same_content {
             self.same_frames = 0;
         }
         self.same_frames += 1;
     }
@@ -357,78 +366,122 @@ pub struct TileCache {
     /// A list of blits from the framebuffer to be applied during this frame.
     pub pending_blits: Vec<TileBlit>,
     /// The current world bounding rect of this tile cache. This is used
     /// to derive a local clip rect, such that we don't obscure in the
     /// z-buffer any items placed earlier in the render order (such as
     /// scroll bars in gecko, when the content overflows under the
     /// scroll bar).
     world_bounding_rect: WorldRect,
-    /// Counter for the next id to assign for a new tile.
-    next_id: usize,
     /// List of reference primitive information used for
     /// correlating the position between display lists.
-    reference_prims: Vec<ReferencePrimitive>,
+    reference_prims: ReferencePrimitiveList,
     /// The root clip chain for this tile cache.
     root_clip_chain_id: ClipChainId,
 }
 
 /// Stores information about a primitive in the cache that we will
 /// try to use to correlate positions between display lists.
+#[derive(Clone)]
 struct ReferencePrimitive {
     uid: ItemUid,
     local_pos: LayoutPoint,
     spatial_node_index: SpatialNodeIndex,
+    ref_count: usize,
+}
+
+/// A list of primitive with uids that only exist once in a display
+/// list. Used to obtain reference points to correlate the offset
+/// between two similar display lists.
+struct ReferencePrimitiveList {
+    ref_prims: Vec<ReferencePrimitive>,
+}
+
+impl ReferencePrimitiveList {
+    fn new(
+        prim_instances: &[PrimitiveInstance],
+        pictures: &[PicturePrimitive],
+    ) -> Self {
+        let mut map = FastHashMap::default();
+        let mut search_count = 0;
+
+        // Collect a set of primitives that we can
+        // potentially use for correlation.
+        collect_ref_prims(
+            prim_instances,
+            pictures,
+            &mut map,
+            &mut search_count,
+        );
+
+        // Select only primitives where the uid is unique
+        // in the display list, giving the best chance
+        // of finding correct correlations.
+        let ref_prims = map.values().filter(|prim| {
+            prim.ref_count == 1
+        }).cloned().collect();
+
+        ReferencePrimitiveList {
+            ref_prims,
+        }
+    }
 }
 
 /// Collect a sample of primitives from the prim list that can
 /// be used to correlate positions.
-// TODO(gw): Investigate best how to select which primitives to select.
 fn collect_ref_prims(
     prim_instances: &[PrimitiveInstance],
-    ref_prims: &mut Vec<ReferencePrimitive>,
     pictures: &[PicturePrimitive],
+    map: &mut FastHashMap<ItemUid, ReferencePrimitive>,
+    search_count: &mut usize,
 ) {
     for prim_instance in prim_instances {
-        if ref_prims.len() >= MAX_PRIMS_TO_CORRELATE {
+        if *search_count > MAX_PRIMS_TO_SEARCH {
             return;
         }
 
         match prim_instance.kind {
             PrimitiveInstanceKind::Picture { pic_index, .. } => {
                 collect_ref_prims(
                     &pictures[pic_index.0].prim_list.prim_instances,
-                    ref_prims,
                     pictures,
+                    map,
+                    search_count,
                 );
             }
             _ => {
-                ref_prims.push(ReferencePrimitive {
-                    uid: prim_instance.uid(),
-                    local_pos: prim_instance.prim_origin,
-                    spatial_node_index: prim_instance.spatial_node_index,
+                let uid = prim_instance.uid();
+
+                let entry = map.entry(uid).or_insert_with(|| {
+                    ReferencePrimitive {
+                        uid,
+                        local_pos: prim_instance.prim_origin,
+                        spatial_node_index: prim_instance.spatial_node_index,
+                        ref_count: 0,
+                    }
                 });
+                entry.ref_count += 1;
+
+                *search_count = *search_count + 1;
             }
         }
     }
 }
 
 impl TileCache {
     pub fn new(
         spatial_node_index: SpatialNodeIndex,
         prim_instances: &[PrimitiveInstance],
         root_clip_chain_id: ClipChainId,
         pictures: &[PicturePrimitive],
     ) -> Self {
         // Build the list of reference primitives
         // for this picture cache.
-        let mut reference_prims = Vec::with_capacity(MAX_PRIMS_TO_CORRELATE);
-        collect_ref_prims(
+        let reference_prims = ReferencePrimitiveList::new(
             prim_instances,
-            &mut reference_prims,
             pictures,
         );
 
         TileCache {
             spatial_node_index,
             tiles: Vec::new(),
             map_local_to_world: SpaceMapper::new(
                 ROOT_SPATIAL_NODE_INDEX,
@@ -439,28 +492,21 @@ impl TileCache {
             dirty_region: None,
             needs_update: true,
             world_origin: WorldPoint::zero(),
             world_tile_size: WorldSize::zero(),
             tile_count: TileSize::zero(),
             scroll_offset: None,
             pending_blits: Vec::new(),
             world_bounding_rect: WorldRect::zero(),
-            next_id: 0,
             reference_prims,
             root_clip_chain_id,
         }
     }
 
-    fn next_id(&mut self) -> TileId {
-        let id = TileId(self.next_id);
-        self.next_id += 1;
-        id
-    }
-
     /// Get the tile coordinates for a given rectangle.
     fn get_tile_coords_for_rect(
         &self,
         rect: &WorldRect,
     ) -> (TileOffset, TileOffset) {
         // Translate the rectangle into the virtual tile space
         let origin = rect.origin - self.world_origin;
 
@@ -507,17 +553,17 @@ impl TileCache {
         } else {
             assert!(self.tiles.is_empty());
             self.tiles = mem::replace(&mut retained_tiles.tiles, Vec::new());
 
             // Get the positions of the reference primitives for this
             // new display list.
             let mut new_prim_map = FastHashMap::default();
             build_ref_prims(
-                &self.reference_prims,
+                &self.reference_prims.ref_prims,
                 &mut new_prim_map,
                 frame_context.clip_scroll_tree,
             );
 
             // Attempt to correlate them to work out which offset to apply.
             correlate_prim_maps(
                 &retained_tiles.ref_prims,
                 &new_prim_map,
@@ -651,17 +697,20 @@ impl TileCache {
         for y in 0 .. y_tiles {
             for x in 0 .. x_tiles {
                 let px = p0.x + x as f32 * TILE_SIZE_WIDTH as f32;
                 let py = p0.y + y as f32 * TILE_SIZE_HEIGHT as f32;
                 let key = (px.round() as i32, py.round() as i32);
 
                 let mut tile = match old_tiles.remove(&key) {
                     Some(tile) => tile,
-                    None => Tile::new(self.next_id()),
+                    None => {
+                        let next_id = TileId(NEXT_TILE_ID.fetch_add(1, Ordering::Relaxed));
+                        Tile::new(next_id)
+                    }
                 };
 
                 tile.world_rect = WorldRect::new(
                     WorldPoint::new(
                         px / frame_context.device_pixel_scale.0,
                         py / frame_context.device_pixel_scale.0,
                     ),
                     self.world_tile_size,
@@ -1102,28 +1151,31 @@ impl TileCache {
                 // assuming that it's either visible or we want to retain it for
                 // a while in case it gets scrolled back onto screen soon.
                 // TODO(gw): Consider switching to manual eviction policy?
                 resource_cache.texture_cache.request(&tile.handle, gpu_cache);
             } else {
                 tile.is_valid = false;
             }
 
+            // Invalidate the tile based on the content changing.
+            tile.update_content_validity();
+
             let visible_rect = match tile.visible_rect {
                 Some(rect) => rect,
                 None => continue,
             };
 
-            // Check the content of the tile is the same
+            // Check the valid rect of the primitive is sufficient.
             let tile_bounding_rect = match visible_rect.intersection(&self.world_bounding_rect) {
                 Some(rect) => rect.translate(&-tile.world_rect.origin.to_vector()),
                 None => continue,
             };
 
-            tile.update_validity(&tile_bounding_rect);
+            tile.update_rect_validity(&tile_bounding_rect);
 
             // If there are no primitives there is no need to draw or cache it.
             if tile.descriptor.prims.is_empty() {
                 continue;
             }
 
             // Decide how to handle this tile when drawing this frame.
             if tile.is_valid {
@@ -1135,23 +1187,23 @@ impl TileCache {
                         let tile_device_rect = tile.world_rect * frame_context.device_pixel_scale;
                         let mut label_pos = tile_device_rect.origin + DeviceVector2D::new(20.0, 30.0);
                         _scratch.push_debug_rect(
                             tile_device_rect,
                             debug_colors::GREEN,
                         );
                         _scratch.push_debug_string(
                             label_pos,
-                            debug_colors::WHITE,
-                            format!("{:?}", tile.id),
+                            debug_colors::RED,
+                            format!("{:?} {:?}", tile.id, tile.handle),
                         );
                         label_pos.y += 20.0;
                         _scratch.push_debug_string(
                             label_pos,
-                            debug_colors::WHITE,
+                            debug_colors::RED,
                             format!("same: {} frames", tile.same_frames),
                         );
                     }
                 }
             } else {
                 // Add the tile rect to the dirty rect.
                 dirty_world_rect = dirty_world_rect.union(&visible_rect);
 
@@ -1746,17 +1798,17 @@ impl PicturePrimitive {
         mut self,
         retained_tiles: &mut RetainedTiles,
         clip_scroll_tree: &ClipScrollTree,
     ) {
         if let Some(tile_cache) = self.tile_cache.take() {
             // Calculate and store positions of the reference
             // primitives for this tile cache.
             build_ref_prims(
-                &tile_cache.reference_prims,
+                &tile_cache.reference_prims.ref_prims,
                 &mut retained_tiles.ref_prims,
                 clip_scroll_tree,
             );
 
             for tile in tile_cache.tiles {
                 retained_tiles.tiles.push(tile);
             }
         }
@@ -2868,25 +2920,19 @@ fn correlate_prim_maps(
     }
 
     // Calculate the mode (the most common frequency of offset). This
     // can be different for some primitives, if they've animated, or
     // are attached to a different scroll node etc.
     map.into_iter()
         .max_by_key(|&(_, count)| count)
         .and_then(|(offset, count)| {
-            // We will assume we can use the calculated offset if:
-            // (a) We found more than one quarter of the selected
-            //     reference primitives to have the same offset.
-            // (b) The display lists both had the same number of
-            //     primitives, and we exactly matched. This handles
-            //     edge cases like scenes where there are very
-            //     few primitives, while excluding edge cases like
-            //     dl_mutate that have thousands of primitives with
-            //     the same uid.
-            if (count >= MIN_PRIMS_TO_CORRELATE) ||
-               (count == old_prims.len() && count == new_prims.len()) {
+            // We will assume we can use the calculated offset if we
+            // found more than one quarter of the selected reference
+            // primitives to have the same offset.
+            let prims_available = new_prims.len().min(old_prims.len());
+            if count >= prims_available / 4 {
                 Some(offset.into())
             } else {
                 None
             }
         })
 }
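
For reference, the acceptance rule the hunk above switches to (take the most common offset among matched reference primitives and use it when at least a quarter of the available pairs agree) can be illustrated with a small standalone sketch; the function name and signature here are hypothetical, not the actual correlate_prim_maps API:

    use std::collections::HashMap;

    // Pick the mode of the matched offsets and accept it only if it covers at
    // least a quarter of the pairs available in the smaller display list.
    fn pick_offset(offsets: &[(i32, i32)], old_count: usize, new_count: usize)
        -> Option<(i32, i32)> {
        let mut freq: HashMap<(i32, i32), usize> = HashMap::new();
        for &off in offsets {
            *freq.entry(off).or_insert(0) += 1;
        }
        freq.into_iter()
            .max_by_key(|&(_, count)| count)
            .and_then(|(offset, count)| {
                let prims_available = old_count.min(new_count);
                if count >= prims_available / 4 { Some(offset) } else { None }
            })
    }

    fn main() {
        // Three of four matched primitives moved by the same scroll offset.
        let offsets = [(0, -120), (0, -120), (0, -120), (13, 7)];
        assert_eq!(pick_offset(&offsets, 8, 10), Some((0, -120)));
    }
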
--- a/gfx/wr/webrender/src/prim_store/text_run.rs
+++ b/gfx/wr/webrender/src/prim_store/text_run.rs
@@ -13,42 +13,44 @@ use intern;
 use prim_store::{PrimitiveOpacity, PrimitiveSceneData,  PrimitiveScratchBuffer};
 use prim_store::{PrimitiveStore, PrimKeyCommonData, PrimTemplateCommonData, VectorKey};
 use render_task::{RenderTaskTree};
 use renderer::{MAX_VERTEX_TEXTURE_WIDTH};
 use resource_cache::{ResourceCache};
 use util::{MatrixHelpers};
 use prim_store::PrimitiveInstanceKind;
 use std::ops;
+use std::sync::Arc;
 use storage;
+use util::PrimaryArc;
 
 /// A run of glyphs, with associated font information.
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
 #[derive(Debug, Clone, Eq, MallocSizeOf, PartialEq, Hash)]
 pub struct TextRunKey {
     pub common: PrimKeyCommonData,
     pub font: FontInstance,
     pub offset: VectorKey,
-    pub glyphs: Vec<GlyphInstance>,
+    pub glyphs: PrimaryArc<Vec<GlyphInstance>>,
     pub shadow: bool,
 }
 
 impl TextRunKey {
     pub fn new(
         info: &LayoutPrimitiveInfo,
         text_run: TextRun,
     ) -> Self {
         TextRunKey {
             common: PrimKeyCommonData::with_info(
                 info,
             ),
             font: text_run.font,
             offset: text_run.offset.into(),
-            glyphs: text_run.glyphs,
+            glyphs: PrimaryArc(text_run.glyphs),
             shadow: text_run.shadow,
         }
     }
 }
 
 impl intern::InternDebug for TextRunKey {}
 
 impl AsInstanceKind<TextRunDataHandle> for TextRunKey {
@@ -71,17 +73,18 @@ impl AsInstanceKind<TextRunDataHandle> f
 
 #[cfg_attr(feature = "capture", derive(Serialize))]
 #[cfg_attr(feature = "replay", derive(Deserialize))]
 #[derive(MallocSizeOf)]
 pub struct TextRunTemplate {
     pub common: PrimTemplateCommonData,
     pub font: FontInstance,
     pub offset: LayoutVector2D,
-    pub glyphs: Vec<GlyphInstance>,
+    #[ignore_malloc_size_of = "Measured via PrimaryArc"]
+    pub glyphs: Arc<Vec<GlyphInstance>>,
 }
 
 impl ops::Deref for TextRunTemplate {
     type Target = PrimTemplateCommonData;
     fn deref(&self) -> &Self::Target {
         &self.common
     }
 }
@@ -94,17 +97,17 @@ impl ops::DerefMut for TextRunTemplate {
 
 impl From<TextRunKey> for TextRunTemplate {
     fn from(item: TextRunKey) -> Self {
         let common = PrimTemplateCommonData::with_key_common(item.common);
         TextRunTemplate {
             common,
             font: item.font,
             offset: item.offset.into(),
-            glyphs: item.glyphs,
+            glyphs: item.glyphs.0,
         }
     }
 }
 
 impl TextRunTemplate {
     /// Update the GPU cache for a given primitive template. This may be called multiple
     /// times per frame, by each primitive reference that refers to this interned
     /// template. The initial request call to the GPU cache ensures that work is only
@@ -166,17 +169,17 @@ pub struct TextRunDataMarker;
 pub type TextRunDataStore = intern::DataStore<TextRunKey, TextRunTemplate, TextRunDataMarker>;
 pub type TextRunDataHandle = intern::Handle<TextRunDataMarker>;
 pub type TextRunDataUpdateList = intern::UpdateList<TextRunKey>;
 pub type TextRunDataInterner = intern::Interner<TextRunKey, PrimitiveSceneData, TextRunDataMarker>;
 
 pub struct TextRun {
     pub font: FontInstance,
     pub offset: LayoutVector2D,
-    pub glyphs: Vec<GlyphInstance>,
+    pub glyphs: Arc<Vec<GlyphInstance>>,
     pub shadow: bool,
 }
 
 impl intern::Internable for TextRun {
     type Marker = TextRunDataMarker;
     type Source = TextRunKey;
     type StoreData = TextRunTemplate;
     type InternData = PrimitiveSceneData;
@@ -331,13 +334,13 @@ impl TextRunPrimitive {
 fn test_struct_sizes() {
     use std::mem;
     // The sizes of these structures are critical for performance on a number of
     // talos stress tests. If you get a failure here on CI, there's two possibilities:
     // (a) You made a structure smaller than it currently is. Great work! Update the
     //     test expectations and move on.
     // (b) You made a structure larger. This is not necessarily a problem, but should only
     //     be done with care, and after checking if talos performance regresses badly.
-    assert_eq!(mem::size_of::<TextRun>(), 112, "TextRun size changed");
-    assert_eq!(mem::size_of::<TextRunTemplate>(), 128, "TextRunTemplate size changed");
-    assert_eq!(mem::size_of::<TextRunKey>(), 120, "TextRunKey size changed");
+    assert_eq!(mem::size_of::<TextRun>(), 96, "TextRun size changed");
+    assert_eq!(mem::size_of::<TextRunTemplate>(), 112, "TextRunTemplate size changed");
+    assert_eq!(mem::size_of::<TextRunKey>(), 104, "TextRunKey size changed");
     assert_eq!(mem::size_of::<TextRunPrimitive>(), 88, "TextRunPrimitive size changed");
 }
--- a/gfx/wr/webrender/src/renderer.rs
+++ b/gfx/wr/webrender/src/renderer.rs
@@ -4246,17 +4246,17 @@ impl Renderer {
         let debug_renderer = match self.debug.get_mut(&mut self.device) {
             Some(render) => render,
             None => return,
         };
 
         for item in items {
             match item {
                 DebugItem::Rect { rect, color } => {
-                    let inner_color = color.scale_alpha(0.2).into();
+                    let inner_color = color.scale_alpha(0.1).into();
                     let outer_color = (*color).into();
 
                     debug_renderer.add_quad(
                         rect.origin.x,
                         rect.origin.y,
                         rect.origin.x + rect.size.width,
                         rect.origin.y + rect.size.height,
                         inner_color,
--- a/gfx/wr/webrender/src/util.rs
+++ b/gfx/wr/webrender/src/util.rs
@@ -1,20 +1,23 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 use api::{BorderRadius, DeviceIntPoint, DeviceIntRect, DeviceIntSize, DevicePixelScale};
 use api::{LayoutPixel, DeviceRect, WorldPixel, RasterRect};
 use euclid::{Point2D, Rect, Size2D, TypedPoint2D, TypedRect, TypedSize2D, Vector2D};
 use euclid::{TypedTransform2D, TypedTransform3D, TypedVector2D, TypedScale};
+use malloc_size_of::{MallocShallowSizeOf, MallocSizeOf, MallocSizeOfOps};
 use num_traits::Zero;
 use plane_split::{Clipper, Polygon};
 use std::{i32, f32, fmt, ptr};
 use std::borrow::Cow;
+use std::os::raw::c_void;
+use std::sync::Arc;
 
 
 // Matches the definition of SK_ScalarNearlyZero in Skia.
 const NEARLY_ZERO: f32 = 1.0 / 4096.0;
 
 /// A typesafe helper that separates new value construction from
 /// vector growing, allowing LLVM to ideally construct the element in place.
 pub struct Allocation<'a, T: 'a> {
@@ -972,8 +975,53 @@ impl<T> ComparableVec<T> where T: Partia
         self.current_index += 1;
     }
 
     /// Return true if the contents of the vec are the same as the previous time.
     pub fn is_valid(&self) -> bool {
         self.is_same && self.prev_len == self.current_index
     }
 }
+
+/// Arc wrapper to support measurement via MallocSizeOf.
+///
+/// Memory reporting for Arcs is tricky because of the risk of double-counting.
+/// One way to measure them is to keep a table of pointers that have already been
+/// traversed. The other way is to use knowledge of the program structure to
+/// identify which Arc instances should be measured and which should be skipped to
+/// avoid double-counting.
+///
+/// This struct implements the second approach. It identifies the "main" pointer
+/// to the Arc-ed resource, and measures the buffer as if it were an owned pointer.
+/// The programmer should ensure that there is at most one PrimaryArc for a given
+/// underlying ArcInner.
+#[cfg_attr(feature = "capture", derive(Serialize))]
+#[cfg_attr(feature = "replay", derive(Deserialize))]
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+pub struct PrimaryArc<T>(pub Arc<T>);
+
+impl<T> ::std::ops::Deref for PrimaryArc<T> {
+    type Target = Arc<T>;
+
+    #[inline]
+    fn deref(&self) -> &Arc<T> {
+        &self.0
+    }
+}
+
+impl<T> MallocShallowSizeOf for PrimaryArc<T> {
+    fn shallow_size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
+        unsafe {
+            // This is a bit sketchy, but std::sync::Arc doesn't expose the
+            // base pointer.
+            let raw_arc_ptr: *const Arc<T> = &self.0;
+            let raw_ptr_ptr: *const *const c_void = raw_arc_ptr as _;
+            let raw_ptr = *raw_ptr_ptr;
+            (ops.size_of_op)(raw_ptr)
+        }
+    }
+}
+
+impl<T: MallocSizeOf> MallocSizeOf for PrimaryArc<T> {
+    fn size_of(&self, ops: &mut MallocSizeOfOps) -> usize {
+        self.shallow_size_of(ops) + (**self).size_of(ops)
+    }
+}
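
The PrimaryArc comment above explains the double-counting concern for memory reporting of shared Arcs. As a loose illustration of the idea (the reported_bytes helper is hypothetical, not the real MallocSizeOf plumbing): exactly one holder, here the interned key, wraps its Arc and reports the shared buffer, while other clones, such as the template's, are skipped.

    use std::sync::Arc;

    // Marker wrapper: only the "primary" holder measures the shared buffer.
    struct PrimaryArc<T>(Arc<T>);

    impl<T> PrimaryArc<Vec<T>> {
        // Stand-in for the MallocSizeOf measurement of the Vec's heap buffer.
        fn reported_bytes(&self) -> usize {
            self.0.len() * std::mem::size_of::<T>()
        }
    }

    fn main() {
        let glyphs: Arc<Vec<u32>> = Arc::new(vec![0u32; 1024]);
        let template_copy = Arc::clone(&glyphs); // like TextRunTemplate: ignored by reporting
        let key_copy = PrimaryArc(glyphs);       // like TextRunKey: measured exactly once
        assert_eq!(key_copy.reported_bytes(), 1024 * std::mem::size_of::<u32>());
        assert_eq!(Arc::strong_count(&template_copy), 2);
    }
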
--- a/js/src/builtin/Stream.cpp
+++ b/js/src/builtin/Stream.cpp
@@ -1535,16 +1535,19 @@ static MOZ_MUST_USE JSObject* ReadableSt
   // Step 1: Let prototype be null.
   // Step 2: If forAuthorCode is true, set prototype to %ObjectPrototype%.
   RootedObject templateObject(
       cx,
       forAuthorCode == ForAuthorCodeBool::Yes
           ? cx->realm()->getOrCreateIterResultTemplateObject(cx)
           : cx->realm()->getOrCreateIterResultWithoutPrototypeTemplateObject(
                 cx));
+  if (!templateObject) {
+    return nullptr;
+  }
 
   // Step 3: Assert: Type(done) is Boolean (implicit).
 
   // Step 4: Let obj be ObjectCreate(prototype).
   NativeObject* obj;
   JS_TRY_VAR_OR_RETURN_NULL(
       cx, obj,
       NativeObject::createWithTemplate(cx, gc::DefaultHeap, templateObject));
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/stream/bug-1515816.js
@@ -0,0 +1,17 @@
+// |jit-test| --no-ion; --no-baseline; skip-if: !('oomAfterAllocations' in this)
+// Don't crash on OOM in ReadableStreamDefaultReader.prototype.read().
+
+for (let n = 1; n < 1000; n++) {
+  let stream = new ReadableStream({
+    start(controller) {
+      controller.enqueue(7);
+    }
+  });
+  let reader = stream.getReader();
+  oomAfterAllocations(n);
+  try {
+    reader.read();
+    n = 1000;
+  } catch {}
+  resetOOMFailure();
+}
--- a/layout/base/nsDocumentViewer.cpp
+++ b/layout/base/nsDocumentViewer.cpp
@@ -2342,19 +2342,19 @@ UniquePtr<ServoStyleSet> nsDocumentViewe
   // that non-XUL (typically HTML) documents commonly use.
   styleSet->AppendStyleSheet(SheetType::Agent, cache->MinimalXULSheet());
 
   // Only load the full XUL sheet if we'll need it.
   if (aDocument->LoadsFullXULStyleSheetUpFront()) {
     styleSet->AppendStyleSheet(SheetType::Agent, cache->XULSheet());
   }
 
-  // Append chrome sheets (scrollbars + forms).
   styleSet->AppendStyleSheet(SheetType::Agent, cache->FormsSheet());
   styleSet->AppendStyleSheet(SheetType::Agent, cache->ScrollbarsSheet());
+  styleSet->AppendStyleSheet(SheetType::Agent, cache->PluginProblemSheet());
 
   for (StyleSheet* sheet : *sheetService->AgentStyleSheets()) {
     styleSet->AppendStyleSheet(SheetType::Agent, sheet);
   }
 
   return styleSet;
 }
 
--- a/layout/style/UserAgentStyleSheetList.h
+++ b/layout/style/UserAgentStyleSheetList.h
@@ -23,13 +23,14 @@ STYLE_SHEET(ContentEditable, "resource:/
 STYLE_SHEET(CounterStyles, "resource://gre-resources/counterstyles.css", false)
 STYLE_SHEET(DesignMode, "resource://gre/res/designmode.css", true)
 STYLE_SHEET(Forms, "resource://gre-resources/forms.css", true)
 STYLE_SHEET(HTML, "resource://gre-resources/html.css", false)
 STYLE_SHEET(MathML, "resource://gre-resources/mathml.css", true)
 STYLE_SHEET(MinimalXUL, "chrome://global/content/minimal-xul.css", false)
 STYLE_SHEET(NoFrames, "resource://gre-resources/noframes.css", true)
 STYLE_SHEET(NoScript, "resource://gre-resources/noscript.css", true)
+STYLE_SHEET(PluginProblem, "resource://gre-resources/pluginproblem.css", true)
 STYLE_SHEET(Quirk, "resource://gre-resources/quirk.css", false)
 STYLE_SHEET(Scrollbars, "chrome://global/skin/scrollbars.css", true)
 STYLE_SHEET(SVG, "resource://gre/res/svg.css", false)
 STYLE_SHEET(UA, "resource://gre-resources/ua.css", true)
 STYLE_SHEET(XUL, "chrome://global/content/xul.css", true)
--- a/layout/style/jar.mn
+++ b/layout/style/jar.mn
@@ -5,16 +5,17 @@
 toolkit.jar:
 *  res/ua.css                                (res/ua.css)
 *  res/html.css                              (res/html.css)
    res/quirk.css                             (res/quirk.css)
    res/counterstyles.css                     (res/counterstyles.css)
    res/noscript.css                          (res/noscript.css)
    res/noframes.css                          (res/noframes.css)
 *  res/forms.css                             (res/forms.css)
+   res/pluginproblem.css                     (res/pluginproblem.css)
    res/arrow.gif                             (res/arrow.gif)
    res/arrow-left.gif                        (res/arrow-left.gif)
    res/arrow-right.gif                       (res/arrow-right.gif)
    res/arrowd.gif                            (res/arrowd.gif)
    res/arrowd-left.gif                       (res/arrowd-left.gif)
    res/arrowd-right.gif                      (res/arrowd-right.gif)
    res/accessiblecaret-normal@1x.png         (res/accessiblecaret-normal@1x.png)
    res/accessiblecaret-normal@1.5x.png       (res/accessiblecaret-normal@1.5x.png)
rename from toolkit/pluginproblem/content/pluginProblemBinding.css
rename to layout/style/res/pluginproblem.css
--- a/mobile/android/installer/package-manifest.in
+++ b/mobile/android/installer/package-manifest.in
@@ -162,17 +162,16 @@
 #endif
 
 @BINPATH@/components/ClearDataService.manifest
 @BINPATH@/components/ClearDataService.js
 
 @BINPATH@/components/nsUpdateTimerManager.manifest
 @BINPATH@/components/nsUpdateTimerManager.js
 
-@BINPATH@/components/pluginGlue.manifest
 @BINPATH@/components/ProcessSingleton.manifest
 @BINPATH@/components/MainProcessSingleton.js
 @BINPATH@/components/ContentProcessSingleton.js
 @BINPATH@/components/nsURLFormatter.manifest
 @BINPATH@/components/nsURLFormatter.js
 @BINPATH@/components/ContentPrefService2.manifest
 @BINPATH@/components/ContentPrefService2.js
 @BINPATH@/components/HandlerService.manifest
--- a/mozglue/build/MozglueUtils.h
+++ b/mozglue/build/MozglueUtils.h
@@ -4,51 +4,137 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef mozilla_glue_MozglueUtils_h
 #define mozilla_glue_MozglueUtils_h
 
 #include <windows.h>
 
+#include "mozilla/Atomics.h"
 #include "mozilla/Attributes.h"
 
 namespace mozilla {
 namespace glue {
 
+#ifdef DEBUG
+
+class MOZ_STATIC_CLASS Win32SRWLock final {
+ public:
+  // Microsoft guarantees that '0' is never a valid thread id
+  // https://docs.microsoft.com/en-ca/windows/desktop/ProcThread/thread-handles-and-identifiers
+  static const DWORD kInvalidThreadId = 0;
+
+  constexpr Win32SRWLock()
+      : mExclusiveThreadId(kInvalidThreadId), mLock(SRWLOCK_INIT) {}
+
+  ~Win32SRWLock() { MOZ_ASSERT(mExclusiveThreadId == kInvalidThreadId); }
+
+  void LockShared() {
+    MOZ_ASSERT(
+        mExclusiveThreadId != GetCurrentThreadId(),
+        "Deadlock detected - A thread attempted to acquire a shared lock on "
+        "a SRWLOCK when it already owns the exclusive lock on it.");
+
+    ::AcquireSRWLockShared(&mLock);
+  }
+
+  void UnlockShared() { ::ReleaseSRWLockShared(&mLock); }
+
+  void LockExclusive() {
+    MOZ_ASSERT(
+        mExclusiveThreadId != GetCurrentThreadId(),
+        "Deadlock detected - A thread attempted to acquire an exclusive lock "
+        "on a SRWLOCK when it already owns the exclusive lock on it.");
+
+    ::AcquireSRWLockExclusive(&mLock);
+    mExclusiveThreadId = GetCurrentThreadId();
+  }
+
+  void UnlockExclusive() {
+    MOZ_ASSERT(mExclusiveThreadId == GetCurrentThreadId());
+
+    mExclusiveThreadId = kInvalidThreadId;
+    ::ReleaseSRWLockExclusive(&mLock);
+  }
+
+  Win32SRWLock(const Win32SRWLock&) = delete;
+  Win32SRWLock(Win32SRWLock&&) = delete;
+  Win32SRWLock& operator=(const Win32SRWLock&) = delete;
+  Win32SRWLock& operator=(Win32SRWLock&&) = delete;
+
+ private:
+  // "Relaxed" memory ordering is fine. Threads will see other thread IDs
+  // appear here in some non-deterministic ordering (or not at all) and simply
+  // ignore them.
+  //
+  // But a thread will only read its own ID if it previously wrote it, and a
+  // single thread doesn't need a memory barrier to read its own write.
+
+  Atomic<DWORD, Relaxed> mExclusiveThreadId;
+  SRWLOCK mLock;
+};
+
+#else  // DEBUG
+
+class MOZ_STATIC_CLASS Win32SRWLock final {
+ public:
+  constexpr Win32SRWLock() : mLock(SRWLOCK_INIT) {}
+
+  void LockShared() { ::AcquireSRWLockShared(&mLock); }
+
+  void UnlockShared() { ::ReleaseSRWLockShared(&mLock); }
+
+  void LockExclusive() { ::AcquireSRWLockExclusive(&mLock); }
+
+  void UnlockExclusive() { ::ReleaseSRWLockExclusive(&mLock); }
+
+  ~Win32SRWLock() = default;
+
+  Win32SRWLock(const Win32SRWLock&) = delete;
+  Win32SRWLock(Win32SRWLock&&) = delete;
+  Win32SRWLock& operator=(const Win32SRWLock&) = delete;
+  Win32SRWLock& operator=(Win32SRWLock&&) = delete;
+
+ private:
+  SRWLOCK mLock;
+};
+
+#endif
+
 class MOZ_RAII AutoSharedLock final {
  public:
-  explicit AutoSharedLock(SRWLOCK& aLock) : mLock(aLock) {
-    ::AcquireSRWLockShared(&aLock);
+  explicit AutoSharedLock(Win32SRWLock& aLock) : mLock(aLock) {
+    mLock.LockShared();
   }
 
-  ~AutoSharedLock() { ::ReleaseSRWLockShared(&mLock); }
+  ~AutoSharedLock() { mLock.UnlockShared(); }
 
   AutoSharedLock(const AutoSharedLock&) = delete;
   AutoSharedLock(AutoSharedLock&&) = delete;
   AutoSharedLock& operator=(const AutoSharedLock&) = delete;
   AutoSharedLock& operator=(AutoSharedLock&&) = delete;
 
  private:
-  SRWLOCK& mLock;
+  Win32SRWLock& mLock;
 };
 
 class MOZ_RAII AutoExclusiveLock final {
  public:
-  explicit AutoExclusiveLock(SRWLOCK& aLock) : mLock(aLock) {
-    ::AcquireSRWLockExclusive(&aLock);
+  explicit AutoExclusiveLock(Win32SRWLock& aLock) : mLock(aLock) {
+    mLock.LockExclusive();
   }
 
-  ~AutoExclusiveLock() { ::ReleaseSRWLockExclusive(&mLock); }
+  ~AutoExclusiveLock() { mLock.UnlockExclusive(); }
 
   AutoExclusiveLock(const AutoExclusiveLock&) = delete;
   AutoExclusiveLock(AutoExclusiveLock&&) = delete;
   AutoExclusiveLock& operator=(const AutoExclusiveLock&) = delete;
   AutoExclusiveLock& operator=(AutoExclusiveLock&&) = delete;
 
  private:
-  SRWLOCK& mLock;
+  Win32SRWLock& mLock;
 };
 
 }  // namespace glue
 }  // namespace mozilla
 
 #endif  //  mozilla_glue_MozglueUtils_h
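(Aside: a minimal usage sketch of the new wrapper, with invented names that are
not part of this patch. The point of the DEBUG variant is that a thread
re-entering a lock it already holds exclusively trips the MOZ_ASSERT instead of
deadlocking silently.)

// Hypothetical consumer of Win32SRWLock; assumes MozglueUtils.h is included.
static mozilla::glue::Win32SRWLock gExampleLock;
static int gExampleValue = 0;

int ReadExampleValue() {
  // Any number of readers may hold the shared lock concurrently.
  mozilla::glue::AutoSharedLock lock(gExampleLock);
  return gExampleValue;
}

void WriteExampleValue(int aValue) {
  // Writers are exclusive; in DEBUG builds, re-acquiring from the same thread
  // asserts with the "Deadlock detected" message above.
  mozilla::glue::AutoExclusiveLock lock(gExampleLock);
  gExampleValue = aValue;
}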
--- a/mozglue/build/WindowsDllBlocklist.cpp
+++ b/mozglue/build/WindowsDllBlocklist.cpp
@@ -42,17 +42,17 @@
 #include "mozilla/AutoProfilerLabel.h"
 #include "mozilla/glue/WindowsDllServices.h"
 
 using namespace mozilla;
 
 using CrashReporter::Annotation;
 using CrashReporter::AnnotationToString;
 
-static SRWLOCK gDllServicesLock = SRWLOCK_INIT;
+static glue::Win32SRWLock gDllServicesLock;
 static glue::detail::DllServicesBase* gDllServices;
 
 #define DLL_BLOCKLIST_ENTRY(name, ...) {name, __VA_ARGS__},
 #define DLL_BLOCKLIST_STRING_TYPE const char*
 #include "mozilla/WindowsDllBlocklistDefs.h"
 
 // define this for very verbose dll load debug spew
 #undef DEBUG_very_verbose
--- a/netwerk/base/nsINetUtil.idl
+++ b/netwerk/base/nsINetUtil.idl
@@ -219,15 +219,20 @@ interface nsINetUtil : nsISupports
    *
    * @param aPolicy
    *        the policy integer code (defined in nsIHttpChannel.idl)
    * @return aPolicyString
    *         referrer policy string from code
    */
   ACString getReferrerPolicyString(in unsigned long aPolicy);
 
+  /**
+   * This is test-only. Send an IPC message to let the socket process send
+   * telemetry.
+   */
+  void socketProcessTelemetryPing();
 
   /**
    * This is a void method that is C++ implemented and always
    * returns NS_ERROR_NOT_IMPLEMENTED. To be used for testing.
    */
   void notImplemented();
 };
--- a/netwerk/base/nsIOService.cpp
+++ b/netwerk/base/nsIOService.cpp
@@ -502,16 +502,25 @@ RefPtr<MemoryReportingProcess> nsIOServi
   if (!Preferences::GetBool("network.process.enabled") || !SocketProcessReady()) {
     return nullptr;
   }
 
   return new SocketProcessMemoryReporter();
 }
 
+NS_IMETHODIMP
+nsIOService::SocketProcessTelemetryPing() {
+  CallOrWaitForSocketProcess([]() {
+    Unused << gIOService->mSocketProcess->GetActor()
+                  ->SendSocketProcessTelemetryPing();
+  });
+  return NS_OK;
+}
+
 NS_IMPL_ISUPPORTS(nsIOService, nsIIOService, nsINetUtil, nsISpeculativeConnect,
                   nsIObserver, nsIIOServiceInternal, nsISupportsWeakReference)
 
 ////////////////////////////////////////////////////////////////////////////////
 
 nsresult nsIOService::RecheckCaptivePortal() {
   MOZ_ASSERT(NS_IsMainThread(), "Must be called on the main thread");
   if (!mCaptivePortalService) {
--- a/netwerk/ipc/PSocketProcess.ipdl
+++ b/netwerk/ipc/PSocketProcess.ipdl
@@ -38,12 +38,14 @@ child:
   async PreferenceUpdate(Pref pref);
   async RequestMemoryReport(uint32_t generation,
                             bool anonymize,
                             bool minimizeMemoryUsage,
                             MaybeFileDesc DMDFile);
   async SetOffline(bool offline);
   async InitSocketProcessBridgeParent(ProcessId processId, Endpoint<PSocketProcessBridgeParent> endpoint);
   async InitProfiler(Endpoint<PProfilerChild> aEndpoint);
+  // test-only
+  async SocketProcessTelemetryPing();
 };
 
 } // namespace net
 } // namespace mozilla
--- a/netwerk/ipc/SocketProcessChild.cpp
+++ b/netwerk/ipc/SocketProcessChild.cpp
@@ -146,16 +146,23 @@ mozilla::ipc::IPCResult SocketProcessChi
     Endpoint<PProfilerChild>&& aEndpoint) {
 #ifdef MOZ_GECKO_PROFILER
   mProfilerController =
       mozilla::ChildProfilerController::Create(std::move(aEndpoint));
 #endif
   return IPC_OK();
 }
 
+mozilla::ipc::IPCResult SocketProcessChild::RecvSocketProcessTelemetryPing() {
+  const uint32_t kExpectedUintValue = 42;
+  Telemetry::ScalarSet(Telemetry::ScalarID::TELEMETRY_TEST_SOCKET_ONLY_UINT,
+      kExpectedUintValue);
+  return IPC_OK();
+}
+
 void SocketProcessChild::DestroySocketProcessBridgeParent(ProcessId aId) {
   MOZ_ASSERT(NS_IsMainThread());
 
   mSocketProcessBridgeParentMap.Remove(aId);
 }
 
 }  // namespace net
 }  // namespace mozilla
--- a/netwerk/ipc/SocketProcessChild.h
+++ b/netwerk/ipc/SocketProcessChild.h
@@ -37,16 +37,17 @@ class SocketProcessChild final : public 
       const uint32_t& generation, const bool& anonymize,
       const bool& minimizeMemoryUsage, const MaybeFileDesc& DMDFile) override;
   mozilla::ipc::IPCResult RecvSetOffline(const bool& aOffline) override;
   mozilla::ipc::IPCResult RecvInitSocketProcessBridgeParent(
       const ProcessId& aContentProcessId,
       Endpoint<mozilla::net::PSocketProcessBridgeParent>&& aEndpoint) override;
   mozilla::ipc::IPCResult RecvInitProfiler(
       Endpoint<mozilla::PProfilerChild>&& aEndpoint) override;
+  mozilla::ipc::IPCResult RecvSocketProcessTelemetryPing() override;
 
   void CleanUp();
   void DestroySocketProcessBridgeParent(ProcessId aId);
 
  private:
   // Mapping of content process id and the SocketProcessBridgeParent.
   // This table keeps SocketProcessBridgeParent alive in socket process.
   nsRefPtrHashtable<nsUint32HashKey, SocketProcessBridgeParent>
--- a/third_party/prio/README-mozilla
+++ b/third_party/prio/README-mozilla
@@ -1,12 +1,12 @@
 This directory contains the Prio source from the upstream repo:
 https://github.com/mozilla/libprio
 
-Current version: 1.2 [commit 02a81fb652d385d0f4f10989d051317097ab55fb]
+Current version: 1.4 [commit a95cfdd5eaf7104582709c54ef23395d24d7f7fd]
 
 UPDATING:
 
 Our in-tree copy of Prio does not depend on any generated files from the
 upstream build system. Therefore, it should be sufficient to simply overwrite
 the in-tree files with the updated ones from upstream to perform updates.
 
 To simplify this, the in-tree copy can be updated by running
--- a/third_party/prio/include/mprio.h
+++ b/third_party/prio/include/mprio.h
@@ -72,109 +72,140 @@ void Prio_clear();
 
 /*
  * PrioConfig holds the system parameters. The two relevant things determined
  * by the config object are:
  *    (1) the number of data fields we are collecting, and
  *    (2) the modulus we use for modular arithmetic.
  * The default configuration uses an 87-bit modulus.
  *
+ * The value `nFields` must be in the range `0 < nFields <= max`, where `max`
+ * is the value returned by the function `PrioConfig_maxDataFields()` below.
+ *
  * The `batch_id` field specifies which "batch" of aggregate statistics we are
  * computing. For example, if the aggregate statistics are computed every 24
  * hours, the `batch_id` might be set to an encoding of the date. The clients
  * and servers must all use the same `batch_id` for each run of the protocol.
  * Each set of aggregate statistics should use a different `batch_id`.
  *
  * `PrioConfig_new` does not keep a pointer to the `batch_id` string that the
  * caller passes in, so you may free the `batch_id` string as soon as
  * `PrioConfig_new` returns.
  */
-PrioConfig PrioConfig_new(int n_fields, PublicKey server_a, PublicKey server_b,
-                          const unsigned char* batch_id,
-                          unsigned int batch_id_len);
+PrioConfig PrioConfig_new(int nFields, PublicKey serverA, PublicKey serverB,
+                          const unsigned char* batchId,
+                          unsigned int batchIdLen);
 void PrioConfig_clear(PrioConfig cfg);
 int PrioConfig_numDataFields(const_PrioConfig cfg);
 
 /*
+ * Return the maximum number of data fields that the implementation supports.
+ */
+int PrioConfig_maxDataFields(void);
+
+/*
  * Create a PrioConfig object with no encryption keys.  This routine is
  * useful for testing, but PrioClient_encode() will always fail when used with
  * this config.
  */
-PrioConfig PrioConfig_newTest(int n_fields);
+PrioConfig PrioConfig_newTest(int nFields);
 
 /*
  * We use the PublicKey and PrivateKey objects for public-key encryption. Each
  * Prio server has an associated public key, and the clients use these keys to
  * encrypt messages to the servers.
  */
 SECStatus Keypair_new(PrivateKey* pvtkey, PublicKey* pubkey);
 
 /*
- * Import a new curve25519 public key from the raw bytes given. The key passed
- * in
- * as `data` should be of length `CURVE25519_KEY_LEN`. This function allocates
- * a new PublicKey object, which the caller must free using `PublicKey_clear`.
+ * Import a new curve25519 public/private key from the raw bytes given.  When
+ * importing a private key, you must pass in the corresponding public key as
+ * well. The byte arrays given as input should be of length
+ * `CURVE25519_KEY_LEN`.
+ *
+ * These functions will allocate a new `PublicKey`/`PrivateKey` object, which
+ * the caller must free using `PublicKey_clear`/`PrivateKey_clear`.
  */
 SECStatus PublicKey_import(PublicKey* pk, const unsigned char* data,
                            unsigned int dataLen);
-
-/*
- * Import a new curve25519 public key from a hex string that contains only the
- * characters 0-9a-fA-F. The hex string passed in as `hex_data` should be of
- * length `CURVE25519_KEY_LEN_HEX`. This function allocates a new PublicKey
- * object, which the caller must free using `PublicKey_clear`.
- */
-SECStatus PublicKey_import_hex(PublicKey* pk, const unsigned char* hex_data,
-                               unsigned int dataLen);
+SECStatus PrivateKey_import(PrivateKey* sk, const unsigned char* privData,
+                            unsigned int privDataLen,
+                            const unsigned char* pubData,
+                            unsigned int pubDataLen);
 
 /*
- * Export a curve25519 public key as a raw byte-array.
+ * Import a new curve25519 public/private key from a hex string that contains
+ * only the characters 0-9a-fA-F.
+ *
+ * The hex strings passed in must each be of length `CURVE25519_KEY_LEN_HEX`.
+ * These functions will allocate a new `PublicKey`/`PrivateKey` object, which
+ * the caller must free using `PublicKey_clear`/`PrivateKey_clear`.
  */
-SECStatus PublicKey_export(const_PublicKey pk,
-                           unsigned char data[CURVE25519_KEY_LEN]);
+SECStatus PublicKey_import_hex(PublicKey* pk, const unsigned char* hexData,
+                               unsigned int dataLen);
+SECStatus PrivateKey_import_hex(PrivateKey* sk,
+                                const unsigned char* privHexData,
+                                unsigned int privDataLen,
+                                const unsigned char* pubHexData,
+                                unsigned int pubDataLen);
 
 /*
- * Export a curve25519 public key as a NULL-terminated hex string.
+ * Export a curve25519 key as a raw byte-array.
+ *
+ * The output buffer `data` must have length exactly `CURVE25519_KEY_LEN`.
  */
-SECStatus PublicKey_export_hex(const_PublicKey pk,
-                               unsigned char data[CURVE25519_KEY_LEN_HEX + 1]);
+SECStatus PublicKey_export(const_PublicKey pk, unsigned char* data,
+                           unsigned int dataLen);
+SECStatus PrivateKey_export(PrivateKey sk, unsigned char* data,
+                            unsigned int dataLen);
+
+/*
+ * Export a curve25519 key as a NULL-terminated hex string.
+ *
+ * The output buffer `data` must have length exactly `CURVE25519_KEY_LEN_HEX +
+ * 1`.
+ */
+SECStatus PublicKey_export_hex(const_PublicKey pk, unsigned char* data,
+                               unsigned int dataLen);
+SECStatus PrivateKey_export_hex(PrivateKey sk, unsigned char* data,
+                                unsigned int dataLen);
 
 void PublicKey_clear(PublicKey pubkey);
 void PrivateKey_clear(PrivateKey pvtkey);
 
 /*
  *  PrioPacketClient_encode
  *
  * Takes as input a pointer to an array (`data_in`) of boolean values
  * whose length is equal to the number of data fields specified in
  * the config. It then encodes the data for servers A and B into a
  * string.
  *
  * NOTE: The caller must free() the strings `for_server_a` and
  * `for_server_b` to avoid memory leaks.
  */
 SECStatus PrioClient_encode(const_PrioConfig cfg, const bool* data_in,
-                            unsigned char** for_server_a, unsigned int* aLen,
-                            unsigned char** for_server_b, unsigned int* bLen);
+                            unsigned char** forServerA, unsigned int* aLen,
+                            unsigned char** forServerB, unsigned int* bLen);
 
 /*
  * Generate a new PRG seed using the NSS global randomness source.
  * Use this routine to initialize the secret that the two Prio servers
  * share.
  */
 SECStatus PrioPRGSeed_randomize(PrioPRGSeed* seed);
 
 /*
  * The PrioServer object holds the state of the Prio servers.
  * Pass in the _same_ secret PRGSeed when initializing the two servers.
  * The PRGSeed must remain secret to the two servers.
  */
-PrioServer PrioServer_new(const_PrioConfig cfg, PrioServerId server_idx,
-                          PrivateKey server_priv,
-                          const PrioPRGSeed server_shared_secret);
+PrioServer PrioServer_new(const_PrioConfig cfg, PrioServerId serverIdx,
+                          PrivateKey serverPriv,
+                          const PrioPRGSeed serverSharedSecret);
 void PrioServer_clear(PrioServer s);
 
 /*
  * After receiving a client packet, each of the servers generate
  * a PrioVerifier object that they use to check whether the client's
  * encoded packet is well formed.
  */
 PrioVerifier PrioVerifier_new(PrioServer s);
@@ -250,21 +281,24 @@ void PrioTotalShare_clear(PrioTotalShare
 SECStatus PrioTotalShare_set_data(PrioTotalShare t, const_PrioServer s);
 
 SECStatus PrioTotalShare_write(const_PrioTotalShare t, msgpack_packer* pk);
 SECStatus PrioTotalShare_read(PrioTotalShare t, msgpack_unpacker* upk,
                               const_PrioConfig cfg);
 
 /*
  * Read the output data into an array of unsigned longs. You should
- * be sure that each data value can fit into a single long and that
- * the pointer `output` points to a buffer large enough to store
- * one long per data field.
+ * be sure that each data value can fit into a single `unsigned long long`
+ * and that the pointer `output` points to a buffer large enough to
+ * store one `unsigned long long` per data field.
+ *
+ * This function returns failure if some final data value is too
+ * large to fit in an `unsigned long long`.
  */
-SECStatus PrioTotalShare_final(const_PrioConfig cfg, unsigned long* output,
+SECStatus PrioTotalShare_final(const_PrioConfig cfg, unsigned long long* output,
                                const_PrioTotalShare tA,
                                const_PrioTotalShare tB);
 
 #endif /* __PRIO_H__ */
 
 #ifdef __cplusplus
 }
 #endif
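(Aside: a hedged sketch of how the reworked key routines fit together after this
header change. The function name `example_key_roundtrip` and the abbreviated
error handling are illustrative only, and it assumes, as the library's own
cleanup paths do, that the *_clear helpers tolerate a NULL argument.)

SECStatus
example_key_roundtrip(void)
{
  SECStatus rv = SECFailure;
  PublicKey pk = NULL;
  PrivateKey sk = NULL;
  PrivateKey skCopy = NULL;
  unsigned char pubRaw[CURVE25519_KEY_LEN];
  unsigned char privRaw[CURVE25519_KEY_LEN];
  unsigned char pubHex[CURVE25519_KEY_LEN_HEX + 1];

  if (Keypair_new(&sk, &pk) != SECSuccess)
    goto cleanup;

  // Exports now take an explicit buffer length and fail on a mismatch.
  if (PublicKey_export(pk, pubRaw, sizeof(pubRaw)) != SECSuccess)
    goto cleanup;
  if (PrivateKey_export(sk, privRaw, sizeof(privRaw)) != SECSuccess)
    goto cleanup;
  if (PublicKey_export_hex(pk, pubHex, sizeof(pubHex)) != SECSuccess)
    goto cleanup;

  // Importing a private key now requires the matching public key as well.
  if (PrivateKey_import(&skCopy, privRaw, sizeof(privRaw),
                        pubRaw, sizeof(pubRaw)) != SECSuccess)
    goto cleanup;

  rv = SECSuccess;

cleanup:
  PublicKey_clear(pk);
  PrivateKey_clear(sk);
  PrivateKey_clear(skCopy);
  return rv;
}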
--- a/third_party/prio/prio/config.c
+++ b/third_party/prio/prio/config.c
@@ -46,16 +46,23 @@ initialize_roots(MPArray arr, const char
     for (unsigned int i = RootWidth, j = 1; i < n_chars; i += RootWidth, j++) {
       MP_CHECK(mp_read_radix(&arr->data[j], &values[i], 16));
     }
   }
 
   return SECSuccess;
 }
 
+int
+PrioConfig_maxDataFields(void)
+{
+  const int n_roots = 1 << Generator2Order;
+  return (n_roots >> 1) - 1;
+}
+
 PrioConfig
 PrioConfig_new(int n_fields, PublicKey server_a, PublicKey server_b,
                const unsigned char* batch_id, unsigned int batch_id_len)
 {
   SECStatus rv = SECSuccess;
   PrioConfig cfg = malloc(sizeof(*cfg));
   if (!cfg)
     return NULL;
@@ -66,20 +73,18 @@ PrioConfig_new(int n_fields, PublicKey s
   cfg->server_b_pub = server_b;
   cfg->num_data_fields = n_fields;
   cfg->n_roots = 1 << Generator2Order;
   MP_DIGITS(&cfg->modulus) = NULL;
   MP_DIGITS(&cfg->inv2) = NULL;
   cfg->roots = NULL;
   cfg->rootsInv = NULL;
 
-  if (cfg->num_data_fields >= cfg->n_roots) {
-    rv = SECFailure;
-    goto cleanup;
-  }
+  P_CHECKCB(cfg->n_roots > 1);
+  P_CHECKCB(cfg->num_data_fields <= PrioConfig_maxDataFields());
 
   P_CHECKA(cfg->batch_id = malloc(batch_id_len));
   strncpy((char*)cfg->batch_id, (char*)batch_id, batch_id_len);
 
   MP_CHECKC(mp_init(&cfg->modulus));
   MP_CHECKC(mp_read_radix(&cfg->modulus, Modulus, 16));
 
   // Compute  2^{-1} modulo M
--- a/third_party/prio/prio/encrypt.c
+++ b/third_party/prio/prio/encrypt.c
@@ -21,25 +21,57 @@
 // Use 96-bit IV
 #define GCM_IV_LEN_BYTES 12
 // Use 128-bit auth tag
 #define GCM_TAG_LEN_BYTES 16
 
 #define PRIO_TAG "PrioPacket"
 #define AAD_LEN (strlen(PRIO_TAG) + CURVE25519_KEY_LEN + GCM_IV_LEN_BYTES)
 
-// The all-zeros curve25519 public key, as DER-encoded SKPI blob.
+// For an example of NSS curve25519 import/export code, see:
+// https://searchfox.org/nss/rev/cfd5fcba7efbfe116e2c08848075240ec3a92718/gtests/pk11_gtest/pk11_curve25519_unittest.cc#66
+
+// The all-zeros curve25519 public key, as DER-encoded SPKI blob.
 static const uint8_t curve25519_spki_zeros[] = {
   0x30, 0x39, 0x30, 0x14, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce, 0x3d, 0x02,
   0x01, 0x06, 0x09, 0x2b, 0x06, 0x01, 0x04, 0x01, 0xda, 0x47, 0x0f, 0x01,
   0x03, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
   0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 };
 
+// The all-zeros curve25519 private key, as a PKCS#8 blob.
+static const uint8_t curve25519_priv_zeros[] = {
+  0x30, 0x67, 0x02, 0x01, 0x00, 0x30, 0x14, 0x06, 0x07, 0x2a, 0x86, 0x48, 0xce,
+  0x3d, 0x02, 0x01, 0x06, 0x09, 0x2b, 0x06, 0x01, 0x04, 0x01, 0xda, 0x47, 0x0f,
+  0x01, 0x04, 0x4c, 0x30, 0x4a, 0x02, 0x01, 0x01, 0x04, 0x20,
+
+  /* Byte index 36:  32 bytes of curve25519 private key. */
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+  /* misc type fields */
+  0xa1, 0x23, 0x03, 0x21,
+
+  /* Byte index 73:  32 bytes of curve25519 public key. */
+  0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+  0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+// Index into `curve25519_priv_zeros` at which the private key begins.
+static const size_t curve25519_priv_sk_offset = 36;
+// Index into `curve25519_priv_zeros` at which the public key begins.
+static const size_t curve25519_priv_pk_offset = 73;
+
+static SECStatus key_from_hex(
+  unsigned char key_out[CURVE25519_KEY_LEN],
+  const unsigned char hex_in[CURVE25519_KEY_LEN_HEX]);
+
 // Note that we do not use isxdigit because it is locale-dependent
 // See: https://github.com/mozilla/libprio/issues/20
 static inline char
 is_hex_digit(char c)
 {
   return ('0' <= c && c <= '9') || ('a' <= c && c <= 'f') ||
          ('A' <= c && c <= 'F');
 }
@@ -101,16 +133,17 @@ PublicKey_import(PublicKey* pk, const un
   if (dataLen != CURVE25519_KEY_LEN)
     return SECFailure;
 
   P_CHECKA(key_bytes = calloc(dataLen, sizeof(unsigned char)));
   memcpy(key_bytes, data, dataLen);
 
   const int spki_len = sizeof(curve25519_spki_zeros);
   P_CHECKA(spki_data = calloc(spki_len, sizeof(uint8_t)));
+
   memcpy(spki_data, curve25519_spki_zeros, spki_len);
   SECItem spki_item = { siBuffer, spki_data, spki_len };
 
   // Import the all-zeros curve25519 public key.
   P_CHECKA(pkinfo = SECKEY_DecodeDERSubjectPublicKeyInfo(&spki_item));
   P_CHECKA(*pk = SECKEY_ExtractPublicKey(pkinfo));
 
   // Overwrite the all-zeros public key with the 32-byte curve25519 public key
@@ -126,69 +159,215 @@ cleanup:
     SECKEY_DestroySubjectPublicKeyInfo(pkinfo);
 
   if (rv != SECSuccess)
     PublicKey_clear(*pk);
   return rv;
 }
 
 SECStatus
-PublicKey_import_hex(PublicKey* pk, const unsigned char* hex_data,
+PrivateKey_import(PrivateKey* sk, const unsigned char* sk_data,
+                  unsigned int sk_data_len, const unsigned char* pk_data,
+                  unsigned int pk_data_len)
+{
+  if (sk_data_len != CURVE25519_KEY_LEN || !sk_data) {
+    return SECFailure;
+  }
+
+  if (pk_data_len != CURVE25519_KEY_LEN || !pk_data) {
+    return SECFailure;
+  }
+
+  SECStatus rv = SECSuccess;
+  PK11SlotInfo* slot = NULL;
+  uint8_t* zero_priv_data = NULL;
+  *sk = NULL;
+  const int zero_priv_len = sizeof(curve25519_priv_zeros);
+
+  P_CHECKA(slot = PK11_GetInternalSlot());
+
+  P_CHECKA(zero_priv_data = calloc(zero_priv_len, sizeof(uint8_t)));
+  SECItem zero_priv_item = { siBuffer, zero_priv_data, zero_priv_len };
+
+  // Copy the PKCS#8-encoded keypair into writable buffer.
+  memcpy(zero_priv_data, curve25519_priv_zeros, zero_priv_len);
+  // Copy private key into bytes beginning at index `curve25519_priv_sk_offset`.
+  memcpy(zero_priv_data + curve25519_priv_sk_offset, sk_data, sk_data_len);
+  // Copy public key into bytes beginning at index `curve25519_priv_pk_offset`.
+  memcpy(zero_priv_data + curve25519_priv_pk_offset, pk_data, pk_data_len);
+
+  P_CHECKC(PK11_ImportDERPrivateKeyInfoAndReturnKey(
+    slot, &zero_priv_item, NULL, NULL, PR_FALSE, PR_FALSE, KU_ALL, sk, NULL));
+
+cleanup:
+  if (slot) {
+    PK11_FreeSlot(slot);
+  }
+  if (zero_priv_data) {
+    free(zero_priv_data);
+  }
+  if (rv != SECSuccess) {
+    PrivateKey_clear(*sk);
+  }
+  return rv;
+}
+
+SECStatus
+PublicKey_import_hex(PublicKey* pk, const unsigned char* hexData,
                      unsigned int dataLen)
 {
   unsigned char raw_bytes[CURVE25519_KEY_LEN];
 
-  if (dataLen != CURVE25519_KEY_LEN_HEX)
+  if (dataLen != CURVE25519_KEY_LEN_HEX || !hexData) {
     return SECFailure;
-
-  for (unsigned int i = 0; i < dataLen; i++) {
-    if (!is_hex_digit(hex_data[i]))
-      return SECFailure;
   }
 
-  const unsigned char* p = hex_data;
-  for (unsigned int i = 0; i < CURVE25519_KEY_LEN; i++) {
-    uint8_t d0 = hex_to_int(p[0]);
-    uint8_t d1 = hex_to_int(p[1]);
-    raw_bytes[i] = (d0 << 4) | d1;
-    p += 2;
+  if (key_from_hex(raw_bytes, hexData) != SECSuccess) {
+    return SECFailure;
   }
 
   return PublicKey_import(pk, raw_bytes, CURVE25519_KEY_LEN);
 }
 
 SECStatus
-PublicKey_export(const_PublicKey pk, unsigned char data[CURVE25519_KEY_LEN])
+PrivateKey_import_hex(PrivateKey* sk, const unsigned char* privHexData,
+                      unsigned int privDataLen, const unsigned char* pubHexData,
+                      unsigned int pubDataLen)
+{
+  SECStatus rv = SECSuccess;
+  unsigned char raw_priv[CURVE25519_KEY_LEN];
+  unsigned char raw_pub[CURVE25519_KEY_LEN];
+
+  if (privDataLen != CURVE25519_KEY_LEN_HEX ||
+      pubDataLen != CURVE25519_KEY_LEN_HEX) {
+    return SECFailure;
+  }
+
+  if (!privHexData || !pubHexData) {
+    return SECFailure;
+  }
+
+  P_CHECK(key_from_hex(raw_priv, privHexData));
+  P_CHECK(key_from_hex(raw_pub, pubHexData));
+
+  return PrivateKey_import(sk, raw_priv, CURVE25519_KEY_LEN, raw_pub,
+                           CURVE25519_KEY_LEN);
+}
+
+SECStatus
+PublicKey_export(const_PublicKey pk, unsigned char* data, unsigned int dataLen)
+{
+  if (pk == NULL || dataLen != CURVE25519_KEY_LEN) {
+    return SECFailure;
+  }
+
+  const SECItem* key = &pk->u.ec.publicValue;
+  if (key->len != CURVE25519_KEY_LEN) {
+    return SECFailure;
+  }
+
+  memcpy(data, key->data, key->len);
+  return SECSuccess;
+}
+
+SECStatus
+PrivateKey_export(PrivateKey sk, unsigned char* data, unsigned int dataLen)
 {
-  if (pk == NULL)
+  if (sk == NULL || dataLen != CURVE25519_KEY_LEN) {
     return SECFailure;
+  }
 
-  memcpy(data, pk->u.ec.publicValue.data, CURVE25519_KEY_LEN);
+  SECStatus rv = SECSuccess;
+  SECItem item = { siBuffer, NULL, 0 };
+
+  P_CHECKC(PK11_ReadRawAttribute(PK11_TypePrivKey, sk, CKA_VALUE, &item));
+
+  // If the leading bytes of the key are '\0', then this string can be
+  // shorter than `CURVE25519_KEY_LEN` bytes.
+  memset(data, 0, CURVE25519_KEY_LEN);
+  P_CHECKCB(item.len <= CURVE25519_KEY_LEN);
+
+  // Copy into the low-order bytes of the output.
+  const size_t leading_zeros = CURVE25519_KEY_LEN - item.len;
+  memcpy(data + leading_zeros, item.data, item.len);
+
+cleanup:
+  if (item.data != NULL) {
+    SECITEM_ZfreeItem(&item, PR_FALSE);
+  }
+
+  return rv;
+}
+
+static void
+key_to_hex(const unsigned char key_in[CURVE25519_KEY_LEN],
+           unsigned char hex_out[(2 * CURVE25519_KEY_LEN) + 1])
+{
+  const unsigned char* p = key_in;
+  for (unsigned int i = 0; i < CURVE25519_KEY_LEN; i++) {
+    unsigned char bytel = p[0] & 0x0f;
+    unsigned char byteu = (p[0] & 0xf0) >> 4;
+    hex_out[2 * i] = int_to_hex(byteu);
+    hex_out[2 * i + 1] = int_to_hex(bytel);
+    p++;
+  }
+
+  hex_out[2 * CURVE25519_KEY_LEN] = '\0';
+}
+
+static SECStatus
+key_from_hex(unsigned char key_out[CURVE25519_KEY_LEN],
+             const unsigned char hex_in[CURVE25519_KEY_LEN_HEX])
+{
+  for (unsigned int i = 0; i < CURVE25519_KEY_LEN_HEX; i++) {
+    if (!is_hex_digit(hex_in[i]))
+      return SECFailure;
+  }
+
+  const unsigned char* p = hex_in;
+  for (unsigned int i = 0; i < CURVE25519_KEY_LEN; i++) {
+    uint8_t d0 = hex_to_int(p[0]);
+    uint8_t d1 = hex_to_int(p[1]);
+    key_out[i] = (d0 << 4) | d1;
+    p += 2;
+  }
 
   return SECSuccess;
 }
 
 SECStatus
-PublicKey_export_hex(const_PublicKey pk,
-                     unsigned char data[(2 * CURVE25519_KEY_LEN) + 1])
+PublicKey_export_hex(const_PublicKey pk, unsigned char* data,
+                     unsigned int dataLen)
 {
-  unsigned char raw_data[CURVE25519_KEY_LEN];
-  if (PublicKey_export(pk, raw_data) != SECSuccess)
+  if (dataLen != CURVE25519_KEY_LEN_HEX + 1) {
     return SECFailure;
+  }
 
-  const unsigned char* p = raw_data;
-  for (unsigned int i = 0; i < CURVE25519_KEY_LEN; i++) {
-    unsigned char bytel = p[0] & 0x0f;
-    unsigned char byteu = (p[0] & 0xf0) >> 4;
-    data[2 * i] = int_to_hex(byteu);
-    data[2 * i + 1] = int_to_hex(bytel);
-    p++;
+  unsigned char raw_data[CURVE25519_KEY_LEN];
+  if (PublicKey_export(pk, raw_data, sizeof(raw_data)) != SECSuccess) {
+    return SECFailure;
   }
 
-  data[2 * CURVE25519_KEY_LEN] = '\0';
+  key_to_hex(raw_data, data);
+  return SECSuccess;
+}
+
+SECStatus
+PrivateKey_export_hex(PrivateKey sk, unsigned char* data, unsigned int dataLen)
+{
+  if (dataLen != CURVE25519_KEY_LEN_HEX + 1) {
+    return SECFailure;
+  }
+
+  unsigned char raw_data[CURVE25519_KEY_LEN];
+  if (PrivateKey_export(sk, raw_data, sizeof(raw_data)) != SECSuccess) {
+    return SECFailure;
+  }
+
+  key_to_hex(raw_data, data);
   return SECSuccess;
 }
 
 SECStatus
 Keypair_new(PrivateKey* pvtkey, PublicKey* pubkey)
 {
   if (pvtkey == NULL)
     return SECFailure;
@@ -215,21 +394,23 @@ Keypair_new(PrivateKey* pvtkey, PublicKe
   ecp.data[0] = SEC_ASN1_OBJECT_ID;
   ecp.data[1] = oid_data->oid.len;
   memcpy(&ecp.data[2], oid_data->oid.data, oid_data->oid.len);
 
   P_CHECKA(slot = PK11_GetInternalSlot());
   P_CHECKA(*pvtkey = PK11_GenerateKeyPair(slot, CKM_EC_KEY_PAIR_GEN, &ecp,
                                           (SECKEYPublicKey**)pubkey, PR_FALSE,
                                           PR_FALSE, NULL));
-  PK11_FreeSlot(slot);
-
 cleanup:
-  if (ecp.data)
+  if (slot) {
+    PK11_FreeSlot(slot);
+  }
+  if (ecp.data) {
     free(ecp.data);
+  }
   if (rv != SECSuccess) {
     PublicKey_clear(*pubkey);
     PrivateKey_clear(*pvtkey);
   }
   return rv;
 }
 
 void
--- a/third_party/prio/prio/poly.c
+++ b/third_party/prio/prio/poly.c
@@ -69,18 +69,18 @@ fft_interpolate_raw(mp_int* out, const m
 
   P_CHECKA(tmp = MPArray_new(nPoints));
   P_CHECKA(ySub = MPArray_new(nPoints));
   P_CHECKA(rootsSub = MPArray_new(nPoints));
 
   mp_int n_inverse;
   MP_DIGITS(&n_inverse) = NULL;
 
-  MP_CHECK(fft_recurse(out, mod, nPoints, roots, ys, tmp->data, ySub->data,
-                       rootsSub->data));
+  MP_CHECKC(fft_recurse(out, mod, nPoints, roots, ys, tmp->data, ySub->data,
+                        rootsSub->data));
 
   if (invert) {
     MP_CHECKC(mp_init(&n_inverse));
 
     mp_set(&n_inverse, nPoints);
     MP_CHECKC(mp_invmod(&n_inverse, mod, &n_inverse));
     for (int i = 0; i < nPoints; i++) {
       MP_CHECKC(mp_mulmod(&out[i], &n_inverse, mod, &out[i]));
--- a/third_party/prio/prio/prg.c
+++ b/third_party/prio/prio/prg.c
@@ -115,16 +115,42 @@ PRG_get_bytes(PRG prg, unsigned char* by
 
 SECStatus
 PRG_get_int(PRG prg, mp_int* out, const mp_int* max)
 {
   return rand_int_rng(out, max, &PRG_get_bytes_internal, (void*)prg);
 }
 
 SECStatus
+PRG_get_int_range(PRG prg, mp_int* out, const mp_int* lower, const mp_int* max)
+{
+  SECStatus rv;
+  mp_int width;
+  MP_DIGITS(&width) = NULL;
+  MP_CHECKC(mp_init(&width));
+
+  // Compute
+  //    width = max - lower
+  MP_CHECKC(mp_sub(max, lower, &width));
+
+  // Get an integer x in the range [0, width)
+  P_CHECKC(PRG_get_int(prg, out, &width));
+
+  // Set
+  //    out = lower + x
+  // which is in the range [lower, width+lower),
+  // which is              [lower, max).
+  MP_CHECKC(mp_add(lower, out, out));
+
+cleanup:
+  mp_clear(&width);
+  return rv;
+}
+
+SECStatus
 PRG_get_array(PRG prg, MPArray dst, const mp_int* mod)
 {
   SECStatus rv;
   for (int i = 0; i < dst->len; i++) {
     P_CHECK(PRG_get_int(prg, &dst->data[i], mod));
   }
 
   return SECSuccess;
--- a/third_party/prio/prio/prg.h
+++ b/third_party/prio/prio/prg.h
@@ -31,16 +31,23 @@ SECStatus PRG_get_bytes(PRG prg, unsigne
 
 /*
  * Use the PRG output to sample a big integer x in the range
  *    0 <= x < max.
  */
 SECStatus PRG_get_int(PRG prg, mp_int* out, const mp_int* max);
 
 /*
+ * Use the PRG output to sample a big integer x in the range
+ *    lower <= x < max.
+ */
+SECStatus PRG_get_int_range(PRG prg, mp_int* out, const mp_int* lower,
+                            const mp_int* max);
+
+/*
  * Use secret sharing to split the int src into two shares.
  * Use PRG to generate the value `shareB`.
  * The mp_ints must be initialized.
  */
 SECStatus PRG_share_int(PRG prg, mp_int* shareA, const mp_int* src,
                         const_PrioConfig cfg);
 
 /*
--- a/third_party/prio/prio/server.c
+++ b/third_party/prio/prio/server.c
@@ -13,16 +13,23 @@
 
 #include "client.h"
 #include "mparray.h"
 #include "poly.h"
 #include "prg.h"
 #include "server.h"
 #include "util.h"
 
+/* In `PrioTotalShare_final`, we need to be able to store
+ * an `mp_digit` in an `unsigned long long`.
+ */
+#if (MP_DIGIT_MAX > ULLONG_MAX)
+#error "Unsigned long long is not long enough to hold an MP digit"
+#endif
+
 PrioServer
 PrioServer_new(const_PrioConfig cfg, PrioServerId server_idx,
                PrivateKey server_priv, const PrioPRGSeed seed)
 {
   SECStatus rv = SECSuccess;
   PrioServer s = malloc(sizeof(*s));
   if (!s)
     return NULL;
@@ -107,17 +114,17 @@ PrioTotalShare_set_data(PrioTotalShare t
 
   P_CHECK(MPArray_resize(t->data_shares, s->data_shares->len));
   P_CHECK(MPArray_copy(t->data_shares, s->data_shares));
 
   return rv;
 }
 
 SECStatus
-PrioTotalShare_final(const_PrioConfig cfg, unsigned long* output,
+PrioTotalShare_final(const_PrioConfig cfg, unsigned long long* output,
                      const_PrioTotalShare tA, const_PrioTotalShare tB)
 {
   if (tA->data_shares->len != cfg->num_data_fields)
     return SECFailure;
   if (tA->data_shares->len != tB->data_shares->len)
     return SECFailure;
   if (tA->idx != PRIO_SERVER_A || tB->idx != PRIO_SERVER_B)
     return SECFailure;
@@ -127,17 +134,20 @@ PrioTotalShare_final(const_PrioConfig cf
   mp_int tmp;
   MP_DIGITS(&tmp) = NULL;
   MP_CHECKC(mp_init(&tmp));
 
   for (int i = 0; i < cfg->num_data_fields; i++) {
     MP_CHECKC(mp_addmod(&tA->data_shares->data[i], &tB->data_shares->data[i],
                         &cfg->modulus, &tmp));
 
-    output[i] = tmp.dp[0];
+    if (MP_USED(&tmp) > 1) {
+      P_CHECKCB(false);
+    }
+    output[i] = MP_DIGIT(&tmp, 0);
   }
 
 cleanup:
   mp_clear(&tmp);
   return rv;
 }
 
 inline static mp_int*
@@ -173,29 +183,35 @@ get_h_share(const_PrioVerifier v, int i)
  */
 static SECStatus
 compute_shares(PrioVerifier v, const_PrioPacketClient p)
 {
   SECStatus rv;
   const int n = v->s->cfg->num_data_fields + 1;
   const int N = next_power_of_two(n);
   mp_int eval_at;
+  mp_int lower;
   MP_DIGITS(&eval_at) = NULL;
+  MP_DIGITS(&lower) = NULL;
 
   MPArray points_f = NULL;
   MPArray points_g = NULL;
   MPArray points_h = NULL;
 
   MP_CHECKC(mp_init(&eval_at));
+  MP_CHECKC(mp_init(&lower));
   P_CHECKA(points_f = MPArray_new(N));
   P_CHECKA(points_g = MPArray_new(N));
   P_CHECKA(points_h = MPArray_new(2 * N));
 
-  // Use PRG to generate random point
-  MP_CHECKC(PRG_get_int(v->s->prg, &eval_at, &v->s->cfg->modulus));
+  // Use the PRG to generate a random point. Per Appendix D.2 of the full
+  // version of the Prio paper, this value must be in the range
+  //      [n+1, modulus).
+  mp_set(&lower, n + 1);
+  P_CHECKC(PRG_get_int_range(v->s->prg, &eval_at, &lower, &v->s->cfg->modulus));
 
   // Reduce value into the field we're using. This
   // doesn't yield exactly a uniformly random point,
   // but for values this large, it will be close
   // enough.
   MP_CHECKC(mp_mod(&eval_at, &v->s->cfg->modulus, &eval_at));
 
   // Client sends us the values of f(0) and g(0)
@@ -228,16 +244,17 @@ compute_shares(PrioVerifier v, const_Pri
   P_CHECKC(poly_interp_evaluate(&v->share_gR, points_g, &eval_at, v->s->cfg));
   P_CHECKC(poly_interp_evaluate(&v->share_hR, points_h, &eval_at, v->s->cfg));
 
 cleanup:
   MPArray_clear(points_f);
   MPArray_clear(points_g);
   MPArray_clear(points_h);
   mp_clear(&eval_at);
+  mp_clear(&lower);
   return rv;
 }
 
 PrioVerifier
 PrioVerifier_new(PrioServer s)
 {
   SECStatus rv = SECSuccess;
   PrioVerifier v = malloc(sizeof *v);
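(Aside: a short sketch of the aggregation call affected by the signature change;
`collect_totals` is an invented name, and the config and the two total shares
are assumed to be fully populated by the servers already.)

#include <vector>

SECStatus
collect_totals(const_PrioConfig cfg, const_PrioTotalShare tA,
               const_PrioTotalShare tB)
{
  // One slot per data field. The output type is now unsigned long long, and
  // the call fails rather than silently truncating a total that needs more
  // than one mp digit.
  std::vector<unsigned long long> output(PrioConfig_numDataFields(cfg));
  return PrioTotalShare_final(cfg, output.data(), tA, tB);
}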
--- a/third_party/prio/update.sh
+++ b/third_party/prio/update.sh
@@ -1,16 +1,16 @@
 #!/bin/sh
 
 # Script to update the mozilla in-tree copy of the libprio library.
 # Run this within the /third_party/libprio directory of the source tree.
 
 MY_TEMP_DIR=`mktemp -d -t libprio_update.XXXXXX` || exit 1
 
-COMMIT="02a81fb652d385d0f4f10989d051317097ab55fb"
+COMMIT="a95cfdd5eaf7104582709c54ef23395d24d7f7fd"
 
 git clone -n https://github.com/mozilla/libprio ${MY_TEMP_DIR}/libprio
 git -C ${MY_TEMP_DIR}/libprio checkout ${COMMIT}
 
 FILES="include prio"
 VERSION=$(git -C ${MY_TEMP_DIR}/libprio describe --tags)
 perl -p -i -e "s/Current version: \S+ \[commit [0-9a-f]{40}\]/Current version: ${VERSION} [commit ${COMMIT}]/" README-mozilla
 
new file mode 100644
--- /dev/null
+++ b/toolkit/components/telemetry/tests/unit/test_SocketScalars.js
@@ -0,0 +1,46 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/
+*/
+
+ChromeUtils.import("resource://gre/modules/Services.jsm");
+ChromeUtils.import("resource://gre/modules/TelemetryController.jsm");
+ChromeUtils.import("resource://testing-common/ContentTaskUtils.jsm");
+
+const SOCKET_ONLY_UINT_SCALAR = "telemetry.test.socket_only_uint";
+
+/**
+ * This function waits until socket scalars are reported into the
+ * scalar snapshot.
+ */
+async function waitForSocketScalars() {
+  await ContentTaskUtils.waitForCondition(() => {
+    const scalars = Telemetry.getSnapshotForScalars("main", false);
+    return Object.keys(scalars).includes("socket");
+  });
+}
+
+add_task(async function() {
+  if (!Services.prefs.getBoolPref("network.process.enabled")) {
+    Assert.ok(true, "Test finished: no point to test telemetry from socket process with lanuching the process");
+    return;
+  }
+
+  do_test_pending();
+
+  do_get_profile(true);
+  await TelemetryController.testSetup();
+
+  Services.netUtils.socketProcessTelemetryPing();
+
+  // Once scalars are set by the socket process, they don't immediately get
+  // sent to the parent process. Wait for the Telemetry IPC Timer to trigger
+  // and batch send the data back to the parent process.
+  await waitForSocketScalars();
+
+  Assert.equal(Telemetry.getSnapshotForScalars("main", false)
+                   .socket[SOCKET_ONLY_UINT_SCALAR],
+               42,
+               `${SOCKET_ONLY_UINT_SCALAR} must have the correct value (socket process).`);
+  do_test_finished();
+});
+
--- a/toolkit/components/telemetry/tests/unit/xpcshell.ini
+++ b/toolkit/components/telemetry/tests/unit/xpcshell.ini
@@ -62,16 +62,18 @@ skip-if = (verify && debug && os == 'lin
 skip-if = os == "android"
 [test_TelemetrySession_activeTicks.js]
 [test_TelemetrySend.js]
 [test_ChildHistograms.js]
 skip-if = os == "android" # Disabled due to crashes (see bug 1331366)
 tags = addons
 [test_ChildScalars.js]
 skip-if = os == "android" # Disabled due to crashes (see bug 1331366)
+[test_SocketScalars.js]
+skip-if = os == "android"
 [test_TelemetryReportingPolicy.js]
 skip-if = os == "android" # Disabled due to crashes (see bug 1367762)
 tags = addons
 [test_TelemetryScalars.js]
 [test_TelemetryScalars_buildFaster.js]
 [test_TelemetryScalars_multistore.js]
 [test_TelemetryTimestamps.js]
 skip-if = toolkit == 'android'
--- a/toolkit/content/aboutSupport.js
+++ b/toolkit/content/aboutSupport.js
@@ -53,16 +53,29 @@ var snapshotFormatters = {
     if (data.vendor)
       version += " (" + data.vendor + ")";
     $("version-box").textContent = version;
     $("buildid-box").textContent = data.buildID;
     if (data.updateChannel)
       $("updatechannel-box").textContent = data.updateChannel;
     $("profile-dir-box").textContent = Services.dirsvc.get("ProfD", Ci.nsIFile).path;
 
+    try {
+      let launcherStatusTextId = "launcher-process-status-unknown";
+      switch (data.launcherProcessState) {
+        case 0:
+        case 1:
+        case 2:
+          launcherStatusTextId = "launcher-process-status-" + data.launcherProcessState;
+          break;
+      }
+
+      document.l10n.setAttributes($("launcher-process-box"), launcherStatusTextId);
+    } catch (e) {}
+
     let statusTextId = "multi-process-status-unknown";
 
     // Whitelist of known values with string descriptions:
     switch (data.autoStartStatus) {
       case 0:
       case 1:
       case 2:
       case 4:
--- a/toolkit/content/aboutSupport.xhtml
+++ b/toolkit/content/aboutSupport.xhtml
@@ -158,16 +158,25 @@
           <tr class="no-copy">
             <th class="column" data-l10n-id="app-basics-service-workers"/>
 
             <td>
               <a href="about:serviceworkers">about:serviceworkers</a>
             </td>
           </tr>
 
+#if defined(XP_WIN) && defined(MOZ_LAUNCHER_PROCESS)
+          <tr>
+            <th class="column" data-l10n-id="app-basics-launcher-process-status"/>
+
+            <td id="launcher-process-box">
+            </td>
+          </tr>
+#endif
+
           <tr>
             <th class="column" data-l10n-id="app-basics-multi-process-support"/>
 
             <td id="multiprocess-box">
               <span id="multiprocess-box-process-count"/>
               <span id="multiprocess-box-status" data-l10n-id="multi-process-status-unknown"/>
             </td>
           </tr>
--- a/toolkit/locales/en-US/toolkit/about/aboutSupport.ftl
+++ b/toolkit/locales/en-US/toolkit/about/aboutSupport.ftl
@@ -43,16 +43,17 @@ app-basics-profile-dir =
 app-basics-enabled-plugins = Enabled Plugins
 app-basics-build-config = Build Configuration
 app-basics-user-agent = User Agent
 app-basics-os = OS
 app-basics-memory-use = Memory Use
 app-basics-performance = Performance
 app-basics-service-workers = Registered Service Workers
 app-basics-profiles = Profiles
+app-basics-launcher-process-status = Launcher Process
 app-basics-multi-process-support = Multiprocess Windows
 app-basics-process-count = Web Content Processes
 app-basics-enterprise-policies = Enterprise Policies
 app-basics-key-google = Google Key
 app-basics-key-mozilla = Mozilla Location Service Key
 app-basics-safe-mode = Safe Mode
 show-dir-label =
     { PLATFORM() ->
@@ -255,16 +256,21 @@ has-privileged-user-namespaces = User Na
 can-sandbox-content = Content Process Sandboxing
 can-sandbox-media = Media Plugin Sandboxing
 content-sandbox-level = Content Process Sandbox Level
 effective-content-sandbox-level = Effective Content Process Sandbox Level
 sandbox-proc-type-content = content
 sandbox-proc-type-file = file content
 sandbox-proc-type-media-plugin = media plugin
 
+launcher-process-status-0 = Enabled
+launcher-process-status-1 = Disabled due to failure
+launcher-process-status-2 = Disabled forcibly
+launcher-process-status-unknown = Unknown status
+
 # Variables
 # $remoteWindows (integer) - Number of remote windows
 # $totalWindows (integer) - Number of total windows
 multi-process-windows = { $remoteWindows }/{ $totalWindows }
 multi-process-status-0 = Enabled by user
 multi-process-status-1 = Enabled by default
 multi-process-status-2 = Disabled
 multi-process-status-4 = Disabled by accessibility tools
--- a/toolkit/modules/Troubleshoot.jsm
+++ b/toolkit/modules/Troubleshoot.jsm
@@ -192,16 +192,20 @@ var dataProviders = {
       data.numTotalWindows++;
       let remote = docShell.QueryInterface(Ci.nsILoadContext)
                    .useRemoteTabs;
       if (remote) {
         data.numRemoteWindows++;
       }
     }
 
+    try {
+      data.launcherProcessState = Services.appinfo.launcherProcessState;
+    } catch (e) {}
+
     data.remoteAutoStart = Services.appinfo.browserTabsRemoteAutostart;
 
     // Services.ppmm.childCount is a count of how many processes currently
     // exist that might respond to messages sent through the ppmm, including
     // the parent process. So we subtract the parent process with the "- 1",
     // and that’s how many content processes we’re waiting for.
     data.currentContentProcesses = Services.ppmm.childCount - 1;
     data.maxContentProcesses = Services.appinfo.maxWebProcessCount;
--- a/toolkit/modules/tests/browser/browser_Troubleshoot.js
+++ b/toolkit/modules/tests/browser/browser_Troubleshoot.js
@@ -120,16 +120,19 @@ const SNAPSHOT_SCHEMA = {
           type: "string",
         },
         updateChannel: {
           type: "string",
         },
         supportURL: {
           type: "string",
         },
+        launcherProcessState: {
+          type: "number",
+        },
         remoteAutoStart: {
           type: "boolean",
           required: true,
         },
         autoStartStatus: {
           type: "number",
         },
         numTotalWindows: {
--- a/toolkit/mozapps/installer/windows/nsis/common.nsh
+++ b/toolkit/mozapps/installer/windows/nsis/common.nsh
@@ -8397,8 +8397,76 @@ end:
     System::Call "kernel32::CloseHandle(i $1)"
   ${EndIf}
   System::Free $2
 
   Pop $2
   Pop $1
   Pop $0
 !macroend
+
+Function WriteRegQWORD
+          ; Stack contents:
+          ; VALUE, VALUE_NAME, SUBKEY, ROOTKEY
+  Exch $3 ; $3, VALUE_NAME, SUBKEY, ROOTKEY
+  Exch 1  ; VALUE_NAME, $3, SUBKEY, ROOTKEY
+  Exch $2 ; $2, $3, SUBKEY, ROOTKEY
+  Exch 2  ; SUBKEY, $3, $2, ROOTKEY
+  Exch $1 ; $1, $3, $2, ROOTKEY
+  Exch 3  ; ROOTKEY, $3, $2, $1
+  Exch $0 ; $0, $3, $2, $1
+  System::Call "advapi32::RegSetKeyValueW(p r0, w r1, w r2, i 11, *l r3, i 8) i.r0"
+  ${IfNot} $0 = 0
+    SetErrors
+  ${EndIf}
+  Pop $0
+  Pop $3
+  Pop $2
+  Pop $1
+FunctionEnd
+!macro WriteRegQWORD ROOTKEY SUBKEY VALUE_NAME VALUE
+  ${If} "${ROOTKEY}" == "HKCR"
+    Push 0x80000000
+  ${ElseIf} "${ROOTKEY}" == "HKCU"
+    Push 0x80000001
+  ${ElseIf} "${ROOTKEY}" == "HKLM"
+    Push 0x80000002
+  ${Endif}
+  Push "${SUBKEY}"
+  Push "${VALUE_NAME}"
+  System::Int64Op ${VALUE} + 0 ; The result is pushed on the stack
+  Call WriteRegQWORD
+!macroend
+!define WriteRegQWORD "!insertmacro WriteRegQWORD"
+
+Function ReadRegQWORD
+          ; Stack contents:
+          ; VALUE_NAME, SUBKEY, ROOTKEY
+  Exch $2 ; $2, SUBKEY, ROOTKEY
+  Exch 1  ; SUBKEY, $2, ROOTKEY
+  Exch $1 ; $1, $2, ROOTKEY
+  Exch 2  ; ROOTKEY, $2, $1
+  Exch $0 ; $0, $2, $1
+  System::Call "advapi32::RegGetValueW(p r0, w r1, w r2, i 0x48, p 0, *l s, *i 8) i.r0"
+  ${IfNot} $0 = 0
+    SetErrors
+  ${EndIf}
+          ; VALUE, $0, $2, $1
+  Exch 3  ; $1, $0, $2, VALUE
+  Pop $1  ; $0, $2, VALUE
+  Pop $0  ; $2, VALUE
+  Pop $2  ; VALUE
+FunctionEnd
+!macro ReadRegQWORD DEST ROOTKEY SUBKEY VALUE_NAME
+  ${If} "${ROOTKEY}" == "HKCR"
+    Push 0x80000000
+  ${ElseIf} "${ROOTKEY}" == "HKCU"
+    Push 0x80000001
+  ${ElseIf} "${ROOTKEY}" == "HKLM"
+    Push 0x80000002
+  ${Endif}
+  Push "${SUBKEY}"
+  Push "${VALUE_NAME}"
+  Call ReadRegQWORD
+  Pop ${DEST}
+!macroend
+!define ReadRegQWORD "!insertmacro ReadRegQWORD"
+
--- a/toolkit/pluginproblem/jar.mn
+++ b/toolkit/pluginproblem/jar.mn
@@ -1,9 +1,8 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 toolkit.jar:
 % content pluginproblem %pluginproblem/ contentaccessible=yes
   pluginproblem/pluginProblem.xml                 (content/pluginProblem.xml)
   pluginproblem/pluginProblemContent.css          (content/pluginProblemContent.css)
-  pluginproblem/pluginProblemBinding.css          (content/pluginProblemBinding.css)
--- a/toolkit/pluginproblem/moz.build
+++ b/toolkit/pluginproblem/moz.build
@@ -1,11 +1,7 @@
 # -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
 # vim: set filetype=python:
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
-EXTRA_COMPONENTS += [
-    'pluginGlue.manifest',
-]
-
 JAR_MANIFESTS += ['jar.mn']
deleted file mode 100644
--- a/toolkit/pluginproblem/pluginGlue.manifest
+++ /dev/null
@@ -1,1 +0,0 @@
-category agent-style-sheets pluginGlue-pluginProblem chrome://pluginproblem/content/pluginProblemBinding.css
--- a/toolkit/xre/LauncherRegistryInfo.h
+++ b/toolkit/xre/LauncherRegistryInfo.h
@@ -21,20 +21,20 @@
  */
 
 namespace mozilla {
 
 class LauncherRegistryInfo final {
  public:
   enum class ProcessType { Launcher, Browser };
 
-  enum class EnabledState {
-    Enabled,
-    FailDisabled,
-    ForceDisabled,
+  enum class EnabledState : uint32_t {
+    Enabled = 0,
+    FailDisabled = 1,
+    ForceDisabled = 2,
   };
 
   LauncherRegistryInfo() : mBinPath(GetFullBinaryPath().get()) {}
 
   LauncherVoidResult ReflectPrefToRegistry(const bool aEnable);
   LauncherResult<EnabledState> IsEnabled();
   LauncherResult<ProcessType> Check(const ProcessType aDesiredType);
   LauncherVoidResult DisableDueToFailure();
--- a/toolkit/xre/nsAppRunner.cpp
+++ b/toolkit/xre/nsAppRunner.cpp
@@ -940,16 +940,34 @@ nsXULAppInfo::GetWindowsDLLBlocklistStat
 }
 
 NS_IMETHODIMP
 nsXULAppInfo::GetRestartedByOS(bool* aResult) {
   *aResult = gRestartedByOS;
   return NS_OK;
 }
 
+NS_IMETHODIMP
+nsXULAppInfo::GetLauncherProcessState(uint32_t* aResult) {
+#if defined(XP_WIN) && defined(MOZ_LAUNCHER_PROCESS)
+  LauncherRegistryInfo launcherInfo;
+
+  LauncherResult<LauncherRegistryInfo::EnabledState> state =
+      launcherInfo.IsEnabled();
+  if (state.isErr()) {
+    return NS_ERROR_UNEXPECTED;
+  }
+
+  *aResult = static_cast<uint32_t>(state.unwrap());
+  return NS_OK;
+#else
+  return NS_ERROR_NOT_AVAILABLE;
+#endif
+}
+
 #ifdef XP_WIN
 // Matches the enum in WinNT.h for the Vista SDK but renamed so that we can
 // safely build with the Vista SDK and without it.
 typedef enum {
   VistaTokenElevationTypeDefault = 1,
   VistaTokenElevationTypeFull,
   VistaTokenElevationTypeLimited
 } VISTA_TOKEN_ELEVATION_TYPE;
--- a/uriloader/exthandler/mac/nsOSHelperAppService.h
+++ b/uriloader/exthandler/mac/nsOSHelperAppService.h
@@ -14,17 +14,16 @@
 
 #include "nsExternalHelperAppService.h"
 #include "nsCExternalHandlerService.h"
 #include "nsMIMEInfoImpl.h"
 #include "nsCOMPtr.h"
 
 class nsOSHelperAppService : public nsExternalHelperAppService {
  public:
-  nsOSHelperAppService();
   virtual ~nsOSHelperAppService();
 
   // override nsIExternalProtocolService methods
   NS_IMETHOD GetApplicationDescription(const nsACString& aScheme,
                                        nsAString& _retval) override;
 
   // method overrides --> used to hook the mime service into internet config....
   NS_IMETHOD GetFromTypeAndExtension(const nsACString& aType,
@@ -47,14 +46,11 @@ class nsOSHelperAppService : public nsEx
   //                     spec, a unix path or a windows path depending on the
   //                     platform
   // aFile --> an nsIFile representation of that platform application path.
   virtual MOZ_MUST_USE nsresult GetFileTokenForPath(
       const char16_t* platformAppPath, nsIFile** aFile) override;
 
   MOZ_MUST_USE nsresult OSProtocolHandlerExists(const char* aScheme,
                                                 bool* aHandlerExists) override;
-
- private:
-  uint32_t mPermissions;
 };
 
 #endif  // nsOSHelperAppService_h__
--- a/uriloader/exthandler/mac/nsOSHelperAppService.mm
+++ b/uriloader/exthandler/mac/nsOSHelperAppService.mm
@@ -49,23 +49,16 @@ using mozilla::LogLevel;
 }
 
 + (NSURLFileTypeMappings*)sharedMappings;
 - (NSString*)MIMETypeForExtension:(NSString*)aString;
 - (NSString*)preferredExtensionForMIMEType:(NSString*)aString;
 - (NSArray*)extensionsForMIMEType:(NSString*)aString;
 @end
 
-nsOSHelperAppService::nsOSHelperAppService() : nsExternalHelperAppService()
-{
-  mode_t mask = umask(0777);
-  umask(mask);
-  mPermissions = 0666 & ~mask;
-}
-
 nsOSHelperAppService::~nsOSHelperAppService()
 {}
 
 nsresult nsOSHelperAppService::OSProtocolHandlerExists(const char * aProtocolScheme, bool * aHandlerExists)
 {
   // CFStringCreateWithBytes() can fail even if we're not out of memory --
  // for example if the 'bytes' parameter is something very weird (like "~"
   // aka "\xFF\xFF~"), or possibly if it can't be interpreted as using what's
--- a/uriloader/exthandler/unix/nsOSHelperAppService.cpp
+++ b/uriloader/exthandler/unix/nsOSHelperAppService.cpp
@@ -44,22 +44,16 @@ static nsresult ParseMIMEType(const nsAS
                               nsAString::const_iterator& aMajorTypeStart,
                               nsAString::const_iterator& aMajorTypeEnd,
                               nsAString::const_iterator& aMinorTypeStart,
                               nsAString::const_iterator& aMinorTypeEnd,
                               const nsAString::const_iterator& aEnd_iter);
 
 inline bool IsNetscapeFormat(const nsACString& aBuffer);
 
-nsOSHelperAppService::nsOSHelperAppService() : nsExternalHelperAppService() {
-  mode_t mask = umask(0777);
-  umask(mask);
-  mPermissions = 0666 & ~mask;
-}
-
 nsOSHelperAppService::~nsOSHelperAppService() {}
 
 /*
  * Take a command with all the mailcap escapes in it and unescape it
  * Ideally this needs the mime type, mime type options, and location of the
  * temporary file, but this last can't be got from here
  */
 // static
--- a/uriloader/exthandler/unix/nsOSHelperAppService.h
+++ b/uriloader/exthandler/unix/nsOSHelperAppService.h
@@ -16,17 +16,16 @@
 #include "nsCExternalHandlerService.h"
 #include "nsMIMEInfoImpl.h"
 #include "nsCOMPtr.h"
 
 class nsILineInputStream;
 
 class nsOSHelperAppService : public nsExternalHelperAppService {
  public:
-  nsOSHelperAppService();
   virtual ~nsOSHelperAppService();
 
   // method overrides for mime.types and mime.info look up steps
   already_AddRefed<nsIMIMEInfo> GetMIMEInfoFromOS(const nsACString& aMimeType,
                                                   const nsACString& aFileExt,
                                                   bool* aFound) override;
   NS_IMETHOD GetProtocolHandlerInfoFromOS(const nsACString& aScheme,
                                           bool* found,
@@ -47,18 +46,16 @@ class nsOSHelperAppService : public nsEx
   virtual nsresult GetFileTokenForPath(const char16_t* platformAppPath,
                                        nsIFile** aFile) override;
 
  protected:
   already_AddRefed<nsMIMEInfoBase> GetFromType(const nsCString& aMimeType);
   already_AddRefed<nsMIMEInfoBase> GetFromExtension(const nsCString& aFileExt);
 
  private:
-  uint32_t mPermissions;
-
   // Helper methods which have to access static members
   static nsresult UnescapeCommand(const nsAString& aEscapedCommand,
                                   const nsAString& aMajorType,
                                   const nsAString& aMinorType,
                                   nsACString& aUnEscapedCommand);
   static nsresult GetFileLocation(const char* aPrefName,
                                   const char* aEnvVarName,
                                   nsAString& aFileLocation);
--- a/xpcom/system/nsIXULRuntime.idl
+++ b/xpcom/system/nsIXULRuntime.idl
@@ -197,9 +197,15 @@ interface nsIXULRuntime : nsISupports
    */
   readonly attribute boolean windowsDLLBlocklistStatus;
 
   /**
    * True if this application was started by the OS as part of an automatic
    * restart mechanism (such as RegisterApplicationRestart on Windows).
    */
   readonly attribute boolean restartedByOS;
+
+  /**
+   * Returns a value corresponding to one of the
+   * |mozilla::LauncherRegistryInfo::EnabledState| values.
+   */
+  readonly attribute uint32_t launcherProcessState;
 };
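
A minimal illustrative sketch (not part of this patch): how a C++ caller might consume the new launcherProcessState attribute added to nsIXULRuntime above. The lookup through the "@mozilla.org/xre/app-info;1" contract ID and the LogLauncherState helper name are assumptions for illustration only; the getter signature matches the nsAppRunner.cpp hunk in this changeset.

// LogLauncherState is a hypothetical helper, not part of this changeset.
#include <cstdio>
#include "nsCOMPtr.h"
#include "nsIXULRuntime.h"
#include "nsServiceManagerUtils.h"

static void LogLauncherState() {
  // Assumption: the app-info service implements nsIXULRuntime, as nsXULAppInfo does.
  nsCOMPtr<nsIXULRuntime> runtime =
      do_GetService("@mozilla.org/xre/app-info;1");
  if (!runtime) {
    return;
  }

  uint32_t state;
  if (NS_FAILED(runtime->GetLauncherProcessState(&state))) {
    // NS_ERROR_NOT_AVAILABLE is expected on platforms built without
    // XP_WIN && MOZ_LAUNCHER_PROCESS, per the nsAppRunner.cpp hunk above.
    return;
  }

  // Because EnabledState is now explicitly uint32_t with fixed values,
  // the round trip through the IDL attribute is well-defined:
  // 0 = Enabled, 1 = FailDisabled, 2 = ForceDisabled.
  printf("Launcher process state: %u\n", state);
}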