Merge autoland to mozilla-central r=merge a=merge
authorCosmin Sabou <csabou@mozilla.com>
Wed, 20 Dec 2017 23:40:31 +0200
changeset 396994 1fc9f886516a3c96edcedda5d9e17183bebd6f8b
parent 396980 cd50ddedbd7c4b4d976b4c290dcc1b1e20cdf66c (current diff)
parent 396993 62b281c39548aa349fd1141caed5d4340700bbb6 (diff)
child 397027 62dd5404cf55e29412d5fff8fe9105076b1ca437
push id33121
push usercsabou@mozilla.com
push dateWed, 20 Dec 2017 21:40:57 +0000
treeherdermozilla-central@1fc9f886516a [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersmerge, merge
milestone59.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge autoland to mozilla-central r=merge a=merge
--- a/browser/components/uitour/UITour-lib.js
+++ b/browser/components/uitour/UITour-lib.js
@@ -590,16 +590,43 @@ if (typeof Mozilla == "undefined") {
   Mozilla.UITour.showFirefoxAccounts = function(extraURLCampaignParams, email) {
     _sendEvent("showFirefoxAccounts", {
       extraURLCampaignParams: JSON.stringify(extraURLCampaignParams),
       email
     });
   };
 
   /**
+   * Request the browser open the "Connect Another Device" Firefox Accounts page.
+   *
+   * @param {Object} extraURLCampaignParams - An object containing additional
+   * parameters for the URL opened by the browser for reasons of promotional
+   * campaign tracking. Each attribute of the object must have a name that
+   * is a string, begins with "utm_" and contains only alphanumeric
+   * characters, dashes or underscores. The values may be any string and will
+   * automatically be encoded.
+   * @since 59
+   * @example
+   * // Will open https://accounts.firefox.com/connect_another_device?entrypoint=uitour
+   * Mozilla.UITour.showConnectAnotherDevice();
+   * @example
+   * // Will open:
+   * // https://accounts.firefox.com/connect_another_device?entrypoint=uitour&utm_foo=bar&utm_bar=baz
+   * Mozilla.UITour.showConnectAnotherDevice({
+   *   'utm_foo': 'bar',
+   *   'utm_bar': 'baz'
+   * });
+   */
+  Mozilla.UITour.showConnectAnotherDevice = function(extraURLCampaignParams) {
+    _sendEvent("showConnectAnotherDevice", {
+      extraURLCampaignParams: JSON.stringify(extraURLCampaignParams)
+    });
+  };
+
+  /**
   * Show a profile refresh/reset dialog, allowing users to choose to remove
    * add-ons and customizations as well as restore browser defaults, if possible.
    * `getConfiguration('canReset')` should first be used to determine whether
    * Refresh/Reset is possible for the user's build/profile.
    * @since 48
    * @see Mozilla.UITour.Configuration.CanReset
    */
   Mozilla.UITour.resetFirefox = function() {
--- a/browser/components/uitour/UITour.jsm
+++ b/browser/components/uitour/UITour.jsm
@@ -564,16 +564,30 @@ this.UITour = {
           }
 
           // We want to replace the current tab.
           browser.loadURI(url.href);
         });
         break;
       }
 
+      case "showConnectAnotherDevice": {
+        const url = new URL(Services.prefs.getCharPref("identity.fxaccounts.remote.connectdevice.uri"));
+        url.searchParams.append("entrypoint", "uitour");
+        // Call our helper to validate extraURLCampaignParams and populate URLSearchParams
+        if (!this._populateCampaignParams(url, data.extraURLCampaignParams)) {
+          log.warn("showConnectAnotherDevice: invalid campaign args specified");
+          return false;
+        }
+
+        // We want to replace the current tab.
+        browser.loadURI(url.href);
+        break;
+      }
+
       case "resetFirefox": {
         // Open a reset profile dialog window.
         if (ResetProfile.resetSupported()) {
           ResetProfile.openConfirmationDialog(window);
         }
         break;
       }
 
--- a/browser/extensions/formautofill/FormAutofillPreferences.jsm
+++ b/browser/extensions/formautofill/FormAutofillPreferences.jsm
@@ -86,16 +86,19 @@ FormAutofillPreferences.prototype = {
     formAutofillGroup.id = "formAutofillGroup";
     addressAutofill.id = "addressAutofill";
     addressAutofillLearnMore.id = "addressAutofillLearnMore";
 
     addressAutofill.setAttribute("data-subcategory", "address-autofill");
     addressAutofillLearnMore.setAttribute("value", this.bundle.GetStringFromName("learnMoreLabel"));
     addressAutofillCheckbox.setAttribute("label", this.bundle.GetStringFromName("autofillAddressesCheckbox"));
     savedAddressesBtn.setAttribute("label", this.bundle.GetStringFromName("savedAddressesBtnLabel"));
+    // Align the start to keep the savedAddressesBtn as original size
+    // when addressAutofillCheckboxGroup's height is changed by a longer l10n string
+    savedAddressesBtnWrapper.setAttribute("align", "start");
 
     addressAutofillLearnMore.setAttribute("href", learnMoreURL);
 
     // Add preferences search support
     savedAddressesBtn.setAttribute("searchkeywords", MANAGE_ADDRESSES_KEYWORDS.concat(EDIT_ADDRESS_KEYWORDS)
                                                        .map(key => this.bundle.GetStringFromName(key)).join("\n"));
 
     // Manually set the checked state
@@ -130,16 +133,19 @@ FormAutofillPreferences.prototype = {
 
       creditCardAutofill.id = "creditCardAutofill";
       creditCardAutofillLearnMore.id = "creditCardAutofillLearnMore";
 
       creditCardAutofill.setAttribute("data-subcategory", "credit-card-autofill");
       creditCardAutofillLearnMore.setAttribute("value", this.bundle.GetStringFromName("learnMoreLabel"));
       creditCardAutofillCheckbox.setAttribute("label", this.bundle.GetStringFromName("autofillCreditCardsCheckbox"));
       savedCreditCardsBtn.setAttribute("label", this.bundle.GetStringFromName("savedCreditCardsBtnLabel"));
+      // Align the start to keep the savedCreditCardsBtn as original size
+      // when creditCardAutofillCheckboxGroup's height is changed by a longer l10n string
+      savedCreditCardsBtnWrapper.setAttribute("align", "start");
 
       creditCardAutofillLearnMore.setAttribute("href", learnMoreURL);
 
       // Add preferences search support
       savedCreditCardsBtn.setAttribute("searchkeywords", MANAGE_CREDITCARDS_KEYWORDS.concat(EDIT_CREDITCARD_KEYWORDS)
                                                            .map(key => this.bundle.GetStringFromName(key)).join("\n"));
 
       // Manually set the checked state
--- a/browser/extensions/onboarding/content/onboarding-tour-agent.js
+++ b/browser/extensions/onboarding/content/onboarding-tour-agent.js
@@ -52,16 +52,19 @@ let onClick = evt => {
       Mozilla.UITour.showMenu("urlbar");
       break;
     case "onboarding-tour-sync-button":
       let emailInput = document.getElementById("onboarding-tour-sync-email-input");
       if (emailInput.checkValidity()) {
         Mozilla.UITour.showFirefoxAccounts(null, emailInput.value);
       }
       break;
+    case "onboarding-tour-sync-connect-device-button":
+      Mozilla.UITour.showConnectAnotherDevice();
+      break;
   }
   let classList = evt.target.classList;
   // On keyboard navigation the target would be .onboarding-tour-item.
   // On mouse clicking the target would be .onboarding-tour-item-container.
   if (classList.contains("onboarding-tour-item") || classList.contains("onboarding-tour-item-container")) {
     Mozilla.UITour.hideHighlight(); // Clean up UITour if a user tries to change to other tours.
   }
 };
--- a/browser/extensions/onboarding/content/onboarding.css
+++ b/browser/extensions/onboarding/content/onboarding.css
@@ -351,34 +351,23 @@
 }
 
 .onboarding-tour-content > iframe {
   width: 100%;
   height: 100%;
   border: none;
 }
 
-.onboarding-tour-page.onboarding-no-button > .onboarding-tour-content {
-  grid-row: tour-page-start / tour-page-end;
-  grid-column: tour-content-start / tour-page-end;
-}
-
 .onboarding-tour-button-container {
   /* Get higher z-index in order to ensure buttons within container are selectable */
   z-index: 2;
   grid-row: tour-button-start / tour-page-end;
   grid-column: tour-content-start / tour-page-end;
 }
 
-.onboarding-tour-page.onboarding-no-button > .onboarding-tour-button-container {
-  display: none;
-  grid-row: tour-page-end;
-  grid-column: tour-page-end;
-}
-
 .onboarding-tour-action-button {
   background: #0060df;
   /* With 1px transparent border, could see a border in the high-constrast mode */
   border: 1px solid transparent;
   border-radius: 2px;
   padding: 10px 20px;
   font-size: 14px;
   font-weight: 600;
--- a/browser/extensions/onboarding/content/onboarding.js
+++ b/browser/extensions/onboarding/content/onboarding.js
@@ -38,17 +38,16 @@ const ICON_STATE_DEFAULT = "default";
 *   // The method returning strings used on tour notification
  *   getNotificationStrings(bundle):
  *     - title: // The string of tour notification title
  *     - message: // The string of tour notification message
  *     - button: // The string of tour notification action button title
  *   // Return a div appended with elements for this tours.
  *   // Each tour should contain the following 3 sections in the div:
  *   // .onboarding-tour-description, .onboarding-tour-content, .onboarding-tour-button-container.
- *   // Add onboarding-no-button css class in the div if this tour does not need a button container.
  *   // If there was a .onboarding-tour-action-button present and was clicked, tour would be marked as completed.
  *   getPage() {},
  * },
  **/
 var onboardingTourset = {
   "private": {
     id: "onboarding-tour-private-browsing",
     tourNameId: "onboarding.tour-private-browsing",
@@ -179,17 +178,16 @@ var onboardingTourset = {
         message: bundle.GetStringFromName("onboarding.notification.onboarding-tour-sync.message"),
         button: bundle.GetStringFromName("onboarding.button.learnMore"),
       };
     },
     getPage(win, bundle) {
       const STATE_LOGOUT = "logged-out";
       const STATE_LOGIN = "logged-in";
       let div = win.document.createElement("div");
-      div.classList.add("onboarding-no-button");
       div.dataset.loginState = STATE_LOGOUT;
       // The email validation pattern used in the form comes from IETF rfc5321,
       // which is identical to server-side checker of Firefox Account. See
       // discussion in https://bugzilla.mozilla.org/show_bug.cgi?id=1378770#c6
       // for detail.
       let emailRegex = "^[\\w.!#$%&’*+\\/=?^`{|}~-]{1,64}@[a-z\\d](?:[a-z\\d-]{0,253}[a-z\\d])?(?:\\.[a-z\\d](?:[a-z\\d-]{0,253}[a-z\\d])?)+$";
       div.innerHTML = `
         <section class="onboarding-tour-description">
@@ -202,16 +200,19 @@ var onboardingTourset = {
           <form class="show-on-logged-out">
             <h3 data-l10n-id="onboarding.tour-sync.form.title"></h3>
             <p data-l10n-id="onboarding.tour-sync.form.description"></p>
             <input id="onboarding-tour-sync-email-input" type="email" required="true"></input><br />
             <button id="onboarding-tour-sync-button" class="onboarding-tour-action-button" data-l10n-id="onboarding.tour-sync.button"></button>
           </form>
           <img src="resource://onboarding/img/figure_sync.svg" role="presentation"/>
         </section>
+        <aside class="onboarding-tour-button-container show-on-logged-in">
+          <button id="onboarding-tour-sync-connect-device-button" class="onboarding-tour-action-button" data-l10n-id="onboarding.tour-sync.connect-device.button"></button>
+        </aside>
       `;
       let emailInput = div.querySelector("#onboarding-tour-sync-email-input");
       emailInput.placeholder =
         bundle.GetStringFromName("onboarding.tour-sync.email-input.placeholder");
       emailInput.pattern = emailRegex;
 
       div.addEventListener("beforeshow", () => {
         function loginStatusListener(msg) {
--- a/browser/extensions/onboarding/locales/en-US/onboarding.properties
+++ b/browser/extensions/onboarding/locales/en-US/onboarding.properties
@@ -79,16 +79,17 @@ onboarding.tour-sync.logged-in.description=Sync works when you’re signed in to %1$S on more than one device. Have a mobile device? Install the %1$S app and sign in to get your bookmarks, history, and passwords on the go.
 # as a title and followed by onboarding.tour-sync.form.description.
 onboarding.tour-sync.form.title=Create a Firefox Account
 # LOCALIZATION NOTE(onboarding.tour-sync.form.description): The description
 # continues after onboarding.tour-sync.form.title to create a complete sentence.
 # If it's not possible for your locale, you can translate this string as
 # "Continue to Firefox Sync" instead.
 onboarding.tour-sync.form.description=to continue to Firefox Sync
 onboarding.tour-sync.button=Next
+onboarding.tour-sync.connect-device.button=Connect Another Device
 onboarding.tour-sync.email-input.placeholder=Email
 onboarding.notification.onboarding-tour-sync.title=Pick up where you left off.
 onboarding.notification.onboarding-tour-sync.message=Still sending yourself links to save or read on your phone? Do it the easy way: get Sync and have the things you save here show up on all of your devices.
 
 onboarding.tour-library=Library
 onboarding.tour-library.title=Keep it together.
 # LOCALIZATION NOTE (onboarding.tour-library.description2): This string will be used in the library tour description. %1$S is brandShortName
 onboarding.tour-library.description2=Check out the new %1$S library in the redesigned toolbar. The library puts the things you’ve seen and saved to %1$S — your browsing history, bookmarks, Pocket list, and synced tabs — in one convenient place.
--- a/dom/media/ChannelMediaResource.cpp
+++ b/dom/media/ChannelMediaResource.cpp
@@ -552,24 +552,24 @@ already_AddRefed<nsIPrincipal>
 ChannelMediaResource::GetCurrentPrincipal()
 {
   MOZ_ASSERT(NS_IsMainThread());
   return do_AddRef(mSharedInfo->mPrincipal);
 }
 
 bool ChannelMediaResource::CanClone()
 {
-  return mCacheStream.IsAvailableForSharing();
+  return !mClosed && mCacheStream.IsAvailableForSharing();
 }
 
 already_AddRefed<BaseMediaResource>
 ChannelMediaResource::CloneData(MediaResourceCallback* aCallback)
 {
-  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
-  NS_ASSERTION(mCacheStream.IsAvailableForSharing(), "Stream can't be cloned");
+  MOZ_ASSERT(NS_IsMainThread());
+  MOZ_ASSERT(CanClone(), "Stream can't be cloned");
 
   RefPtr<ChannelMediaResource> resource =
     new ChannelMediaResource(aCallback, nullptr, mURI);
 
   resource->mIsLiveStream = mIsLiveStream;
   resource->mIsTransportSeekable = mIsTransportSeekable;
   resource->mSharedInfo = mSharedInfo;
   mSharedInfo->mResources.AppendElement(resource.get());
--- a/dom/media/MediaCache.cpp
+++ b/dom/media/MediaCache.cpp
@@ -259,22 +259,22 @@ public:
   {
   public:
     ResourceStreamIterator(MediaCache* aMediaCache, int64_t aResourceID)
       : mMediaCache(aMediaCache)
       , mResourceID(aResourceID)
       , mNext(0)
     {
     }
-    MediaCacheStream* Next()
+    MediaCacheStream* Next(AutoLock& aLock)
     {
       while (mNext < mMediaCache->mStreams.Length()) {
         MediaCacheStream* stream = mMediaCache->mStreams[mNext];
         ++mNext;
-        if (stream->GetResourceID() == mResourceID && !stream->IsClosed())
+        if (stream->GetResourceID() == mResourceID && !stream->IsClosed(aLock))
           return stream;
       }
       return nullptr;
     }
   private:
     MediaCache* mMediaCache;
     int64_t  mResourceID;
     uint32_t mNext;
@@ -1573,17 +1573,17 @@ MediaCache::Update()
       default:
         break;
     }
   }
 
   // Notify streams about the suspended status changes.
   for (uint32_t i = 0; i < mSuspendedStatusToNotify.Length(); ++i) {
     MediaCache::ResourceStreamIterator iter(this, mSuspendedStatusToNotify[i]);
-    while (MediaCacheStream* stream = iter.Next()) {
+    while (MediaCacheStream* stream = iter.Next(lock)) {
       stream->mClient->CacheClientNotifySuspendedStatusChanged(
         stream->AreAllStreamsForResourceSuspended(lock));
     }
   }
   mSuspendedStatusToNotify.Clear();
 }
 
 class UpdateEvent : public Runnable
@@ -1696,17 +1696,17 @@ MediaCache::AllocateAndWriteBlock(AutoLo
                                   MediaCacheStream::ReadMode aMode,
                                   Span<const uint8_t> aData1,
                                   Span<const uint8_t> aData2)
 {
   MOZ_ASSERT(sThread->IsOnCurrentThread());
 
   // Remove all cached copies of this block
   ResourceStreamIterator iter(this, aStream->mResourceID);
-  while (MediaCacheStream* stream = iter.Next()) {
+  while (MediaCacheStream* stream = iter.Next(aLock)) {
     while (aStreamBlockIndex >= int32_t(stream->mBlocks.Length())) {
       stream->mBlocks.AppendElement(-1);
     }
     if (stream->mBlocks[aStreamBlockIndex] >= 0) {
       // We no longer want to own this block
       int32_t globalBlockIndex = stream->mBlocks[aStreamBlockIndex];
       LOG("Released block %d from stream %p block %d(%" PRId64 ")",
           globalBlockIndex,
@@ -1728,17 +1728,17 @@ MediaCache::AllocateAndWriteBlock(AutoLo
     Block* block = &mIndex[blockIndex];
     LOG("Allocated block %d to stream %p block %d(%" PRId64 ")",
         blockIndex,
         aStream,
         aStreamBlockIndex,
         aStreamBlockIndex * BLOCK_SIZE);
 
     ResourceStreamIterator iter(this, aStream->mResourceID);
-    while (MediaCacheStream* stream = iter.Next()) {
+    while (MediaCacheStream* stream = iter.Next(aLock)) {
       BlockOwner* bo = block->mOwners.AppendElement();
       if (!bo) {
         // Roll back mOwners if any allocation fails.
         block->mOwners.Clear();
         return;
       }
       mBlockOwnersWatermark =
         std::max(mBlockOwnersWatermark, uint32_t(block->mOwners.Length()));
@@ -2116,17 +2116,17 @@ MediaCacheStream::NotifyDataReceived(uin
                                    remaining);
       memcpy(buf.Elements(), source.Elements(), source.Length());
       mChannelOffset += source.Length();
       break;
     }
   }
 
   MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID);
-  while (MediaCacheStream* stream = iter.Next()) {
+  while (MediaCacheStream* stream = iter.Next(lock)) {
     if (stream->mStreamLength >= 0) {
       // The stream is at least as long as what we've read
       stream->mStreamLength = std::max(stream->mStreamLength, mChannelOffset);
     }
     stream->mClient->CacheClientNotifyDataReceived();
   }
 
   // Notify in case there's a waiting reader
@@ -2248,17 +2248,17 @@ MediaCacheStream::NotifyDataEndedInterna
     return;
   }
 
   // Note we don't flush the partial block when download ends abnormally for
   // the padding zeros will give wrong data to other streams.
   FlushPartialBlockInternal(lock, true);
 
   MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID);
-  while (MediaCacheStream* stream = iter.Next()) {
+  while (MediaCacheStream* stream = iter.Next(lock)) {
     // We read the whole stream, so remember the true length
     stream->mStreamLength = mChannelOffset;
     if (!stream->mDidNotifyDataEnded) {
       stream->mDidNotifyDataEnded = true;
       stream->mNotifyDataEndedStatus = aStatus;
       stream->mClient->CacheClientNotifyDataEnded(aStatus);
     }
   }
@@ -2348,17 +2348,17 @@ MediaCacheStream::~MediaCacheStream()
 bool
 MediaCacheStream::AreAllStreamsForResourceSuspended(AutoLock& aLock)
 {
   MOZ_ASSERT(!NS_IsMainThread());
 
   MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID);
   // Look for a stream that's able to read the data we need
   int64_t dataOffset = -1;
-  while (MediaCacheStream* stream = iter.Next()) {
+  while (MediaCacheStream* stream = iter.Next(aLock)) {
     if (stream->mCacheSuspended || stream->mChannelEnded || stream->mClosed) {
       continue;
     }
     if (dataOffset < 0) {
       dataOffset = GetCachedDataEndInternal(aLock, mStreamOffset);
     }
     // Ignore streams that are reading beyond the data we need
     if (stream->mChannelOffset > dataOffset) {
@@ -2368,38 +2368,49 @@ MediaCacheStream::AreAllStreamsForResour
   }
 
   return true;
 }
 
 void
 MediaCacheStream::Close()
 {
-  NS_ASSERTION(NS_IsMainThread(), "Only call on main thread");
-
-  if (!mMediaCache || mClosed) {
+  MOZ_ASSERT(NS_IsMainThread());
+  if (!mMediaCache) {
     return;
   }
+  OwnerThread()->Dispatch(NS_NewRunnableFunction(
+    "MediaCacheStream::Close",
+    [ this, client = RefPtr<ChannelMediaResource>(mClient) ]() {
+      AutoLock lock(mMediaCache->Monitor());
+      CloseInternal(lock);
+    }));
+}
 
-  AutoLock lock(mMediaCache->Monitor());
+void
+MediaCacheStream::CloseInternal(AutoLock& aLock)
+{
+  MOZ_ASSERT(OwnerThread()->IsOnCurrentThread());
+
+  if (mClosed) {
+    return;
+  }
 
   // Closing a stream will change the return value of
   // MediaCacheStream::AreAllStreamsForResourceSuspended as well as
   // ChannelMediaResource::IsSuspendedByCache. Let's notify it.
-  mMediaCache->QueueSuspendedStatusUpdate(lock, mResourceID);
+  mMediaCache->QueueSuspendedStatusUpdate(aLock, mResourceID);
 
   mClosed = true;
-  mMediaCache->ReleaseStreamBlocks(lock, this);
+  mMediaCache->ReleaseStreamBlocks(aLock, this);
   // Wake up any blocked readers
-  lock.NotifyAll();
+  aLock.NotifyAll();
 
-  // Queue an Update since we may have created more free space. Don't do
-  // it from CloseInternal since that gets called by Update() itself
-  // sometimes, and we try to not to queue updates from Update().
-  mMediaCache->QueueUpdate(lock);
+  // Queue an Update since we may have created more free space.
+  mMediaCache->QueueUpdate(aLock);
 }
 
 void
 MediaCacheStream::Pin()
 {
   // TODO: Assert non-main thread.
   AutoLock lock(mMediaCache->Monitor());
   ++mPinCount;
@@ -2711,17 +2722,17 @@ MediaCacheStream::Read(char* aBuffer, ui
       continue;
     }
 
     // See if we can use the data in the partial block of any stream reading
     // this resource. Note we use the partial block only when it is completed,
     // that is reaching EOS.
     bool foundDataInPartialBlock = false;
     MediaCache::ResourceStreamIterator iter(mMediaCache, mResourceID);
-    while (MediaCacheStream* stream = iter.Next()) {
+    while (MediaCacheStream* stream = iter.Next(lock)) {
       if (OffsetToBlockIndexUnchecked(stream->mChannelOffset) ==
             OffsetToBlockIndexUnchecked(streamOffset) &&
           stream->mChannelOffset == stream->mStreamLength) {
         uint32_t bytes = stream->ReadPartialBlock(lock, streamOffset, buffer);
         streamOffset += bytes;
         buffer = buffer.From(bytes);
         foundDataInPartialBlock = true;
         break;
@@ -2857,17 +2868,16 @@ MediaCacheStream::Init(int64_t aContentL
   AutoLock lock(mMediaCache->Monitor());
   mMediaCache->OpenStream(lock, this);
   return NS_OK;
 }
 
 nsresult
 MediaCacheStream::InitAsClone(MediaCacheStream* aOriginal)
 {
-  MOZ_ASSERT(aOriginal->IsAvailableForSharing());
   MOZ_ASSERT(!mMediaCache, "Has been initialized.");
   MOZ_ASSERT(aOriginal->mMediaCache, "Don't clone an uninitialized stream.");
 
   AutoLock lock(aOriginal->mMediaCache->Monitor());
 
   if (aOriginal->mDidNotifyDataEnded &&
       NS_FAILED(aOriginal->mNotifyDataEndedStatus)) {
     // Streams that ended abnormally are ineligible for cloning.
--- a/dom/media/MediaCache.h
+++ b/dom/media/MediaCache.h
@@ -222,21 +222,20 @@ public:
 
   nsIEventTarget* OwnerThread() const;
 
   // These are called on the main thread.
   // This must be called (and return) before the ChannelMediaResource
   // used to create this MediaCacheStream is deleted.
   void Close();
   // This returns true when the stream has been closed.
-  // Must be used on the main thread or while holding the cache lock.
-  bool IsClosed() const { return mClosed; }
+  bool IsClosed(AutoLock&) const { return mClosed; }
  // Returns true when this stream can be shared by a new resource load.
   // Called on the main thread only.
-  bool IsAvailableForSharing() const { return !mClosed && !mIsPrivateBrowsing; }
+  bool IsAvailableForSharing() const { return !mIsPrivateBrowsing; }
 
   // These callbacks are called on the main thread by the client
   // when data has been received via the channel.
 
   // Notifies the cache that a load has begun. We pass the offset
   // because in some cases the offset might not be what the cache
   // requested. In particular we might unexpectedly start providing
   // data at offset 0. This need not be called if the offset is the
@@ -461,16 +460,18 @@ private:
                                  int64_t aLength);
 
   void NotifyDataEndedInternal(uint32_t aLoadID,
                                nsresult aStatus,
                                bool aReopenOnError);
 
   void UpdateDownloadStatistics(AutoLock&);
 
+  void CloseInternal(AutoLock&);
+
   // Instance of MediaCache to use with this MediaCacheStream.
   RefPtr<MediaCache> mMediaCache;
 
   ChannelMediaResource* const mClient;
 
   // The following fields must be written holding the cache's monitor and
   // only on the main thread, thus can be read either on the main thread
   // or while holding the cache's monitor.
--- a/dom/media/gtest/TestMP3Demuxer.cpp
+++ b/dom/media/gtest/TestMP3Demuxer.cpp
@@ -132,17 +132,17 @@ protected:
       res.mIsVBR = false;
       res.mFileSize = 191302;
       res.mMPEGLayer = 3;
       res.mMPEGVersion = 1;
       res.mID3MajorVersion = 3;
       res.mID3MinorVersion = 0;
       res.mID3Flags = 0;
       res.mID3Size = 115304;
-      res.mDuration = 3160816;
+      res.mDuration = 3166167;
       res.mDurationError = 0.001f;
       res.mSeekError = 0.02f;
       res.mSampleRate = 44100;
       res.mSamplesPerFrame = 1152;
       res.mNumSamples = 139392;
       res.mNumTrailingFrames = 0;
       res.mBitrate = 192000;
       res.mSlotSize = 1;
--- a/dom/media/mp3/MP3Demuxer.cpp
+++ b/dom/media/mp3/MP3Demuxer.cpp
@@ -386,26 +386,41 @@ MP3TrackDemuxer::Duration() const
     return TimeUnit::FromMicroseconds(-1);
   }
 
   int64_t numFrames = 0;
   const auto numAudioFrames = mParser.VBRInfo().NumAudioFrames();
   if (mParser.VBRInfo().IsValid() && numAudioFrames.valueOr(0) + 1 > 1) {
     // VBR headers don't include the VBR header frame.
     numFrames = numAudioFrames.value() + 1;
-  } else {
-    const int64_t streamLen = StreamLength();
-    if (streamLen < 0) {
-      // Unknown length, we can't estimate duration.
-      return TimeUnit::FromMicroseconds(-1);
-    }
-    if (AverageFrameLength() > 0) {
-      numFrames = (streamLen - mFirstFrameOffset) / AverageFrameLength();
-    }
+    return Duration(numFrames);
+  }
+
+  const int64_t streamLen = StreamLength();
+  if (streamLen < 0) { // Live streams.
+    // Unknown length, we can't estimate duration.
+    return TimeUnit::FromMicroseconds(-1);
   }
+  // We can't early return when streamLen < 0 before checking numAudioFrames
+  // since some live radio will give an opening remark before playing music
+  // and the duration of the opening talk can be calculated by numAudioFrames.
+
+  const int64_t size = streamLen - mFirstFrameOffset;
+  MOZ_ASSERT(size);
+
+  // If it's CBR, calculate the duration by bitrate.
+  if (!mParser.VBRInfo().IsValid()) {
+    const int32_t bitrate = mParser.CurrentFrame().Header().Bitrate();
+    return media::TimeUnit::FromSeconds(static_cast<double>(size) * 8 / bitrate);
+  }
+
+  if (AverageFrameLength() > 0) {
+    numFrames = size / AverageFrameLength();
+  }
+
   return Duration(numFrames);
 }
 
 TimeUnit
 MP3TrackDemuxer::Duration(int64_t aNumFrames) const
 {
   if (!mSamplesPerSecond) {
     return TimeUnit::FromMicroseconds(-1);
--- a/python/mozbuild/mozbuild/backend/recursivemake.py
+++ b/python/mozbuild/mozbuild/backend/recursivemake.py
@@ -196,27 +196,27 @@ class BackendMakeFile(object):
     invalidate all make targets across the whole tree! This would effectively
     undermine incremental builds as any mozbuild change would cause the entire
     tree to rebuild!
 
     The solution is to not update the mtimes of backend.mk files unless they
     actually change. We use FileAvoidWrite to accomplish this.
     """
 
-    def __init__(self, srcdir, objdir, environment, topsrcdir, topobjdir):
+    def __init__(self, srcdir, objdir, environment, topsrcdir, topobjdir, dry_run):
         self.topsrcdir = topsrcdir
         self.srcdir = srcdir
         self.objdir = objdir
         self.relobjdir = mozpath.relpath(objdir, topobjdir)
         self.environment = environment
         self.name = mozpath.join(objdir, 'backend.mk')
 
         self.xpt_name = None
 
-        self.fh = FileAvoidWrite(self.name, capture_diff=True)
+        self.fh = FileAvoidWrite(self.name, capture_diff=True, dry_run=dry_run)
         self.fh.write('# THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT EDIT.\n')
         self.fh.write('\n')
 
     def write(self, buf):
         self.fh.write(buf)
 
     def write_once(self, buf):
         if isinstance(buf, unicode):
@@ -428,17 +428,17 @@ class RecursiveMakeBackend(CommonBackend
                        makefile_in=self._makefile_in_count,
                        makefile_out=self._makefile_out_count)
         return summary
 
     def _get_backend_file_for(self, obj):
         if obj.objdir not in self._backend_files:
             self._backend_files[obj.objdir] = \
                 BackendMakeFile(obj.srcdir, obj.objdir, obj.config,
-                    obj.topsrcdir, self.environment.topobjdir)
+                    obj.topsrcdir, self.environment.topobjdir, self.dry_run)
         return self._backend_files[obj.objdir]
 
     def consume_object(self, obj):
         """Write out build files necessary to build with recursive make."""
 
         if not isinstance(obj, ContextDerived):
             return False
 
--- a/security/sandbox/mac/SandboxPolicies.h
+++ b/security/sandbox/mac/SandboxPolicies.h
@@ -70,23 +70,21 @@ static const char contentSandboxRules[] 
   ; Allow read access to standard system paths.
   (allow file-read*
     (require-all (file-mode #o0004)
       (require-any (subpath "/Library/Filesystems/NetFSPlugins")
         (subpath "/System")
         (subpath "/usr/lib")
         (subpath "/usr/share"))))
 
+  ; Top-level directory metadata access (bug 1404298)
+  (allow file-read-metadata (regex #"^/[^/]+$"))
+
   (allow file-read-metadata
-    (literal "/etc")
-    (literal "/tmp")
-    (literal "/var")
     (literal "/private/etc/localtime")
-    (literal "/home")
-    (literal "/net")
     (regex #"^/private/tmp/KSInstallAction\."))
 
   ; Allow read access to standard special files.
   (allow file-read*
     (literal "/dev/autofs_nowait")
     (literal "/dev/random")
     (literal "/dev/urandom"))
 
--- a/security/sandbox/test/browser_content_sandbox_fs.js
+++ b/security/sandbox/test/browser_content_sandbox_fs.js
@@ -83,16 +83,29 @@ function readFile(path) {
   let promise = OS.File.read(path).then(function (binaryData) {
     return {ok: true};
   }).catch(function (error) {
     return {ok: false};
   });
   return promise;
 }
 
+// Does a stat of |path| and returns a promise that resolves if the
+// stat is successful. Returned object has boolean .ok to indicate
+// success or failure.
+function statPath(path) {
+  Components.utils.import("resource://gre/modules/osfile.jsm");
+  let promise = OS.File.stat(path).then(function (stat) {
+    return {ok: true};
+  }).catch(function (error) {
+    return {ok: false};
+  });
+  return promise;
+}
+
 // Returns true if the current content sandbox level, passed in
 // the |level| argument, supports filesystem sandboxing.
 function isContentFileIOSandboxed(level) {
   let fileIOSandboxMinLevel = 0;
 
   // Set fileIOSandboxMinLevel to the lowest level that has
   // content filesystem sandboxing enabled. For now, this
   // varies across Windows, Mac, Linux, other.
@@ -342,91 +355,99 @@ async function testFileAccess() {
 
       let fontFile = GetFile(fontPath);
       tests.push({
         desc:     "font file",                  // description
         ok:       true,                         // expected to succeed?
         browser:  webBrowser,                   // browser to run test in
         file:     fontFile,                     // nsIFile object
         minLevel: minHomeReadSandboxLevel(),    // min level to enable test
+        func:     readFile,                     // the test function to use
       });
     }
     for (let fontPath of badFontTestPaths) {
       let result = await createFile(fontPath);
       Assert.ok(result, `${fontPath} created`);
 
       let fontFile = GetFile(fontPath);
       tests.push({
         desc:     "invalid font file",          // description
         ok:       false,                        // expected to succeed?
         browser:  webBrowser,                   // browser to run test in
         file:     fontFile,                     // nsIFile object
         minLevel: minHomeReadSandboxLevel(),    // min level to enable test
+        func:     readFile,                     // the test function to use
       });
     }
   }
 
   // The Linux test runners create the temporary profile in the same
   // system temp dir we give write access to, so this gives a false
   // positive.
   let profileDir = GetProfileDir();
   if (!isLinux()) {
     tests.push({
       desc:     "profile dir",                // description
       ok:       false,                        // expected to succeed?
       browser:  webBrowser,                   // browser to run test in
       file:     profileDir,                   // nsIFile object
       minLevel: minProfileReadSandboxLevel(), // min level to enable test
+      func:     readDir,
     });
   }
   if (fileContentProcessEnabled) {
     tests.push({
       desc:     "profile dir",
       ok:       true,
       browser:  fileBrowser,
       file:     profileDir,
       minLevel: 0,
+      func:     readDir,
     });
   }
 
   let homeDir = GetHomeDir();
   tests.push({
     desc:     "home dir",
     ok:       false,
     browser:  webBrowser,
     file:     homeDir,
     minLevel: minHomeReadSandboxLevel(),
+    func:     readDir,
   });
   if (fileContentProcessEnabled) {
     tests.push({
       desc:     "home dir",
       ok:       true,
       browser:  fileBrowser,
       file:     homeDir,
       minLevel: 0,
+      func:     readDir,
     });
   }
 
   let sysExtDevDir = GetSystemExtensionsDevDir();
   tests.push({
     desc:     "system extensions dev dir",
     ok:       true,
     browser:  webBrowser,
     file:     sysExtDevDir,
     minLevel: 0,
+    func:     readDir,
   });
 
   if (isWin()) {
     let extDir = GetPerUserExtensionDir();
     tests.push({
       desc:       "per-user extensions dir",
       ok:         true,
       browser:    webBrowser,
       file:       extDir,
       minLevel:   minHomeReadSandboxLevel(),
+      func:       readDir,
     });
   }
 
   if (isMac()) {
     // If ~/Library/Caches/TemporaryItems exists, when level <= 2 we
     // make sure it's readable. For level 3, we make sure it isn't.
     let homeTempDir = GetHomeDir();
     homeTempDir.appendRelativePath("Library/Caches/TemporaryItems");
@@ -440,16 +461,17 @@ async function testFileAccess() {
         minLevel = 0;
       }
       tests.push({
         desc:     "home library cache temp dir",
         ok:       shouldBeReadable,
         browser:  webBrowser,
         file:     homeTempDir,
         minLevel,
+        func:     readDir,
       });
     }
   }
 
   if (isMac() || isLinux()) {
     let varDir = GetDir("/var");
 
     if (isMac()) {
@@ -460,24 +482,26 @@ async function testFileAccess() {
     }
 
     tests.push({
       desc:     "/var",
       ok:       false,
       browser:  webBrowser,
       file:     varDir,
       minLevel: minHomeReadSandboxLevel(),
+      func:     readDir,
     });
     if (fileContentProcessEnabled) {
       tests.push({
         desc:     "/var",
         ok:       true,
         browser:  fileBrowser,
         file:     varDir,
         minLevel: 0,
+        func:     readDir,
       });
     }
   }
 
   if (isMac()) {
     // Test if we can read from $TMPDIR because we expect it
     // to be within /private/var. Reading from it should be
     // prevented in a 'web' process.
@@ -488,126 +512,202 @@ async function testFileAccess() {
       "$TMPDIR is in /private/var");
 
     tests.push({
       desc:     `$TMPDIR (${macTempDir.path})`,
       ok:       false,
       browser:  webBrowser,
       file:     macTempDir,
       minLevel: minHomeReadSandboxLevel(),
+      func:     readDir,
     });
     if (fileContentProcessEnabled) {
       tests.push({
         desc:     `$TMPDIR (${macTempDir.path})`,
         ok:       true,
         browser:  fileBrowser,
         file:     macTempDir,
         minLevel: 0,
+        func:     readDir,
       });
     }
 
     // Test that we cannot read from /Volumes at level 3
     let volumes = GetDir("/Volumes");
     tests.push({
       desc:     "/Volumes",
       ok:       false,
       browser:  webBrowser,
       file:     volumes,
       minLevel: minHomeReadSandboxLevel(),
+      func:     readDir,
     });
     // Test that we cannot read from /Network at level 3
     let network = GetDir("/Network");
     tests.push({
       desc:     "/Network",
       ok:       false,
       browser:  webBrowser,
       file:     network,
       minLevel: minHomeReadSandboxLevel(),
+      func:     readDir,
     });
     // Test that we cannot read from /Users at level 3
     let users = GetDir("/Users");
     tests.push({
       desc:     "/Users",
       ok:       false,
       browser:  webBrowser,
       file:     users,
       minLevel: minHomeReadSandboxLevel(),
+      func:     readDir,
+    });
+
+    // Test that we can stat /Users at level 3
+    tests.push({
+      desc:     "/Users",
+      ok:       true,
+      browser:  webBrowser,
+      file:     users,
+      minLevel: minHomeReadSandboxLevel(),
+      func:     statPath,
+    });
+
+    // Test that we can stat /Library at level 3, but can't
+    // stat something within /Library. This test uses "/Library"
+    // because it's a path that is expected to always be present
+    // and isn't something content processes have read access to
+    // (just read-metadata).
+    let libraryDir = GetDir("/Library");
+    tests.push({
+      desc:     "/Library",
+      ok:       true,
+      browser:  webBrowser,
+      file:     libraryDir,
+      minLevel: minHomeReadSandboxLevel(),
+      func:     statPath,
+    });
+    tests.push({
+      desc:     "/Library",
+      ok:       false,
+      browser:  webBrowser,
+      file:     libraryDir,
+      minLevel: minHomeReadSandboxLevel(),
+      func:     readDir,
+    });
+    let libraryWidgetsDir = GetDir("/Library/Widgets");
+    tests.push({
+      desc:     "/Library/Widgets",
+      ok:       false,
+      browser:  webBrowser,
+      file:     libraryWidgetsDir,
+      minLevel: minHomeReadSandboxLevel(),
+      func:     statPath,
+    });
+
+    // Similarly, test that we can stat /private, but not /private/etc.
+    let privateDir = GetDir("/private");
+    tests.push({
+      desc:     "/private",
+      ok:       true,
+      browser:  webBrowser,
+      file:     privateDir,
+      minLevel: minHomeReadSandboxLevel(),
+      func:     statPath,
+    });
+    let privateEtcDir = GetFile("/private/etc");
+    tests.push({
+      desc:     "/private/etc",
+      ok:       false,
+      browser:  webBrowser,
+      file:     privateEtcDir,
+      minLevel: minHomeReadSandboxLevel(),
+      func:     statPath,
     });
   }
 
   let extensionsDir = GetProfileEntry("extensions");
   if (extensionsDir.exists() && extensionsDir.isDirectory()) {
     tests.push({
       desc:     "extensions dir",
       ok:       true,
       browser:  webBrowser,
       file:     extensionsDir,
       minLevel: 0,
+      func:     readDir,
     });
   } else {
     ok(false, `${extensionsDir.path} is a valid dir`);
   }
 
   let chromeDir = GetProfileEntry("chrome");
   if (chromeDir.exists() && chromeDir.isDirectory()) {
     tests.push({
       desc:     "chrome dir",
       ok:       true,
       browser:  webBrowser,
       file:     chromeDir,
       minLevel: 0,
+      func:     readDir,
     });
   } else {
     ok(false, `${chromeDir.path} is valid dir`);
   }
 
   let cookiesFile = GetProfileEntry("cookies.sqlite");
   if (cookiesFile.exists() && !cookiesFile.isDirectory()) {
     // On Linux, the temporary profile used for tests is in the system
     // temp dir which content has read access to, so this test fails.
     if (!isLinux()) {
       tests.push({
         desc:     "cookies file",
         ok:       false,
         browser:  webBrowser,
         file:     cookiesFile,
         minLevel: minProfileReadSandboxLevel(),
+        func:     readFile,
       });
     }
     if (fileContentProcessEnabled) {
       tests.push({
         desc:     "cookies file",
         ok:       true,
         browser:  fileBrowser,
         file:     cookiesFile,
         minLevel: 0,
+        func:     readFile,
       });
     }
   } else {
     ok(false, `${cookiesFile.path} is a valid file`);
   }
 
   // remove tests not enabled by the current sandbox level
   tests = tests.filter((test) => (test.minLevel <= level));
 
   for (let test of tests) {
-    let testFunc = test.file.isDirectory() ? readDir : readFile;
     let okString = test.ok ? "allowed" : "blocked";
     let processType = test.browser === webBrowser ? "web" : "file";
 
+    // ensure the file/dir exists before we ask a content process to stat
+    // it so we know a failure is not due to a nonexistent file/dir
+    if (test.func === statPath) {
+      ok(test.file.exists(), `${test.file.path} exists`);
+    }
+
     let result = await ContentTask.spawn(test.browser, test.file.path,
-        testFunc);
+        test.func);
 
     ok(result.ok == test.ok,
         `reading ${test.desc} from a ${processType} process ` +
         `is ${okString} (${test.file.path})`);
 
     // if the directory is not expected to be readable,
     // ensure the listing has zero entries
-    if (test.file.isDirectory() && !test.ok) {
+    if (test.func === readDir && !test.ok) {
       ok(result.numEntries == 0, `directory list is empty (${test.file.path})`);
     }
   }
 
   if (fileContentProcessEnabled) {
     gBrowser.removeTab(gBrowser.selectedTab);
   }
 }
--- a/servo/components/canvas/canvas_paint_thread.rs
+++ b/servo/components/canvas/canvas_paint_thread.rs
@@ -6,17 +6,17 @@ use azure::azure::AzFloat;
 use azure::azure_hl::{AntialiasMode, CapStyle, CompositionOp, JoinStyle};
 use azure::azure_hl::{BackendType, DrawOptions, DrawTarget, Pattern, StrokeOptions, SurfaceFormat};
 use azure::azure_hl::{Color, ColorPattern, DrawSurfaceOptions, Filter, PathBuilder};
 use azure::azure_hl::{ExtendMode, GradientStop, LinearGradientPattern, RadialGradientPattern};
 use azure::azure_hl::SurfacePattern;
 use canvas_traits::canvas::*;
 use cssparser::RGBA;
 use euclid::{Transform2D, Point2D, Vector2D, Rect, Size2D};
-use ipc_channel::ipc::{self, IpcSender, IpcReceiver};
+use ipc_channel::ipc::{self, IpcSender};
 use num_traits::ToPrimitive;
 use std::borrow::ToOwned;
 use std::mem;
 use std::sync::Arc;
 use std::thread;
 use webrender_api;
 
 impl<'a> CanvasPaintThread<'a> {
--- a/taskcluster/ci/build/linux.yml
+++ b/taskcluster/ci/build/linux.yml
@@ -350,23 +350,24 @@ linux/pgo:
         - linux64-sccache
 
 linux-rusttests/opt:
     description: "Linux32 Rust tests Opt"
     index:
         product: firefox
         job-name: linux-rusttests-opt
     treeherder:
-        platform: linux32-rusttests/opt
+        platform: linux32/opt
         symbol: tc(BR)
         tier: 2
     worker-type: aws-provisioner-v1/gecko-{level}-b-linux
     worker:
         max-run-time: 5400
         env:
+            PERFHERDER_EXTRA_OPTIONS: rusttests
             TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/linux32/releng.manifest"
     run:
         using: mozharness
         actions: [get-secrets build check-test update]
         config:
             - builds/releng_base_firefox.py
             - builds/releng_base_linux_32_builds.py
             - balrog/production.py
@@ -384,23 +385,24 @@ linux-rusttests/opt:
         - linux64-sccache
 
 linux-rusttests/debug:
     description: "Linux32 Rust tests Debug"
     index:
         product: firefox
         job-name: linux-rusttests-debug
     treeherder:
-        platform: linux32-rusttests/debug
+        platform: linux32/debug
         symbol: tc(BR)
         tier: 2
     worker-type: aws-provisioner-v1/gecko-{level}-b-linux
     worker:
         max-run-time: 5400
         env:
+            PERFHERDER_EXTRA_OPTIONS: rusttests
             TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/linux32/releng.manifest"
     run:
         using: mozharness
         actions: [get-secrets build check-test update]
         config:
             - builds/releng_base_firefox.py
             - builds/releng_base_linux_32_builds.py
             - balrog/production.py
@@ -693,23 +695,24 @@ linux64-noopt/debug:
         - linux64-sccache
 
 linux64-rusttests/opt:
     description: "Linux64 Rust tests Opt"
     index:
         product: firefox
         job-name: linux64-rusttests-opt
     treeherder:
-        platform: linux64-rusttests/opt
+        platform: linux64/opt
         symbol: tc(BR)
         tier: 2
     worker-type: aws-provisioner-v1/gecko-{level}-b-linux
     worker:
         max-run-time: 5400
         env:
+            PERFHERDER_EXTRA_OPTIONS: rusttests
             TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/linux64/releng.manifest"
     run:
         using: mozharness
         actions: [get-secrets build check-test update]
         config:
             - builds/releng_base_firefox.py
             - builds/releng_base_linux_64_builds.py
             - balrog/production.py
@@ -727,23 +730,24 @@ linux64-rusttests/opt:
         - linux64-sccache
 
 linux64-rusttests/debug:
     description: "Linux64 Rust tests Debug"
     index:
         product: firefox
         job-name: linux64-rusttests-debug
     treeherder:
-        platform: linux64-rusttests/debug
+        platform: linux64/debug
         symbol: tc(BR)
         tier: 2
     worker-type: aws-provisioner-v1/gecko-{level}-b-linux
     worker:
         max-run-time: 5400
         env:
+            PERFHERDER_EXTRA_OPTIONS: rusttests
             TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/linux64/releng.manifest"
     run:
         using: mozharness
         actions: [get-secrets build check-test update]
         config:
             - builds/releng_base_firefox.py
             - builds/releng_base_linux_64_builds.py
             - balrog/production.py
--- a/taskcluster/ci/build/windows.yml
+++ b/taskcluster/ci/build/windows.yml
@@ -419,23 +419,24 @@ win32-noopt/debug:
         - win64-sccache
 
 win32-rusttests/opt:
     description: "Win32 Opt Rust tests"
     index:
         product: firefox
         job-name: win32-rusttests-opt
     treeherder:
-        platform: windows2012-32-rusttests/opt
+        platform: windows2012-32/opt
         symbol: tc(BR)
         tier: 2
     worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
     worker:
         max-run-time: 7200
         env:
+            PERFHERDER_EXTRA_OPTIONS: rusttests
             TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/win32/releng.manifest"
     run:
         using: mozharness
         options: [append-env-variables-from-configs]
         script: mozharness/scripts/fx_desktop_build.py
         config:
             - builds/releng_base_firefox.py
             - builds/taskcluster_base_windows.py
@@ -448,23 +449,24 @@ win32-rusttests/opt:
         - win64-sccache
 
 win64-rusttests/opt:
     description: "Win64 Opt Rust tests"
     index:
         product: firefox
         job-name: win64-rusttests-opt
     treeherder:
-        platform: windows2012-64-rusttests/opt
+        platform: windows2012-64/opt
         symbol: tc(BR)
         tier: 2
     worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
     worker:
         max-run-time: 7200
         env:
+            PERFHERDER_EXTRA_OPTIONS: rusttests
             TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/win64/releng.manifest"
     run:
         using: mozharness
         options: [append-env-variables-from-configs]
         script: mozharness/scripts/fx_desktop_build.py
         config:
             - builds/releng_base_firefox.py
             - builds/taskcluster_base_windows.py
--- a/taskcluster/ci/upload-symbols/kind.yml
+++ b/taskcluster/ci/upload-symbols/kind.yml
@@ -41,15 +41,14 @@ job-template:
         env:
             ARTIFACT_TASKID: {"task-reference": "<build>"}
             # {level} gets replaced in the upload_symbols transform
             SYMBOL_SECRET: "project/releng/gecko/build/level-{level}/gecko-symbol-upload"
     run:
         using: run-task
         command: >
             cd /builds/worker/checkouts/gecko &&
-            wget https://queue.taskcluster.net/v1/task/${ARTIFACT_TASKID}/artifacts/public/build/target.crashreporter-symbols-full.zip &&
-            ./mach python toolkit/crashreporter/tools/upload_symbols.py target.crashreporter-symbols-full.zip
+            ./mach python toolkit/crashreporter/tools/upload_symbols.py https://queue.taskcluster.net/v1/task/${ARTIFACT_TASKID}/artifacts/public/build/target.crashreporter-symbols-full.zip
         sparse-profile: upload-symbols
     optimization:
         only-if-dependencies-run: null
     scopes:
         - secrets:get:project/releng/gecko/build/level-{level}/gecko-symbol-upload
--- a/taskcluster/docker/recipes/install-mercurial.sh
+++ b/taskcluster/docker/recipes/install-mercurial.sh
@@ -8,23 +8,23 @@
 set -e
 
 # Detect OS.
 if [ -f /etc/lsb-release ]; then
     . /etc/lsb-release
 
     if [ "${DISTRIB_ID}" = "Ubuntu" -a "${DISTRIB_RELEASE}" = "16.04" ]; then
         HG_DEB=1
-        HG_DIGEST=dd4dd7759fe73985b6a0424b34a3036d130c26defdd866a9fdd7302e40c7417433b93f020497ceb40593eaead8e86be55e48340887015645202b47ff7b0d7ac6
-        HG_SIZE=181722
-        HG_FILENAME=mercurial_4.3.1_amd64.deb
+        HG_DIGEST=458746bd82b4732c72c611f1041f77a47a683bc75ff3f6ab7ed86ea394f48d94cd7e2d3d1d5b020906318a9a24bea27401a3a63d7e645514dbc2cb581621977f
+        HG_SIZE=193710
+        HG_FILENAME=mercurial_4.4.2_amd64.deb
 
-        HG_COMMON_DIGEST=045f7e07f1e2e0fef767b2f50a7e9ab37d5da0bfead5ddf473ae044b61a4566aed2d6f2706f52d227947d713ef8e89eb9a269288f08e52924e4de88a39cd7ac0
-        HG_COMMON_SIZE=2017628
-        HG_COMMON_FILENAME=mercurial-common_4.3.1_all.deb
+        HG_COMMON_DIGEST=8074efbfff974f0bbdd0c3be3d272cc7a634456921e04db31369fbec1c9256ddaf44bdbe120f6f33113d2be9324a1537048028ebaaf205c6659e476a757358fd
+        HG_COMMON_SIZE=2097892
+        HG_COMMON_FILENAME=mercurial-common_4.4.2_all.deb
     elif [ "${DISTRIB_ID}" = "Ubuntu" -a "${DISTRIB_RELEASE}" = "12.04" ]; then
         echo "Ubuntu 12.04 not supported"
         exit 1
     fi
 
     CERT_PATH=/etc/ssl/certs/ca-certificates.crt
 
 elif [ -f /etc/os-release ]; then
@@ -95,25 +95,25 @@ tooltool_fetch <<EOF
 ]
 EOF
 
     rpm -i ${HG_FILENAME}
 elif [ -n "${PIP_PATH}" ]; then
 tooltool_fetch <<EOF
 [
   {
-    "size": 5475042,
-    "digest": "4c42d06b7f111a3e825dd927704a30f88f0b2225cf87ab8954bf53a7fbc0edf561374dd49b13d9c10140d98ff5853a64acb5a744349727abae81d32da401922b",
+    "size": 5647013,
+    "digest": "3d1d103689eac4f50cc1005be44144b37d75ebfac3ff3b4fc90d6f41fbee46e107a168d04f2c366ce7cca2733ea4e5b5127df462af8e253f61a72f8938833993",
     "algorithm": "sha512",
-    "filename": "mercurial-4.3.1.tar.gz"
+    "filename": "mercurial-4.4.2.tar.gz"
   }
 ]
 EOF
 
-   ${PIP_PATH} install mercurial-4.3.1.tar.gz
+   ${PIP_PATH} install mercurial-4.4.2.tar.gz
 else
     echo "Do not know how to install Mercurial on this OS"
     exit 1
 fi
 
 chmod 644 /usr/local/mercurial/robustcheckout.py
 
 mkdir -p /etc/mercurial
--- a/testing/mozharness/external_tools/robustcheckout.py
+++ b/testing/mozharness/external_tools/robustcheckout.py
@@ -32,28 +32,44 @@ from mercurial import (
     cmdutil,
     hg,
     match as matchmod,
     registrar,
     scmutil,
     util,
 )
 
-testedwith = '3.7 3.8 3.9 4.0 4.1 4.2 4.3'
+# TRACKING hg43
+try:
+    from mercurial import configitems
+except ImportError:
+    configitems = None
+
+testedwith = '3.7 3.8 3.9 4.0 4.1 4.2 4.3 4.4'
 minimumhgversion = '3.7'
 
 cmdtable = {}
 
-# Mercurial 4.3 introduced registrar.command as a replacement for
+# TRACKING hg43 Mercurial 4.3 introduced registrar.command as a replacement for
 # cmdutil.command.
 if util.safehasattr(registrar, 'command'):
     command = registrar.command(cmdtable)
 else:
     command = cmdutil.command(cmdtable)
 
+# TRACKING hg43 Mercurial 4.3 introduced the config registrar. 4.4 requires
+# config items to be registered to avoid a devel warning
+if util.safehasattr(registrar, 'configitem'):
+    configtable = {}
+    configitem = registrar.configitem(configtable)
+
+    configitem('robustcheckout', 'retryjittermin', default=configitems.dynamicdefault)
+    configitem('robustcheckout', 'retryjittermax', default=configitems.dynamicdefault)
+
+
 # Mercurial 4.2 introduced the vfs module and deprecated the symbol in
 # scmutil.
 def getvfs():
     try:
         from mercurial.vfs import vfs
         return vfs
     except ImportError:
         return scmutil.vfs
@@ -201,19 +217,19 @@ def robustcheckout(ui, url, dest, upstre
 
     # Sparse profile support was added in Mercurial 4.3, where it was highly
     # experimental. Because of the fragility of it, we only support sparse
     # profiles on 4.3. When 4.4 is released, we'll need to opt in to sparse
     # support. We /could/ silently fall back to non-sparse when not supported.
     # However, given that sparse has performance implications, we want to fail
     # fast if we can't satisfy the desired checkout request.
     if sparseprofile:
-        if util.versiontuple(n=2) != (4, 3):
+        if util.versiontuple(n=2) not in ((4, 3), (4, 4)):
             raise error.Abort('sparse profile support only available for '
-                              'Mercurial 4.3 (using %s)' % util.version())
+                              'Mercurial versions greater than 4.3 (using %s)' % util.version())
 
         try:
             extensions.find('sparse')
         except KeyError:
             raise error.Abort('sparse extension must be enabled to use '
                               '--sparseprofile')
 
     ui.warn('(using Mercurial %s)\n' % util.version())
@@ -540,17 +556,17 @@ def _docheckout(ui, url, dest, upstream,
         purgeext = extensions.find('purge')
 
         # Mercurial 4.3 doesn't purge files outside the sparse checkout.
         # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
         # purging by monkeypatching the sparse matcher.
         try:
             old_sparse_fn = getattr(repo.dirstate, '_sparsematchfn', None)
             if old_sparse_fn is not None:
-                assert util.versiontuple(n=2) == (4, 3)
+                assert util.versiontuple(n=2) in ((4, 3), (4, 4))
                 repo.dirstate._sparsematchfn = lambda: matchmod.always(repo.root, '')
 
             if purgeext.purge(ui, repo, all=True, abort_on_err=True,
                               # The function expects all arguments to be
                               # defined.
                               **{'print': None, 'print0': None, 'dirs': None,
                                  'files': None}):
                 raise error.Abort('error purging')
--- a/toolkit/crashreporter/tools/upload_symbols.py
+++ b/toolkit/crashreporter/tools/upload_symbols.py
@@ -1,22 +1,22 @@
 #!/usr/bin/env python
 #
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 #
-# This script uploads a symbol zip file passed on the commandline
-# to the Tecken symbol upload API at https://symbols.mozilla.org/ .
+# This script uploads a symbol zip file from a path or URL passed on the commandline
+# to the symbol server at https://symbols.mozilla.org/ .
 #
 # Using this script requires you to have generated an authentication
-# token in the Tecken web interface. You must store the token in a Taskcluster
+# token in the symbol server web interface. You must store the token in a Taskcluster
 # secret as the JSON blob `{"token": "<token>"}` and set the `SYMBOL_SECRET`
-# environment variable to the name of the Taskcluster secret. Alternatively,
-# you can pu the token in a file and set SOCORRO_SYMBOL_UPLOAD_TOKEN_FILE
+# environment variable to the name of the Taskcluster secret. Alternatively,
+# you can put the token in a file and set `SOCORRO_SYMBOL_UPLOAD_TOKEN_FILE`
 # environment variable to the path to the file.
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 import argparse
 import logging
 import os
 import sys
@@ -63,24 +63,25 @@ def main():
 
     import redo
     import requests
 
     logging.basicConfig()
     parser = argparse.ArgumentParser(
         description='Upload symbols in ZIP using token from Taskcluster secrets service.')
     parser.add_argument('zip',
-                        help='Symbols zip file')
+                        help='Symbols zip file - URL or path to local file')
     args = parser.parse_args()
 
-    if not os.path.isfile(args.zip):
+    if not args.zip.startswith('http') and not os.path.isfile(args.zip):
         log.error('Error: zip file "{0}" does not exist!'.format(args.zip),
                   file=sys.stderr)
         return 1
 
+
     secret_name = os.environ.get('SYMBOL_SECRET')
     if secret_name is not None:
         auth_token = get_taskcluster_secret(secret_name)
     elif 'SOCORRO_SYMBOL_UPLOAD_TOKEN_FILE' in os.environ:
         token_file = os.environ['SOCORRO_SYMBOL_UPLOAD_TOKEN_FILE']
 
         if not os.path.isfile(token_file):
             log.error('SOCORRO_SYMBOL_UPLOAD_TOKEN_FILE "{0}" does not exist!'.format(token_file), file=sys.stderr)
@@ -100,22 +101,29 @@ def main():
     else:
         url = DEFAULT_URL
 
     log.info('Uploading symbol file "{0}" to "{1}"'.format(args.zip, url))
 
     for i, _ in enumerate(redo.retrier(attempts=MAX_RETRIES), start=1):
         log.info('Attempt %d of %d...' % (i, MAX_RETRIES))
         try:
+            if args.zip.startswith('http'):
+                zip_arg = {'data': {'url': args.zip}}
+            else:
+                zip_arg = {'files': {'symbols.zip': open(args.zip, 'rb')}}
             r = requests.post(
                 url,
-                files={'symbols.zip': open(args.zip, 'rb')},
                 headers={'Auth-Token': auth_token},
                 allow_redirects=False,
-                timeout=120)
+                # Allow a longer read timeout because uploading by URL means the server
+                # has to fetch the entire zip file, which can take a while. The load balancer
+                # in front of symbols.mozilla.org has a 300 second timeout, so we'll use that.
+                timeout=(10, 300),
+                **zip_arg)
             # 500 is likely to be a transient failure.
             # Break out for success or other error codes.
             if r.status_code < 500:
                 break
             print_error(r)
         except requests.exceptions.RequestException as e:
             log.error('Error: {0}'.format(e))
         log.info('Retrying...')