Merge mozilla-central to autoland. a=merge
author: Cosmin Sabou <csabou@mozilla.com>
Thu, 04 Oct 2018 02:36:53 +0300
changeset 495209 ff773ba8e86997a8b7bb049ee6baf70b25ec013d
parent 495208 801d7d6ee6f046561c78710a9fb14af8b5a8bb39 (current diff)
parent 495193 65fc22a1627b7de54303275dbe2422a61080865c (diff)
child 495210 9c6662e31204379c3e6f8dc4b0909f95b573664f
push id: 9984
push user: ffxbld-merge
push date: Mon, 15 Oct 2018 21:07:35 +0000
treeherder: mozilla-beta@183d27ea8570 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: merge
milestone: 64.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Merge mozilla-central to autoland. a=merge
testing/web-platform/meta/css/css-images/multiple-position-color-stop-linear.html.ini
testing/web-platform/meta/css/css-images/multiple-position-color-stop-radial.html.ini
testing/web-platform/meta/webvr/webvr-enabled-by-feature-policy-attribute.https.sub.html.ini
--- a/browser/config/mozconfigs/linux32/nightly
+++ b/browser/config/mozconfigs/linux32/nightly
@@ -1,8 +1,7 @@
 . "$topsrcdir/browser/config/mozconfigs/linux32/common-opt"
 
 ac_add_options --enable-verify-mar
-ac_add_options --enable-dmd
 
 ac_add_options --with-branding=browser/branding/nightly
 
 . "$topsrcdir/build/mozconfig.common.override"
new file mode 100644
--- /dev/null
+++ b/browser/config/mozconfigs/linux32/opt-dmd
@@ -0,0 +1,3 @@
+ac_add_options --enable-dmd
+
+. "$topsrcdir/browser/config/mozconfigs/linux32/nightly"
--- a/browser/config/mozconfigs/linux32/valgrind
+++ b/browser/config/mozconfigs/linux32/valgrind
@@ -1,11 +1,10 @@
 . $topsrcdir/browser/config/mozconfigs/linux32/nightly
 
 ac_add_options --enable-valgrind
-ac_add_options --disable-dmd
 ac_add_options --disable-jemalloc
 ac_add_options --disable-install-strip
 ac_add_options --disable-gtest-in-build
 
 # Include the override mozconfig again (even though the above includes it)
 # since it's supposed to override everything.
 . "$topsrcdir/build/mozconfig.common.override"
--- a/browser/config/mozconfigs/linux64/nightly
+++ b/browser/config/mozconfigs/linux64/nightly
@@ -1,8 +1,7 @@
 . "$topsrcdir/browser/config/mozconfigs/linux64/common-opt"
 
 ac_add_options --enable-verify-mar
-ac_add_options --enable-dmd
 
 ac_add_options --with-branding=browser/branding/nightly
 
 . "$topsrcdir/build/mozconfig.common.override"
new file mode 100644
--- /dev/null
+++ b/browser/config/mozconfigs/linux64/opt-dmd
@@ -0,0 +1,3 @@
+ac_add_options --enable-dmd
+
+. "$topsrcdir/browser/config/mozconfigs/linux64/nightly"
--- a/browser/config/mozconfigs/linux64/valgrind
+++ b/browser/config/mozconfigs/linux64/valgrind
@@ -1,11 +1,10 @@
 . $topsrcdir/browser/config/mozconfigs/linux64/nightly
 
 ac_add_options --enable-valgrind
-ac_add_options --disable-dmd
 ac_add_options --disable-jemalloc
 ac_add_options --disable-install-strip
 ac_add_options --disable-gtest-in-build
 
 # Include the override mozconfig again (even though the above includes it)
 # since it's supposed to override everything.
 . "$topsrcdir/build/mozconfig.common.override"
--- a/browser/config/mozconfigs/macosx64/nightly
+++ b/browser/config/mozconfigs/macosx64/nightly
@@ -1,14 +1,13 @@
 . "$topsrcdir/browser/config/mozconfigs/macosx64/common-opt"
 
 ac_add_options --disable-install-strip
 ac_add_options --enable-verify-mar
 ac_add_options --enable-instruments
-ac_add_options --enable-dmd
 
 # Cross-compiled builds fail when dtrace is enabled
 if test `uname -s` != Linux; then
   ac_add_options --enable-dtrace
 fi
 
 ac_add_options --enable-lto
 
new file mode 100644
--- /dev/null
+++ b/browser/config/mozconfigs/macosx64/opt-dmd
@@ -0,0 +1,5 @@
+ac_add_options --enable-dmd
+
+. "$topsrcdir/browser/config/mozconfigs/macosx64/nightly"
+
+ac_add_options --disable-lto
--- a/browser/config/mozconfigs/whitelist
+++ b/browser/config/mozconfigs/whitelist
@@ -4,17 +4,16 @@ whitelist = {
     'release': {},
     'nightly': {},
     }
 
 all_platforms = ['win64', 'win32', 'linux32', 'linux64', 'macosx64']
 
 for platform in all_platforms:
     whitelist['nightly'][platform] = [
-        'ac_add_options --enable-dmd',
         'ac_add_options --with-branding=browser/branding/nightly',
     ]
 
 whitelist['nightly']['macosx64'] += [
     'ac_add_options --disable-install-strip',
     'ac_add_options --enable-instruments',
     'ac_add_options --enable-dtrace',
     'if test `uname -s` != Linux; then',
--- a/browser/config/mozconfigs/win32/nightly
+++ b/browser/config/mozconfigs/win32/nightly
@@ -1,9 +1,8 @@
 . "$topsrcdir/build/mozconfig.win-common"
 . "$topsrcdir/browser/config/mozconfigs/win32/common-opt"
 
 ac_add_options --enable-verify-mar
-ac_add_options --enable-dmd
 
 ac_add_options --with-branding=browser/branding/nightly
 
 . "$topsrcdir/build/mozconfig.common.override"
new file mode 100644
--- /dev/null
+++ b/browser/config/mozconfigs/win32/opt-dmd
@@ -0,0 +1,3 @@
+ac_add_options --enable-dmd
+
+. "$topsrcdir/browser/config/mozconfigs/win32/nightly"
--- a/browser/config/mozconfigs/win64/nightly
+++ b/browser/config/mozconfigs/win64/nightly
@@ -1,10 +1,9 @@
 . "$topsrcdir/build/mozconfig.win-common"
 . "$topsrcdir/browser/config/mozconfigs/win64/common-win64"
 . "$topsrcdir/browser/config/mozconfigs/win64/common-opt"
 
 ac_add_options --enable-verify-mar
-ac_add_options --enable-dmd
 
 ac_add_options --with-branding=browser/branding/nightly
 
 . "$topsrcdir/build/mozconfig.common.override"
new file mode 100644
--- /dev/null
+++ b/browser/config/mozconfigs/win64/opt-dmd
@@ -0,0 +1,3 @@
+ac_add_options --enable-dmd
+
+. "$topsrcdir/browser/config/mozconfigs/win64/nightly"
--- a/devtools/client/debugger/new/test/mochitest/browser_dbg-stepping.js
+++ b/devtools/client/debugger/new/test/mochitest/browser_dbg-stepping.js
@@ -19,11 +19,11 @@ add_task(async function test() {
   await stepIn(dbg);
   await stepIn(dbg);
   await stepIn(dbg);
   await stepIn(dbg);
   await stepIn(dbg);
   await stepIn(dbg);
   await stepIn(dbg);
 
-  assertDebugLine(dbg, 42308);
+  assertDebugLine(dbg, 42267);
   assertPausedLocation(dbg);
 });
--- a/devtools/client/debugger/test/mochitest/browser_dbg_split-console-keypress.js
+++ b/devtools/client/debugger/test/mochitest/browser_dbg_split-console-keypress.js
@@ -44,22 +44,23 @@ function test() {
 
     // Information for sub-tests. When 'key' is synthesized 'keyRepeat' times,
     // cursor should be at 'caretLine' of this test..
     let stepTests = [
       {key: "KEY_F11", keyRepeat: 1, caretLine: 16},
       {key: "KEY_F11", keyRepeat: 2, caretLine: 18},
       {key: "KEY_F11", keyRepeat: 2, caretLine: 27},
       {key: "KEY_F10", keyRepeat: 1, caretLine: 27},
-      {key: "KEY_F11", keyRepeat: 1, caretLine: 18},
-      {key: "KEY_F11", keyRepeat: 5, caretLine: 32},
-      {key: "KEY_F11", modifier:"Shift", keyRepeat: 1, caretLine: 29},
-      {key: "KEY_F11", modifier:"Shift", keyRepeat: 2, caretLine: 34},
-      {key: "KEY_F11", modifier:"Shift", keyRepeat: 2, caretLine: 34}
+      {key: "KEY_F11", keyRepeat: 1, caretLine: 19},
+      {key: "KEY_F11", keyRepeat: 5, caretLine: 29},
+      {key: "KEY_F11", modifier:"Shift", keyRepeat: 1, caretLine: 32},
+      {key: "KEY_F11", modifier:"Shift", keyRepeat: 1, caretLine: 34},
+      {key: "KEY_F11", modifier:"Shift", keyRepeat: 1, caretLine: 34}
     ];
+
     // Trigger script that stops at debugger statement
     executeSoon(() => generateMouseClickInTab(gTab,
       "content.document.getElementById('start')"));
     yield waitForPause(gThreadClient);
 
     // Focus the console and add event listener to track whether it loses focus
     // (Must happen after generateMouseClickInTab() call)
     let consoleLostFocus = false;
--- a/devtools/server/actors/thread.js
+++ b/devtools/server/actors/thread.js
@@ -83,16 +83,17 @@ const ThreadActor = ActorClassWithSpec(t
 
     this.global = global;
 
     this._allEventsListener = this._allEventsListener.bind(this);
     this.onNewSourceEvent = this.onNewSourceEvent.bind(this);
     this.onUpdatedSourceEvent = this.onUpdatedSourceEvent.bind(this);
 
     this.uncaughtExceptionHook = this.uncaughtExceptionHook.bind(this);
+    this.createCompletionGrip = this.createCompletionGrip.bind(this);
     this.onDebuggerStatement = this.onDebuggerStatement.bind(this);
     this.onNewScript = this.onNewScript.bind(this);
     this.objectGrip = this.objectGrip.bind(this);
     this.pauseObjectGrip = this.pauseObjectGrip.bind(this);
     this._onWindowReady = this._onWindowReady.bind(this);
     this._onOpeningRequest = this._onOpeningRequest.bind(this);
     EventEmitter.on(this._parent, "window-ready", this._onWindowReady);
 
@@ -517,45 +518,53 @@ const ThreadActor = ActorClassWithSpec(t
       if (this.sources.isBlackBoxed(url)) {
         return undefined;
       }
 
       return pauseAndRespond(frame);
     };
   },
 
-  _makeOnPop: function({ thread, pauseAndRespond, createValueGrip: createValueGripHook,
-                          startLocation }) {
+  _makeOnPop: function({ thread, pauseAndRespond, startLocation, steppingType }) {
     const result = function(completion) {
       // onPop is called with 'this' set to the current frame.
       const generatedLocation = thread.sources.getFrameLocation(this);
-      const { originalSourceActor } = thread.unsafeSynchronize(
+      const originalLocation = thread.unsafeSynchronize(
         thread.sources.getOriginalLocation(generatedLocation)
       );
 
+      const { originalSourceActor } = originalLocation;
       const url = originalSourceActor.url;
 
       if (thread.sources.isBlackBoxed(url)) {
         return undefined;
       }
 
       // Note that we're popping this frame; we need to watch for
       // subsequent step events on its caller.
       this.reportedPop = true;
 
+      if (steppingType == "finish") {
+        const parentFrame = thread._getNextStepFrame(this);
+        if (parentFrame && parentFrame.script) {
+          const { onStep } = thread._makeSteppingHooks(
+            originalLocation, "next", false, completion
+          );
+          parentFrame.onStep = onStep;
+          return undefined;
+        }
+      }
+
       return pauseAndRespond(this, packet => {
-        packet.why.frameFinished = {};
-        if (!completion) {
-          packet.why.frameFinished.terminated = true;
-        } else if (completion.hasOwnProperty("return")) {
-          packet.why.frameFinished.return = createValueGripHook(completion.return);
-        } else if (completion.hasOwnProperty("yield")) {
-          packet.why.frameFinished.return = createValueGripHook(completion.yield);
+        if (completion) {
+          thread.createCompletionGrip(packet, completion);
         } else {
-          packet.why.frameFinished.throw = createValueGripHook(completion.throw);
+          packet.why.frameFinished = {
+            terminated: true
+          };
         }
         return packet;
       });
     };
 
     // When stepping out, we don't want to stop at a breakpoint that
     // happened to be set exactly at the spot where we stepped out.
     // See bug 970469.  We record the original location here and check
@@ -564,17 +573,17 @@ const ThreadActor = ActorClassWithSpec(t
     // frame, if we did we'd also have to find the appropriate spot to
     // clear it.
     result.originalLocation = startLocation;
 
     return result;
   },
 
   // Return whether reaching a script offset should be considered a distinct
-  // "step" from another location in the same frame.
+  // "step" from another location.
   _intraFrameLocationIsStepTarget: function(startLocation, script, offset) {
     // Only allow stepping stops at entry points for the line.
     if (!script.getOffsetLocation(offset).isEntryPoint) {
       return false;
     }
 
     // Cases when we have executed enough within a frame to consider a "step"
     // to have occured:
@@ -616,17 +625,17 @@ const ThreadActor = ActorClassWithSpec(t
     }
 
     // NOTE: if we do not find a pause point we want to
     // fall back on the old behavior (Case 3)
     return lineChanged;
   },
 
   _makeOnStep: function({ thread, pauseAndRespond, startFrame,
-                          startLocation, steppingType }) {
+                          startLocation, steppingType, completion, rewinding }) {
     // Breaking in place: we should always pause.
     if (steppingType === "break") {
       return () => pauseAndRespond(this);
     }
 
     // Otherwise take what a "step" means into consideration.
     return function() {
       // onStep is called with 'this' set to the current frame.
@@ -641,33 +650,55 @@ const ThreadActor = ActorClassWithSpec(t
       // 1. We are in a source mapped region, but inside a null mapping
       //    (doesn't correlate to any region of original source)
       // 2. The source we are in is black boxed.
       if (newLocation.originalUrl == null
           || thread.sources.isBlackBoxed(newLocation.originalUrl)) {
         return undefined;
       }
 
-      // A step has occurred if we have changed frames.
-      if (this !== startFrame) {
+      // A step has occurred if we are rewinding and have changed frames.
+      if (rewinding && this !== startFrame) {
         return pauseAndRespond(this);
       }
 
       // A step has occurred if we reached a step target.
       if (thread._intraFrameLocationIsStepTarget(startLocation,
                                                  this.script, this.offset)) {
-        return pauseAndRespond(this);
+        return pauseAndRespond(
+          this,
+          packet => thread.createCompletionGrip(packet, completion)
+        );
       }
 
       // Otherwise, let execution continue (we haven't executed enough code to
       // consider this a "step" yet).
       return undefined;
     };
   },
 
+  createCompletionGrip: function(packet, completion) {
+    if (!completion) {
+      return packet;
+    }
+
+    const createGrip = value => createValueGrip(value, this._pausePool, this.objectGrip);
+    packet.why.frameFinished = {};
+
+    if (completion.hasOwnProperty("return")) {
+      packet.why.frameFinished.return = createGrip(completion.return);
+    } else if (completion.hasOwnProperty("yield")) {
+      packet.why.frameFinished.return = createGrip(completion.yield);
+    } else if (completion.hasOwnProperty("throw")) {
+      packet.why.frameFinished.throw = createGrip(completion.throw);
+    }
+
+    return packet;
+  },
+
   /**
    * When replaying, we need to specify the offsets where a frame's onStep hook
    * should fire. Given that we are stepping forward (rewind == false) or
    * backwards (rewinding == true), return an array of all the step targets
    * that could be reached next from startLocation.
    */
   _findReplayingStepOffsets: function(startLocation, frame, rewinding) {
     const worklist = [frame.offset], seen = [], result = [];
@@ -691,33 +722,33 @@ const ThreadActor = ActorClassWithSpec(t
       }
     }
     return result;
   },
 
   /**
    * Define the JS hook functions for stepping.
    */
-  _makeSteppingHooks: function(startLocation, steppingType, rewinding) {
+  _makeSteppingHooks: function(startLocation, steppingType, rewinding, completion) {
     // Bind these methods and state because some of the hooks are called
     // with 'this' set to the current frame. Rather than repeating the
     // binding in each _makeOnX method, just do it once here and pass it
     // in to each function.
     const steppingHookState = {
       pauseAndRespond: (frame, onPacket = k=>k) => this._pauseAndRespond(
         frame,
         { type: "resumeLimit" },
         onPacket
       ),
-      createValueGrip: v => createValueGrip(v, this._pausePool, this.objectGrip),
       thread: this,
       startFrame: this.youngestFrame,
       startLocation: startLocation,
       steppingType: steppingType,
-      rewinding: rewinding
+      rewinding: rewinding,
+      completion
     };
 
     return {
       onEnterFrame: this._makeOnEnterFrame(steppingHookState),
       onPop: this._makeOnPop(steppingHookState),
       onStep: this._makeOnStep(steppingHookState)
     };
   },
new file mode 100644
--- /dev/null
+++ b/devtools/server/tests/unit/completions.js
@@ -0,0 +1,24 @@
+"use strict";
+/* exported global doRet doThrow */
+
+function ret() {
+  return 2;
+}
+
+function throws() {
+  throw new Error("yo");
+}
+
+function doRet() {
+  debugger;
+  const r = ret();
+  return r;
+}
+
+function doThrow() {
+  debugger;
+  try {
+    throws();
+  } catch (e) {
+  }
+}
--- a/devtools/server/tests/unit/head_dbg.js
+++ b/devtools/server/tests/unit/head_dbg.js
@@ -841,8 +841,37 @@ function getInflatedStackLocations(threa
     const frame = frameTable.data[stackEntry[STACK_FRAME_SLOT]];
     locations.push(stringTable[frame[FRAME_LOCATION_SLOT]]);
     stackIndex = stackEntry[STACK_PREFIX_SLOT];
   }
 
   // The profiler tree is inverted, so reverse the array.
   return locations.reverse();
 }
+
+async function setupTestFromUrl(url) {
+  do_test_pending();
+
+  const { createRootActor } = require("xpcshell-test/testactors");
+  DebuggerServer.setRootActor(createRootActor);
+  DebuggerServer.init(() => true);
+
+  const global = createTestGlobal("test");
+  DebuggerServer.addTestGlobal(global);
+
+  const debuggerClient = new DebuggerClient(DebuggerServer.connectPipe());
+  await connect(debuggerClient);
+
+  const { tabs } = await listTabs(debuggerClient);
+  const tab = findTab(tabs, "test");
+  const [, tabClient] = await attachTarget(debuggerClient, tab);
+
+  const [, threadClient] = await attachThread(tabClient);
+  await resume(threadClient);
+
+  const sourceUrl = getFileUrl(url);
+  const promise = waitForNewSource(threadClient, sourceUrl);
+  loadSubScript(sourceUrl, global);
+  const { source } = await promise;
+
+  const sourceClient = threadClient.source(source);
+  return { global, debuggerClient, threadClient, sourceClient };
+}
new file mode 100644
--- /dev/null
+++ b/devtools/server/tests/unit/stepping.js
@@ -0,0 +1,27 @@
+"use strict";
+/* exported global arithmetic composition chaining */
+
+const obj = { b };
+
+function a() {
+  return obj;
+}
+
+function b() {
+  return 2;
+}
+
+function arithmetic() {
+  debugger;
+  a() + b();
+}
+
+function composition() {
+  debugger;
+  b(a());
+}
+
+function chaining() {
+  debugger;
+  a().b();
+}
--- a/devtools/server/tests/unit/test_breakpoint-13.js
+++ b/devtools/server/tests/unit/test_breakpoint-13.js
@@ -63,28 +63,23 @@ function test_simple_breakpoint() {
           // Check that the breakpoint wasn't the reason for this pause, but
           // that the frame is about to be popped while stepping.
           Assert.equal(packet.frame.where.line, gDebuggee.line0 + 3);
           Assert.notEqual(packet.why.type, "breakpoint");
           Assert.equal(packet.why.type, "resumeLimit");
           Assert.equal(packet.why.frameFinished.return.type, "undefined");
         },
         function(packet) {
-          // The foo function call frame was just popped from the stack.
+          // Check that the debugger statement wasn't the reason for this pause.
           Assert.equal(gDebuggee.a, 1);
           Assert.equal(gDebuggee.b, undefined);
-          Assert.equal(packet.frame.where.line, gDebuggee.line0 + 5);
-          Assert.equal(packet.why.type, "resumeLimit");
-          Assert.equal(packet.poppedFrames.length, 1);
-        },
-        function(packet) {
-          // Check that the debugger statement wasn't the reason for this pause.
           Assert.equal(packet.frame.where.line, gDebuggee.line0 + 6);
           Assert.notEqual(packet.why.type, "debuggerStatement");
           Assert.equal(packet.why.type, "resumeLimit");
+          Assert.equal(packet.poppedFrames.length, 1);
         },
         function(packet) {
           // Check that the debugger statement wasn't the reason for this pause.
           Assert.equal(packet.frame.where.line, gDebuggee.line0 + 7);
           Assert.notEqual(packet.why.type, "debuggerStatement");
           Assert.equal(packet.why.type, "resumeLimit");
         },
       ];
--- a/devtools/server/tests/unit/test_breakpoint-14.js
+++ b/devtools/server/tests/unit/test_breakpoint-14.js
@@ -61,28 +61,23 @@ function test_simple_breakpoint() {
         function(packet) {
           // The frame is about to be popped while stepping.
           Assert.equal(packet.frame.where.line, gDebuggee.line0 + 3);
           Assert.notEqual(packet.why.type, "breakpoint");
           Assert.equal(packet.why.type, "resumeLimit");
           Assert.equal(packet.why.frameFinished.return.type, "undefined");
         },
         function(packet) {
-          // The foo function call frame was just popped from the stack.
+          // Check that the debugger statement wasn't the reason for this pause.
           Assert.equal(gDebuggee.a, 1);
           Assert.equal(gDebuggee.b, undefined);
-          Assert.equal(packet.frame.where.line, gDebuggee.line0 + 5);
-          Assert.equal(packet.why.type, "resumeLimit");
-          Assert.equal(packet.poppedFrames.length, 1);
-        },
-        function(packet) {
-          // Check that the debugger statement wasn't the reason for this pause.
           Assert.equal(packet.frame.where.line, gDebuggee.line0 + 6);
           Assert.notEqual(packet.why.type, "debuggerStatement");
           Assert.equal(packet.why.type, "resumeLimit");
+          Assert.equal(packet.poppedFrames.length, 1);
         },
         function(packet) {
           // Check that the debugger statement wasn't the reason for this pause.
           Assert.equal(packet.frame.where.line, gDebuggee.line0 + 7);
           Assert.notEqual(packet.why.type, "debuggerStatement");
           Assert.equal(packet.why.type, "resumeLimit");
         },
       ];
--- a/devtools/server/tests/unit/test_stepping-01.js
+++ b/devtools/server/tests/unit/test_stepping-01.js
@@ -1,81 +1,91 @@
 /* Any copyright is dedicated to the Public Domain.
    http://creativecommons.org/publicdomain/zero/1.0/ */
 /* eslint-disable no-shadow, max-nested-callbacks */
 
 "use strict";
 
 /**
- * Check basic step-over functionality.
+ * Check scenarios where we're leaving function a and
+ * going to the function b's call-site.
  */
 
-var gDebuggee;
-var gClient;
-var gCallback;
+async function testFinish({threadClient, debuggerClient}) {
+  await resume(threadClient);
+  await close(debuggerClient);
+
+  do_test_finished();
+}
 
-function run_test() {
-  do_test_pending();
-  run_test_with_server(DebuggerServer, function() {
-    run_test_with_server(WorkerDebuggerServer, do_test_finished);
-  });
+async function invokeAndPause({global, debuggerClient}, expression) {
+  return executeOnNextTickAndWaitForPause(
+    () => Cu.evalInSandbox(expression, global),
+    debuggerClient
+  );
+}
+
+async function step({threadClient, debuggerClient}, cmd) {
+  return cmd(debuggerClient, threadClient);
 }
 
-function run_test_with_server(server, callback) {
-  gCallback = callback;
-  initTestDebuggerServer(server);
-  gDebuggee = addTestGlobal("test-stepping", server);
-  gClient = new DebuggerClient(server.connectPipe());
-  gClient.connect(test_simple_stepping);
+function getPauseLocation(packet) {
+  const {line, column} = packet.frame.where;
+  return {line, column};
+}
+
+function getPauseReturn(packet) {
+  dump(`>> getPauseReturn yo ${JSON.stringify(packet.why)}\n`);
+  return packet.why.frameFinished.return;
+}
+
+async function steps(dbg, sequence) {
+  const locations = [];
+  for (const cmd of sequence) {
+    const packet = await step(dbg, cmd);
+    locations.push(getPauseLocation(packet));
+  }
+  return locations;
 }
 
-async function test_simple_stepping() {
-  const [attachResponse,, threadClient] = await attachTestTabAndResume(gClient,
-                                                                       "test-stepping");
-  ok(!attachResponse.error, "Should not get an error attaching");
-
-  dumpn("Evaluating test code and waiting for first debugger statement");
-  const dbgStmt = await executeOnNextTickAndWaitForPause(evaluateTestCode, gClient);
-  equal(dbgStmt.frame.where.line, 2, "Should be at debugger statement on line 2");
-  equal(gDebuggee.a, undefined);
-  equal(gDebuggee.b, undefined);
+async function stepOutOfA(dbg, func, expectedLocation) {
+  await invokeAndPause(dbg, `${func}()`);
+  await steps(dbg, [stepOver, stepIn]);
 
-  dumpn("Step Over to line 3");
-  const step1 = await stepOver(gClient, threadClient);
-  equal(step1.type, "paused");
-  equal(step1.why.type, "resumeLimit");
-  equal(step1.frame.where.line, 3);
-  equal(gDebuggee.a, undefined);
-  equal(gDebuggee.b, undefined);
+  dump(`>>> oof\n`);
+  const packet = await step(dbg, stepOut);
+  dump(`>>> foo\n`);
 
-  dumpn("Step Over to line 4");
-  const step3 = await stepOver(gClient, threadClient);
-  equal(step3.type, "paused");
-  equal(step3.why.type, "resumeLimit");
-  equal(step3.frame.where.line, 4);
-  equal(gDebuggee.a, 1);
-  equal(gDebuggee.b, undefined);
+  deepEqual(getPauseLocation(packet), expectedLocation, `step out location in ${func}`);
 
-  dumpn("Step Over to line 4 to leave the frame");
-  const step4 = await stepOver(gClient, threadClient);
-  equal(step4.type, "paused");
-  equal(step4.why.type, "resumeLimit");
-  equal(step4.frame.where.line, 4);
-  equal(gDebuggee.a, 1);
-  equal(gDebuggee.b, 2);
-
-  finishClient(gClient, gCallback);
+  await resume(dbg.threadClient);
 }
 
-function evaluateTestCode() {
-  /* eslint-disable */
-  Cu.evalInSandbox(
-    `                                   // 1
-    debugger;                           // 2
-    var a = 1;                          // 3
-    var b = 2;`,                        // 4
-    gDebuggee,
-    "1.8",
-    "test_stepping-01-test-code.js",
-    1
-  );
-  /* eslint-disable */
-}
\ No newline at end of file
+async function stepOverInA(dbg, func, expectedLocation) {
+  await invokeAndPause(dbg, `${func}()`);
+  await steps(dbg, [stepOver, stepIn, stepOver]);
+
+  let packet = await step(dbg, stepOver);
+  dump(`>> stepOverInA hi\n`);
+  equal(getPauseReturn(packet).ownPropertyLength, 1, "a() is returning obj");
+
+  packet = await step(dbg, stepOver);
+  deepEqual(getPauseLocation(packet), expectedLocation, `step out location in ${func}`);
+
+  await resume(dbg.threadClient);
+}
+
+async function testStep(dbg, func, expectedLocation) {
+  await stepOverInA(dbg, func, expectedLocation);
+  await stepOutOfA(dbg, func, expectedLocation);
+}
+
+function run_test() {
+  return (async function() {
+    const dbg = await setupTestFromUrl("stepping.js");
+
+    await testStep(dbg, "arithmetic", {line: 16, column: 8});
+    await testStep(dbg, "composition", {line: 21, column: 2});
+    await testStep(dbg, "chaining", {line: 26, column: 6});
+
+    await testFinish(dbg);
+  })();
+}
--- a/devtools/server/tests/unit/test_stepping-03.js
+++ b/devtools/server/tests/unit/test_stepping-03.js
@@ -32,17 +32,17 @@ async function test_simple_stepping() {
                                                                        "test-stepping");
   ok(!attachResponse.error, "Should not get an error attaching");
 
   dumpn("Evaluating test code and waiting for first debugger statement");
   await executeOnNextTickAndWaitForPause(evaluateTestCode, gClient);
 
   const step1 = await stepOut(gClient, threadClient);
   equal(step1.type, "paused");
-  equal(step1.frame.where.line, 6);
+  equal(step1.frame.where.line, 8);
   equal(step1.why.type, "resumeLimit");
 
   equal(gDebuggee.a, 1);
   equal(gDebuggee.b, 2);
 
   finishClient(gClient, gCallback);
 }
 
@@ -58,9 +58,9 @@ function evaluateTestCode() {
     f();                                // 7
     `,                                  // 8
     gDebuggee,
     "1.8",
     "test_stepping-01-test-code.js",
     1
   );
   /* eslint-disable */
-}
\ No newline at end of file
+}
--- a/devtools/server/tests/unit/test_stepping-06.js
+++ b/devtools/server/tests/unit/test_stepping-06.js
@@ -3,108 +3,145 @@
 /* eslint-disable no-shadow, max-nested-callbacks */
 
 "use strict";
 
 /**
  * Check that stepping out of a function returns the right return value.
  */
 
-var gDebuggee;
-var gClient;
-var gThreadClient;
-var gCallback;
+async function invokeAndPause({global, debuggerClient}, expression) {
+  return executeOnNextTickAndWaitForPause(
+    () => Cu.evalInSandbox(expression, global),
+    debuggerClient
+  );
+}
 
-function run_test() {
-  run_test_with_server(DebuggerServer, function() {
-    run_test_with_server(WorkerDebuggerServer, do_test_finished);
-  });
-  do_test_pending();
+async function step({threadClient, debuggerClient}, cmd) {
+  return cmd(debuggerClient, threadClient);
+}
+
+function getPauseLocation(packet) {
+  const {line, column} = packet.frame.where;
+  return {line, column};
 }
 
-function run_test_with_server(server, callback) {
-  gCallback = callback;
-  initTestDebuggerServer(server);
-  gDebuggee = addTestGlobal("test-stack", server);
-  gClient = new DebuggerClient(server.connectPipe());
-  gClient.connect().then(function() {
-    attachTestTabAndResume(
-      gClient, "test-stack",
-      function(response, tabClient, threadClient) {
-        gThreadClient = threadClient;
-        // XXX: We have to do an executeSoon so that the error isn't caught and
-        // reported by DebuggerClient.requester (because we are using the local
-        // transport and share a stack) which causes the test to fail.
-        Services.tm.dispatchToMainThread({
-          run: test_simple_stepping
-        });
-      });
-  });
+function getFrameFinished(packet) {
+  return packet.why.frameFinished;
+}
+
+async function steps(dbg, sequence) {
+  const locations = [];
+  for (const cmd of sequence) {
+    const packet = await step(dbg, cmd);
+    locations.push(getPauseLocation(packet));
+  }
+  return locations;
+}
+
+async function testFinish({threadClient, debuggerClient}) {
+  await resume(threadClient);
+  await close(debuggerClient);
+
+  do_test_finished();
 }
 
-async function test_simple_stepping() {
-  await executeOnNextTickAndWaitForPause(evaluateTestCode, gClient);
+async function testRet(dbg) {
+  let packet;
+
+  info(`1. Test returning from doRet via stepping over`);
+  await invokeAndPause(dbg, `doRet()`);
+  await steps(dbg, [stepOver, stepIn, stepOver]);
+  packet = await step(dbg, stepOver);
 
-  const step1 = await stepOut(gClient, gThreadClient);
-  equal(step1.type, "paused");
-  equal(step1.frame.where.line, 6);
-  equal(step1.why.type, "resumeLimit");
-  equal(step1.why.frameFinished.return, 10);
+  deepEqual(
+    getPauseLocation(packet),
+    {line: 6, column: 0},
+    `completion location in doRet`
+  );
+  deepEqual(
+    getFrameFinished(packet),
+    {"return": 2}, `completion value`);
 
-  gThreadClient.resume();
-  const step2 = await waitForPause(gThreadClient);
-  equal(step2.type, "paused");
-  equal(step2.frame.where.line, 8);
-  equal(step2.why.type, "debuggerStatement");
+  await resume(dbg.threadClient);
 
-  gThreadClient.stepOut();
-  const step3 = await waitForPause(gThreadClient);
-  equal(step3.type, "paused");
-  equal(step3.frame.where.line, 9);
-  equal(step3.why.type, "resumeLimit");
-  equal(step3.why.frameFinished.return.type, "undefined");
+  info(`2. Test leaving from doRet via stepping out`);
+  await invokeAndPause(dbg, `doRet()`);
+  await steps(dbg, [stepOver, stepIn]);
 
-  gThreadClient.resume();
-  const step4 = await waitForPause(gThreadClient);
+  packet = await step(dbg, stepOut);
 
-  equal(step4.type, "paused");
-  equal(step4.frame.where.line, 11);
+  deepEqual(
+    getPauseLocation(packet),
+    {line: 15, column: 2},
+    `completion location in doThrow`
+  );
 
-  gThreadClient.stepOut();
-  const step5 = await waitForPause(gThreadClient);
-  equal(step5.type, "paused");
-  equal(step5.frame.where.line, 12);
-  equal(step5.why.type, "resumeLimit");
-  equal(step5.why.frameFinished.throw, "ah");
+  deepEqual(
+    getFrameFinished(packet),
+    {"return": 2},
+    `completion completion value`
+  );
 
-  finishClient(gClient, gCallback);
+  await resume(dbg.threadClient);
 }
 
-function evaluateTestCode() {
-  /* eslint-disable */
-  Cu.evalInSandbox(
-    `                                   //  1
-    function f() {                      //  2
-      debugger;                         //  3
-      var a = 10;                       //  4
-      return a;                         //  5
-    }                                   //  6
-    function g() {                      //  7
-      debugger;                         //  8
-    }                                   //  9
-    function h() {                      // 10
-      debugger;                         // 11
-      throw 'ah';                       // 12
-      return 2;                         // 13
-    }                                   // 14
-    f()                                 // 15
-    g()                                 // 16
-    try {                               // 17
-      h();                              // 18
-    } catch (ex) { };                   // 19
-    `,                                  // 20
-    gDebuggee,
-    "1.8",
-    "test_stepping-07-test-code.js",
-    1
+async function testThrow(dbg) {
+  let packet;
+
+  info(`3. Test leaving from doThrow via stepping over`);
+  await invokeAndPause(dbg, `doThrow()`);
+  await steps(dbg, [stepOver, stepOver, stepIn]);
+  packet = await step(dbg, stepOver);
+
+  deepEqual(
+    getPauseLocation(packet),
+    {line: 9, column: 8},
+    `completion location in doThrow`
+  );
+
+  deepEqual(
+    getFrameFinished(packet).throw.class,
+    "Error",
+    `completion value class`
+  );
+  deepEqual(
+    getFrameFinished(packet).throw.preview.message,
+    "yo",
+    `completion value preview`
   );
-  /* eslint-enable */
+
+  await resume(dbg.threadClient);
+
+  info(`4. Test leaving from doThrow via stepping out`);
+  await invokeAndPause(dbg, `doThrow()`);
+  await steps(dbg, [stepOver, stepOver, stepIn]);
+
+  packet = await step(dbg, stepOut);
+  deepEqual(
+    getPauseLocation(packet),
+    {line: 22, column: 14},
+    `completion location in doThrow`
+  );
+
+  deepEqual(
+    getFrameFinished(packet).throw.class,
+    "Error",
+    `completion completion value class`
+  );
+  deepEqual(
+    getFrameFinished(packet).throw.preview.message,
+    "yo",
+    `completion completion value preview`
+  );
+  await resume(dbg.threadClient);
 }
+
+function run_test() {
+  return (async function() {
+    const dbg = await setupTestFromUrl("completions.js");
+
+    await testRet(dbg);
+    await testThrow(dbg);
+
+    await testFinish(dbg);
+  })();
+}
--- a/devtools/server/tests/unit/test_stepping-08.js
+++ b/devtools/server/tests/unit/test_stepping-08.js
@@ -41,17 +41,17 @@ async function testStepOutWithBreakpoint
 
   dumpn("Step in to innerFunction");
   const step1 = await stepIn(gClient, threadClient);
   equal(step1.frame.where.line, 7);
 
   dumpn("Step out of innerFunction");
   const step2 = await stepOut(gClient, threadClient);
   // The bug was that we'd stop again at the breakpoint on line 7.
-  equal(step2.frame.where.line, 10);
+  equal(step2.frame.where.line, 4);
 
   finishClient(gClient, gCallback);
 }
 
 function evaluateTestCode() {
   /* eslint-disable */
   Cu.evalInSandbox(
     `                                   //  1
--- a/devtools/server/tests/unit/xpcshell.ini
+++ b/devtools/server/tests/unit/xpcshell.ini
@@ -1,26 +1,28 @@
 [DEFAULT]
 tags = devtools
 head = head_dbg.js
 firefox-appdir = browser
 skip-if = toolkit == 'android'
 
 support-files =
   babel_and_browserify_script_with_source_map.js
+  completions.js
   source-map-data/sourcemapped.coffee
   source-map-data/sourcemapped.map
   post_init_global_actors.js
   post_init_target_scoped_actors.js
   pre_init_global_actors.js
   pre_init_target_scoped_actors.js
   registertestactors-lazy.js
   sourcemapped.js
   testactors.js
   hello-actor.js
+  stepping.js
   setBreakpoint-on-column.js
   setBreakpoint-on-column-in-gcd-script.js
   setBreakpoint-on-column-with-no-offsets.js
   setBreakpoint-on-column-with-no-offsets-in-gcd-script.js
   setBreakpoint-on-line.js
   setBreakpoint-on-line-in-gcd-script.js
   setBreakpoint-on-line-with-multiple-offsets.js
   setBreakpoint-on-line-with-multiple-statements.js
--- a/dom/security/featurepolicy/FeaturePolicyUtils.cpp
+++ b/dom/security/featurepolicy/FeaturePolicyUtils.cpp
@@ -21,31 +21,31 @@ struct FeatureMap {
 };
 
 /*
  * IMPORTANT: Do not change this list without review from a DOM peer _AND_ a
  * DOM Security peer!
  */
 static FeatureMap sSupportedFeatures[] = {
   // TODO: not supported yet!!!
-  { "autoplay", FeatureMap::eSelf },
+  { "autoplay", FeatureMap::eAll },
   // TODO: not supported yet!!!
-  { "camera", FeatureMap::eSelf  },
-  { "encrypted-media", FeatureMap::eSelf  },
+  { "camera", FeatureMap::eAll  },
+  { "encrypted-media", FeatureMap::eAll  },
   // TODO: not supported yet!!!
-  { "fullscreen", FeatureMap::eSelf  },
+  { "fullscreen", FeatureMap::eAll  },
   // TODO: not supported yet!!!
-  { "geolocation", FeatureMap::eSelf  },
+  { "geolocation", FeatureMap::eAll  },
   // TODO: not supported yet!!!
-  { "microphone", FeatureMap::eSelf  },
-  { "midi", FeatureMap::eSelf  },
-  { "payment", FeatureMap::eSelf  },
+  { "microphone", FeatureMap::eAll  },
+  { "midi", FeatureMap::eAll  },
+  { "payment", FeatureMap::eAll  },
   // TODO: not supported yet!!!
-  { "speaker", FeatureMap::eSelf  },
-  { "vr", FeatureMap::eSelf  },
+  { "speaker", FeatureMap::eAll  },
+  { "vr", FeatureMap::eAll  },
 };
 
 /* static */ bool
 FeaturePolicyUtils::IsSupportedFeature(const nsAString& aFeatureName)
 {
   uint32_t numFeatures = (sizeof(sSupportedFeatures) / sizeof(sSupportedFeatures[0]));
   for (uint32_t i = 0; i < numFeatures; ++i) {
     if (aFeatureName.LowerCaseEqualsASCII(sSupportedFeatures[i].mFeatureName)) {
--- a/dom/security/featurepolicy/test/mochitest/test_parser.html
+++ b/dom/security/featurepolicy/test/mochitest/test_parser.html
@@ -21,17 +21,17 @@ function test_document() {
   ok(document.policy.allowsFeature("camera"), "Camera is always enabled");
   ok(document.policy.allowsFeature("camera", "http://foo.bar"), "Camera is always enabled");
   let allowed = document.policy.getAllowlistForFeature("camera");
   is(allowed.length, 1, "Only 1 entry in allowlist for camera");
   is(allowed[0], "*", "allowlist is *");
 
   ok(document.policy.allowsFeature("geolocation"), "Geolocation is enabled for self");
   ok(document.policy.allowsFeature("geolocation", location.origin), "Geolocation is enabled for self");
-  ok(!document.policy.allowsFeature("geolocation", "http://foo.bar"), "Geolocation is not enabled for anything else");
+  ok(!document.policy.allowsFeature("geolocation", "http://foo.bar"), "Geolocation is not enabled for any random URL");
   allowed = document.policy.getAllowlistForFeature("geolocation");
   is(allowed.length, 1, "Only 1 entry in allowlist for geolocation");
   is(allowed[0], location.origin, "allowlist is self");
 
   ok(!document.policy.allowsFeature("microphone"), "Microphone is disabled for self");
   ok(!document.policy.allowsFeature("microphone", location.origin), "Microphone is disabled for self");
   ok(!document.policy.allowsFeature("microphone", "http://foo.bar"), "Microphone is disabled for foo.bar");
   ok(document.policy.allowsFeature("microphone", "http://example.com"), "Microphone is enabled for example.com");
@@ -63,29 +63,29 @@ function test_document() {
 function test_iframe_without_allow() {
   info("Checking HTMLIFrameElement.policy");
   let ifr = document.getElementById("ifr");
   ok("policy" in ifr, "HTMLIFrameElement.policy exists");
 
   ok(!ifr.policy.allowsFeature("foobar"), "Random feature");
   ok(!ifr.policy.allowsFeature("foobar", "http://www.something.net"), "Random feature");
 
-  ok(ifr.policy.allowsFeature("camera"), "Camera is always enabled for self");
-  ok(ifr.policy.allowsFeature("camera", location.origin), "Camera is allowed for self");
-  ok(!ifr.policy.allowsFeature("camera", "http://foo.bar"), "Camera is not allowed for a random URL");
+  ok(ifr.policy.allowsFeature("camera"), "Camera is always allowed");
+  ok(ifr.policy.allowsFeature("camera", location.origin), "Camera is always allowed");
+  ok(ifr.policy.allowsFeature("camera", "http://foo.bar"), "Camera is always allowed");
   let allowed = ifr.policy.getAllowlistForFeature("camera");
   is(allowed.length, 1, "Only 1 entry in allowlist for camera");
-  is(allowed[0], location.origin, "allowlist is 'self'");
+  is(allowed[0], "*", "allowlist is '*'");
 
-  ok(ifr.policy.allowsFeature("geolocation"), "Geolocation is enabled for self");
-  ok(ifr.policy.allowsFeature("geolocation", location.origin), "Geolocation is enabled for self");
-  ok(!ifr.policy.allowsFeature("geolocation", "http://foo.bar"), "Geolocation is not enabled for anything else");
+  ok(ifr.policy.allowsFeature("geolocation"), "Geolocation is enabled for all");
+  ok(ifr.policy.allowsFeature("geolocation", location.origin), "Geolocation is enabled for all");
+  ok(ifr.policy.allowsFeature("geolocation", "http://foo.bar"), "Geolocation is allowed for any random URL");
   allowed = ifr.policy.getAllowlistForFeature("geolocation");
   is(allowed.length, 1, "Only 1 entry in allowlist for geolocation");
-  is(allowed[0], location.origin, "allowlist is self");
+  is(allowed[0], "*", "allowlist is '*'");
 
   ok(!ifr.policy.allowsFeature("microphone"), "Microphone is disabled for self");
   ok(!ifr.policy.allowsFeature("microphone", location.origin), "Microphone is disabled for self");
   ok(!ifr.policy.allowsFeature("microphone", "http://foo.bar"), "Microphone is disabled for foo.bar");
   ok(!ifr.policy.allowsFeature("microphone", "http://example.com"), "Microphone is disabled for example.com");
   ok(!ifr.policy.allowsFeature("microphone", "http://example.org"), "Microphone is disabled for example.org");
   allowed = ifr.policy.getAllowlistForFeature("microphone");
   is(allowed.length, 0, "No allowlist for microphone");
@@ -115,22 +115,22 @@ function test_iframe_with_allow() {
 
   ok(!ifr.policy.allowsFeature("foobar"), "Random feature");
   ok(!ifr.policy.allowsFeature("foobar", "http://www.something.net"), "Random feature");
 
   ok(!ifr.policy.allowsFeature("camera"), "Camera is not enabled");
   let allowed = ifr.policy.getAllowlistForFeature("camera");
   is(allowed.length, 0, "Camera has an empty allowlist");
 
-  ok(ifr.policy.allowsFeature("geolocation"), "Geolocation is enabled for self");
-  ok(ifr.policy.allowsFeature("geolocation", location.origin), "Geolocation is enabled for self");
-  ok(!ifr.policy.allowsFeature("geolocation", "http://foo.bar"), "Geolocation is not enabled for anything else");
+  ok(ifr.policy.allowsFeature("geolocation"), "Geolocation is enabled for all");
+  ok(ifr.policy.allowsFeature("geolocation", location.origin), "Geolocation is enabled for all");
+  ok(ifr.policy.allowsFeature("geolocation", "http://foo.bar"), "Geolocation is enabled for all");
   allowed = ifr.policy.getAllowlistForFeature("geolocation");
   is(allowed.length, 1, "Only 1 entry in allowlist for geolocation");
-  is(allowed[0], location.origin, "allowlist is self");
+  is(allowed[0], "*", "allowlist is '*'");
 
   ok(!ifr.policy.allowsFeature("microphone"), "Microphone is disabled for self");
   ok(!ifr.policy.allowsFeature("microphone", location.origin), "Microphone is disabled for self");
   ok(!ifr.policy.allowsFeature("microphone", "http://foo.bar"), "Microphone is disabled for foo.bar");
   ok(!ifr.policy.allowsFeature("microphone", "http://example.com"), "Microphone is disabled for example.com");
   ok(!ifr.policy.allowsFeature("microphone", "http://example.org"), "Microphone is disabled for example.org");
   allowed = ifr.policy.getAllowlistForFeature("microphone");
   is(allowed.length, 0, "No allowlist for microphone");
@@ -152,28 +152,28 @@ function test_iframe_contentDocument() {
   let ifr = document.createElement("iframe");
   ifr.setAttribute("src", "empty.html");
   ifr.onload = function() {
     ok("policy" in ifr.contentDocument, "We have ifr.contentDocument.policy");
 
     ok(!ifr.contentDocument.policy.allowsFeature("foobar"), "Random feature");
     ok(!ifr.contentDocument.policy.allowsFeature("foobar", "http://www.something.net"), "Random feature");
 
-    ok(ifr.contentDocument.policy.allowsFeature("camera"), "Camera is always enabled for self");
-    ok(!ifr.contentDocument.policy.allowsFeature("camera", "http://foo.bar"), "Camera is not allowed for a random URL");
+    ok(ifr.contentDocument.policy.allowsFeature("camera"), "Camera is always allowed");
+    ok(ifr.contentDocument.policy.allowsFeature("camera", "http://foo.bar"), "Camera is always allowed");
     let allowed = ifr.contentDocument.policy.getAllowlistForFeature("camera");
     is(allowed.length, 1, "Only 1 entry in allowlist for camera");
-    is(allowed[0], location.origin, "allowlist is self");
+    is(allowed[0], "*", "allowlist is '*'");
 
-    ok(ifr.contentDocument.policy.allowsFeature("geolocation"), "Geolocation is enabled for self");
-    ok(ifr.contentDocument.policy.allowsFeature("geolocation", location.origin), "Geolocation is enabled for self");
-    ok(!ifr.contentDocument.policy.allowsFeature("geolocation", "http://foo.bar"), "Geolocation is not enabled for anything else");
+    ok(ifr.contentDocument.policy.allowsFeature("geolocation"), "Geolocation is enabled for all");
+    ok(ifr.contentDocument.policy.allowsFeature("geolocation", location.origin), "Geolocation is enabled for all");
+    ok(ifr.contentDocument.policy.allowsFeature("geolocation", "http://foo.bar"), "Geolocation is enabled for any random URL");
     allowed = ifr.contentDocument.policy.getAllowlistForFeature("geolocation");
     is(allowed.length, 1, "Only 1 entry in allowlist for geolocation");
-    is(allowed[0], location.origin, "allowlist is self");
+    is(allowed[0], "*", "allowlist is '*'");
 
     ok(!ifr.contentDocument.policy.allowsFeature("microphone"), "Microphone is disabled for self");
     ok(!ifr.contentDocument.policy.allowsFeature("microphone", location.origin), "Microphone is disabled for self");
     ok(!ifr.contentDocument.policy.allowsFeature("microphone", "http://foo.bar"), "Microphone is disabled for foo.bar");
     ok(!ifr.contentDocument.policy.allowsFeature("microphone", "http://example.com"), "Microphone is enabled for example.com");
     ok(!ifr.contentDocument.policy.allowsFeature("microphone", "http://example.org"), "Microphone is enabled for example.org");
     allowed = ifr.contentDocument.policy.getAllowlistForFeature("microphone");
     is(allowed.length, 0, "No allowlist for microphone");
--- a/js/src/jit/mips32/MacroAssembler-mips32.cpp
+++ b/js/src/jit/mips32/MacroAssembler-mips32.cpp
@@ -218,70 +218,103 @@ MacroAssemblerMIPS::ma_liPatchable(Regis
 }
 
 // Arithmetic-based ops.
 
 // Add.
 void
 MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
 {
-    Label goodAddition;
+    MOZ_ASSERT_IF(rs == rd, rs != rt);
+    MOZ_ASSERT(rs != ScratchRegister);
+    MOZ_ASSERT(rt != ScratchRegister);
+    MOZ_ASSERT(rd != rt);
+    MOZ_ASSERT(rd != ScratchRegister);
+    MOZ_ASSERT(rd != SecondScratchReg);
+
+    if (rs == rt) {
+       as_addu(rd, rs, rs);
+       as_xor(SecondScratchReg, rs, rd);
+       ma_b(SecondScratchReg, Imm32(0), overflow, Assembler::LessThan);
+       return;
+    }
+
+    // If different sign, no overflow
+    as_xor(ScratchRegister, rs, rt);
+
     as_addu(rd, rs, rt);
-
-    as_xor(ScratchRegister, rs, rt); // If different sign, no overflow
-    ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
-
+    as_nor(ScratchRegister, ScratchRegister, zero);
     // If different sign, then overflow
-    as_xor(ScratchRegister, rs, rd);
-    ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
-
-    bind(&goodAddition);
+    as_xor(SecondScratchReg, rt, rd);
+    as_and(SecondScratchReg, SecondScratchReg, ScratchRegister);
+    ma_b(SecondScratchReg, Imm32(0), overflow, Assembler::LessThan);
+
 }
 
 void
 MacroAssemblerMIPS::ma_addTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow)
 {
-    // Check for signed range because of as_addiu
-    // Check for unsigned range because of as_xori
-    if (Imm16::IsInSignedRange(imm.value) && Imm16::IsInUnsignedRange(imm.value)) {
-        Label goodAddition;
+    MOZ_ASSERT(rs != ScratchRegister);
+    MOZ_ASSERT(rs != SecondScratchReg);
+    MOZ_ASSERT(rd != ScratchRegister);
+    MOZ_ASSERT(rd != SecondScratchReg);
+
+    Register rs_copy = rs;
+
+    if (imm.value > 0) {
+        as_nor(ScratchRegister, rs, zero);
+    } else if (rs == rd) {
+        ma_move(ScratchRegister, rs);
+        rs_copy = ScratchRegister;
+    }
+
+    if (Imm16::IsInSignedRange(imm.value)) {
         as_addiu(rd, rs, imm.value);
-
-        // If different sign, no overflow
-        as_xori(ScratchRegister, rs, imm.value);
-        ma_b(ScratchRegister, Imm32(0), &goodAddition, Assembler::LessThan, ShortJump);
-
-        // If different sign, then overflow
-        as_xor(ScratchRegister, rs, rd);
-        ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
-
-        bind(&goodAddition);
     } else {
-        ma_li(ScratchRegister, imm);
-        ma_addTestOverflow(rd, rs, ScratchRegister, overflow);
+        ma_li(SecondScratchReg, imm);
+        as_addu(rd, rs, SecondScratchReg);
     }
+
+    if (imm.value > 0) {
+        as_and(ScratchRegister, ScratchRegister, rd);
+    } else {
+        as_nor(SecondScratchReg, rd, zero);
+        as_and(ScratchRegister, rs_copy, SecondScratchReg);
+    }
+
+    ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
 }
 
 // Subtract.
 void
 MacroAssemblerMIPS::ma_subTestOverflow(Register rd, Register rs, Register rt, Label* overflow)
 {
-    Label goodSubtraction;
-    // Use second scratch. The instructions generated by ma_b don't use the
-    // second scratch register.
+    // The rs == rt case should probably be folded at MIR stage.
+    // Happens for Number_isInteger*. Not worth specializing here.
+    MOZ_ASSERT_IF(rs == rd, rs != rt);
+    MOZ_ASSERT(rs != SecondScratchReg);
+    MOZ_ASSERT(rt != SecondScratchReg);
+    MOZ_ASSERT(rd != rt);
+    MOZ_ASSERT(rd != ScratchRegister);
+    MOZ_ASSERT(rd != SecondScratchReg);
+
+    Register rs_copy = rs;
+
+    if (rs == rd) {
+       ma_move(SecondScratchReg, rs);
+       rs_copy = SecondScratchReg;
+    }
+
     as_subu(rd, rs, rt);
-
-    as_xor(ScratchRegister, rs, rt); // If same sign, no overflow
-    ma_b(ScratchRegister, Imm32(0), &goodSubtraction, Assembler::GreaterThanOrEqual, ShortJump);
-
+    // If same sign, no overflow
+    as_xor(ScratchRegister, rs_copy, rt);
     // If different sign, then overflow
-    as_xor(ScratchRegister, rs, rd);
-    ma_b(ScratchRegister, Imm32(0), overflow, Assembler::LessThan);
-
-    bind(&goodSubtraction);
+    as_xor(SecondScratchReg, rs_copy, rd);
+    as_and(SecondScratchReg, SecondScratchReg, ScratchRegister);
+    ma_b(SecondScratchReg, Imm32(0), overflow, Assembler::LessThan);
 }
 
 // Memory.
 
 void
 MacroAssemblerMIPS::ma_load(Register dest, Address address,
                             LoadStoreSize size, LoadStoreExtension extension)
 {
@@ -1771,21 +1804,21 @@ MacroAssemblerMIPSCompat::loadValue(Addr
         ma_lw(val.typeReg(), Address(src.base, src.offset + TAG_OFFSET));
         ma_lw(val.payloadReg(), Address(src.base, src.offset + PAYLOAD_OFFSET));
     }
 }
 
 void
 MacroAssemblerMIPSCompat::tagValue(JSValueType type, Register payload, ValueOperand dest)
 {
-    MOZ_ASSERT(payload != dest.typeReg());
-    ma_li(dest.typeReg(), ImmType(type));
+    MOZ_ASSERT(dest.typeReg() != dest.payloadReg());
     if (payload != dest.payloadReg()) {
         ma_move(dest.payloadReg(), payload);
     }
+    ma_li(dest.typeReg(), ImmType(type));
 }
 
 void
 MacroAssemblerMIPSCompat::pushValue(ValueOperand val)
 {
     // Allocate stack slots for type and payload. One for each.
     asMasm().subPtr(Imm32(sizeof(Value)), StackPointer);
     // Store type and payload.
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -349,20 +349,18 @@ def load_wpt_tests(requested_paths, excl
         for test in it:
             if test[1].get("jsshell"):
                 yield test
 
     test_manifests = testloader.ManifestLoader(test_paths, types=["testharness"],
                                                meta_filters=[filter_jsshell_tests]).load()
 
     run_info_extras = products.load_product(kwargs["config"], "firefox")[-1](**kwargs)
-    is_automation = os.environ.get('AUTOMATION', False)
     run_info = wpttest.get_run_info(kwargs["metadata_root"], "firefox",
-                                    debug=debug, extras=run_info_extras,
-                                    raise_exception=is_automation)
+                                    debug=debug, extras=run_info_extras)
 
     path_filter = testloader.TestFilter(test_manifests,
                                         include=requested_paths,
                                         exclude=excluded_paths)
     loader = testloader.TestLoader(test_manifests,
                                    ["testharness"],
                                    run_info,
                                    manifest_filters=[path_filter])
--- a/layout/style/test/property_database.js
+++ b/layout/style/test/property_database.js
@@ -125,16 +125,22 @@ var validGradientAndElementValues = [
   "linear-gradient(.414rad, red 50%, 50%, blue 50%)",
   "linear-gradient(.414rad, red 50%, 20%, blue 50%)",
   "linear-gradient(.414rad, red 50%, 30%, blue 10%)",
   "linear-gradient(to right bottom, red, 20%, green 50%, 65%, blue)",
   "linear-gradient(to right bottom, red, 20%, green 10%, blue)",
   "linear-gradient(to right bottom, red, 50%, green 50%, 50%, blue)",
   "linear-gradient(to right bottom, red, 0%, green 50%, 100%, blue)",
 
+  "linear-gradient(red 0% 100%)",
+  "linear-gradient(red 0% 50%, blue 50%)",
+  "linear-gradient(red 0% 50%, blue 50% 100%)",
+  "linear-gradient(red 0% 50%, 0%, blue 50%)",
+  "linear-gradient(red 0% 50%, 0%, blue 50% 100%)",
+
   /* Unitless 0 is valid as an <angle> */
   "linear-gradient(0, red, blue)",
 
   "radial-gradient(red, blue)",
   "radial-gradient(red, yellow, blue)",
   "radial-gradient(red 1px, yellow 20%, blue 24em, green)",
   "radial-gradient(red, yellow, green, blue 50%)",
   "radial-gradient(red -50%, yellow -25%, green, blue)",
@@ -178,16 +184,22 @@ var validGradientAndElementValues = [
   "radial-gradient(farthest-corner circle at 4em, red, blue)",
 
   "radial-gradient(30% 40% at top left, red, blue)",
   "radial-gradient(50px 60px at 15% 20%, red, blue)",
   "radial-gradient(7em 8em at 45px, red, blue)",
 
   "radial-gradient(circle at 15% 20%, red, blue)",
 
+  "radial-gradient(red 0% 100%)",
+  "radial-gradient(red 0% 50%, blue 50%)",
+  "radial-gradient(red 0% 50%, blue 50% 100%)",
+  "radial-gradient(red 0% 50%, 0%, blue 50%)",
+  "radial-gradient(red 0% 50%, 0%, blue 50% 100%)",
+
   "repeating-radial-gradient(red, blue)",
   "repeating-radial-gradient(red, yellow, blue)",
   "repeating-radial-gradient(red 1px, yellow 20%, blue 24em, green)",
   "repeating-radial-gradient(red, yellow, green, blue 50%)",
   "repeating-radial-gradient(red -50%, yellow -25%, green, blue)",
   "repeating-radial-gradient(red -99px, yellow, green, blue 120%)",
   "repeating-radial-gradient(#ffff00, #ef3, rgba(10, 20, 30, 0.4))",
   "repeating-radial-gradient(rgba(10, 20, 30, 0.4), #ffff00, #ef3)",
@@ -323,16 +335,26 @@ var invalidGradientAndElementValues = [
   "radial-gradient(399grad, ellipse closest-corner, red, blue)",
   "radial-gradient(399grad, farthest-side circle, red, blue)",
 
   "radial-gradient(top left 99deg, cover, red, blue)",
   "radial-gradient(15% 20% -1.2345rad, circle, red, blue)",
   "radial-gradient(45px 399grad, ellipse closest-corner, red, blue)",
   "radial-gradient(45px 399grad, farthest-side circle, red, blue)",
   "radial-gradient(circle red, blue)",
+
+  /* don't allow more than two positions with multi-position syntax */
+  "linear-gradient(red 0% 50% 100%)",
+  "linear-gradient(red 0% 50% 75%, blue 75%)",
+  "linear-gradient(to bottom, red 0% 50% 100%)",
+  "linear-gradient(to bottom, red 0% 50% 75%, blue 75%)",
+  "radial-gradient(red 0% 50% 100%)",
+  "radial-gradient(red 0% 50% 75%, blue 75%)",
+  "radial-gradient(center, red 0% 50% 100%)",
+  "radial-gradient(center, red 0% 50% 75%, blue 75%)",
 ];
 var unbalancedGradientAndElementValues = [
   "-moz-element(#a()",
 ];
 
 var basicShapeSVGBoxValues = [
   "fill-box",
   "stroke-box",
--- a/memory/replace/dmd/dmd.py
+++ b/memory/replace/dmd/dmd.py
@@ -327,27 +327,25 @@ def getDigestFromFile(args, inputFile):
 
     def buildTraceDescription(traceTable, frameTable, traceKey):
         frameKeys = traceTable[traceKey]
         fmt = '    #{:02d}{:}'
 
         if args.filter_stacks_for_testing:
             # When running SmokeDMD.cpp, every stack trace should contain at
             # least one frame that contains 'DMD.cpp', from either |DMD.cpp| or
-            # |SmokeDMD.cpp|. (Or 'dmd.cpp' on Windows.) On builds without
-            # debuginfo we expect just |SmokeDMD|. If we see such a
+            # |SmokeDMD.cpp|. (Or 'dmd.cpp' on Windows.) If we see such a
             # frame, we replace the entire stack trace with a single,
             # predictable frame. There is too much variation in the stack
             # traces across different machines and platforms to do more precise
             # matching, but this level of matching will result in failure if
             # stack fixing fails completely.
             for frameKey in frameKeys:
                 frameDesc = frameTable[frameKey]
-                expected = ('DMD.cpp', 'dmd.cpp', 'SmokeDMD')
-                if any(ex in frameDesc for ex in expected):
+                if 'DMD.cpp' in frameDesc or 'dmd.cpp' in frameDesc:
                     return [fmt.format(1, ': ... DMD.cpp ...')]
 
         # The frame number is always '#00' (see DMD.h for why), so we have to
         # replace that with the correct frame number.
         desc = []
         for n, frameKey in enumerate(traceTable[traceKey], start=1):
             desc.append(fmt.format(n, frameTable[frameKey][3:]))
         return desc
--- a/memory/replace/dmd/test/xpcshell.ini
+++ b/memory/replace/dmd/test/xpcshell.ini
@@ -26,9 +26,9 @@ support-files =
   script-diff-live2.json
   script-diff-live-expected.txt
   script-diff-dark-matter1.json
   script-diff-dark-matter2.json
   script-diff-dark-matter-expected.txt
 
 [test_dmd.js]
 dmd = true
-skip-if = !(os=='linux' || os=='mac' || (os=='win' && !pgo))
+skip-if = !(os=='linux' || os=='win' || os=='mac')
--- a/python/mozbuild/mozbuild/action/download_wpt_manifest.py
+++ b/python/mozbuild/mozbuild/action/download_wpt_manifest.py
@@ -7,15 +7,16 @@
 import os
 import sys
 
 import buildconfig
 
 
 def main():
     print("Downloading wpt manifest")
+    man_path = os.path.join(buildconfig.topobjdir, '_tests', 'web-platform')
     sys.path.insert(0, buildconfig.topsrcdir)
-    import manifestupdate
-    return 0 if manifestupdate.run(buildconfig.topsrcdir, buildconfig.topobjdir) else 1
+    import manifestdownload
+    manifestdownload.run(man_path, buildconfig.topsrcdir, force=True)
 
 
 if __name__ == '__main__':
     sys.exit(main())
--- a/python/mozbuild/mozbuild/frontend/emitter.py
+++ b/python/mozbuild/mozbuild/frontend/emitter.py
@@ -105,16 +105,28 @@ class TreeMetadataEmitter(LoggingMixin):
     reader.BuildReader and converts it into the classes defined in the data
     module.
     """
 
     def __init__(self, config):
         self.populate_logger()
 
         self.config = config
+
+        mozinfo.find_and_update_from_json(config.topobjdir)
+
+        # Python 2.6 doesn't allow unicode keys to be used for keyword
+        # arguments. This gross hack works around the problem until we
+        # rid ourselves of 2.6.
+        self.info = {}
+        for k, v in mozinfo.info.items():
+            if isinstance(k, unicode):
+                k = k.encode('ascii')
+            self.info[k] = v
+
         self._libs = OrderedDefaultDict(list)
         self._binaries = OrderedDict()
         self._compile_dirs = set()
         self._host_compile_dirs = set()
         self._asm_compile_dirs = set()
         self._compile_flags = dict()
         self._compile_as_flags = dict()
         self._linkage = []
--- a/servo/components/style/values/specified/image.rs
+++ b/servo/components/style/values/specified/image.rs
@@ -3,17 +3,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 //! CSS handling for the specified value of
 //! [`image`][image]s
 //!
 //! [image]: https://drafts.csswg.org/css-images/#image-values
 
 use Atom;
-use cssparser::{Parser, Token};
+use cssparser::{Parser, Token, Delimiter};
 use custom_properties::SpecifiedValue;
 use parser::{Parse, ParserContext};
 use selectors::parser::SelectorParseErrorKind;
 #[cfg(feature = "servo")]
 use servo_url::ServoUrl;
 use std::cmp::Ordering;
 use std::f32::consts::PI;
 use std::fmt::{self, Write};
@@ -951,27 +951,53 @@ impl ShapeExtent {
     }
 }
 
 impl GradientItem {
     fn parse_comma_separated<'i, 't>(
         context: &ParserContext,
         input: &mut Parser<'i, 't>,
     ) -> Result<Vec<Self>, ParseError<'i>> {
+        let mut items = Vec::new();
         let mut seen_stop = false;
-        let items = input.parse_comma_separated(|input| {
-            if seen_stop {
-                if let Ok(hint) = input.try(|i| LengthOrPercentage::parse(context, i)) {
-                    seen_stop = false;
-                    return Ok(generic::GradientItem::InterpolationHint(hint));
+
+        loop {
+            input.parse_until_before(Delimiter::Comma, |input| {
+                if seen_stop {
+                    if let Ok(hint) = input.try(|i| LengthOrPercentage::parse(context, i)) {
+                        seen_stop = false;
+                        items.push(generic::GradientItem::InterpolationHint(hint));
+                        return Ok(());
+                    }
                 }
+
+                let stop = ColorStop::parse(context, input)?;
+
+                if let Ok(multi_position) = input.try(|i| LengthOrPercentage::parse(context, i)) {
+                    let stop_color = stop.color.clone();
+                    items.push(generic::GradientItem::ColorStop(stop));
+                    items.push(generic::GradientItem::ColorStop(ColorStop {
+                        color: stop_color,
+                        position: Some(multi_position),
+                    }));
+                } else {
+                    items.push(generic::GradientItem::ColorStop(stop));
+                }
+
+                seen_stop = true;
+                Ok(())
+            })?;
+
+            match input.next() {
+                Err(_) => break,
+                Ok(&Token::Comma) => continue,
+                Ok(_) => unreachable!(),
             }
-            seen_stop = true;
-            ColorStop::parse(context, input).map(generic::GradientItem::ColorStop)
-        })?;
+        }
+
         if !seen_stop || items.len() < 2 {
             return Err(input.new_custom_error(StyleParseErrorKind::UnspecifiedError));
         }
         Ok(items)
     }
 }
 
 impl Parse for ColorStop {
--- a/taskcluster/ci/build/linux.yml
+++ b/taskcluster/ci/build/linux.yml
@@ -55,16 +55,50 @@ linux64-plain/opt:
     run-on-projects: [trunk]
     toolchains:
         - linux64-binutils
         - linux64-clang
         - linux64-rust
         - linux64-node
         - linux64-cbindgen
 
+linux64-dmd/opt:
+    description: "Linux64 DMD Opt"
+    index:
+        product: firefox
+        job-name: linux64-dmd-opt
+    treeherder:
+        platform: linux64-dmd/opt
+        symbol: Bdmd
+        tier: 2
+    worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+    worker:
+        max-run-time: 3600
+    run:
+        using: mozharness
+        actions: [get-secrets build check-test]
+        config:
+            - builds/releng_base_firefox.py
+            - builds/releng_base_linux_64_builds.py
+        script: "mozharness/scripts/fx_desktop_build.py"
+        extra-config:
+            mozconfig_variant: 'opt-dmd'
+        secrets: true
+        tooltool-downloads: public
+        need-xvfb: true
+    run-on-projects: []
+    toolchains:
+        - linux64-binutils
+        - linux64-clang
+        - linux64-rust
+        - linux64-rust-size
+        - linux64-cbindgen
+        - linux64-sccache
+        - linux64-node
+
 linux64/pgo:
     description: "Linux64 PGO"
     index:
         product: firefox
         job-name: linux64-pgo
     attributes:
         enable-full-crashsymbols: true
     treeherder:
--- a/taskcluster/ci/build/macosx.yml
+++ b/taskcluster/ci/build/macosx.yml
@@ -104,16 +104,54 @@ macosx64-asan-fuzzing/opt:
         - linux64-libdmg
         - linux64-llvm-dsymutil
         - linux64-rust-macos
         - linux64-rust-size
         - linux64-cbindgen
         - linux64-sccache
         - linux64-node
 
+macosx64-dmd/opt:
+    description: "MacOS X x64 DMD Cross-compile"
+    index:
+        product: firefox
+        job-name: macosx64-dmd-opt
+    treeherder:
+        platform: osx-10-10-dmd/opt
+        symbol: Bdmd
+        tier: 2
+    worker-type: aws-provisioner-v1/gecko-{level}-b-linux
+    worker:
+        max-run-time: 3600
+        env:
+            TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/macosx64/cross-releng.manifest"
+    run:
+        using: mozharness
+        actions: [get-secrets build]
+        config:
+            - builds/releng_base_firefox.py
+            - builds/releng_base_mac_64_cross_builds.py
+        script: "mozharness/scripts/fx_desktop_build.py"
+        extra-config:
+            mozconfig_variant: 'opt-dmd'
+        secrets: true
+        tooltool-downloads: internal
+    run-on-projects: []
+    toolchains:
+        - linux64-cctools-port
+        - linux64-clang
+        - linux64-hfsplus
+        - linux64-libdmg
+        - linux64-llvm-dsymutil
+        - linux64-rust-macos
+        - linux64-rust-size
+        - linux64-cbindgen
+        - linux64-sccache
+        - linux64-node
+
 macosx64-devedition-nightly/opt:
     description: "MacOS X Dev Edition x64 Nightly"
     attributes:
         nightly: true
         enable-full-crashsymbols: true
     shipping-phase: build
     shipping-product: devedition
     index:
--- a/taskcluster/ci/build/windows.yml
+++ b/taskcluster/ci/build/windows.yml
@@ -65,16 +65,50 @@ win32/opt:
     toolchains:
         - win64-clang-cl
         - win64-rust
         - win64-rust-size
         - win64-cbindgen
         - win64-sccache
         - win64-node
 
+win32-dmd/opt:
+    description: "Win32 DMD Opt"
+    index:
+        product: firefox
+        job-name: win32-dmd-opt
+    treeherder:
+        platform: windows2012-32-dmd/opt
+        symbol: Bdmd
+        tier: 2
+    worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
+    worker:
+        max-run-time: 7200
+        env:
+            TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/win32/releng.manifest"
+    run:
+        using: mozharness
+        options: [append-env-variables-from-configs]
+        script: mozharness/scripts/fx_desktop_build.py
+        config:
+            - builds/releng_base_firefox.py
+            - builds/taskcluster_base_windows.py
+            - builds/taskcluster_base_win32.py
+            - builds/taskcluster_sub_win32/opt.py
+        extra-config:
+            mozconfig_variant: 'opt-dmd'
+    run-on-projects: []
+    toolchains:
+        - win64-clang-cl
+        - win64-rust
+        - win64-rust-size
+        - win64-cbindgen
+        - win64-sccache
+        - win64-node
+
 win32/pgo:
     description: "Win32 Opt PGO"
     index:
         product: firefox
         job-name: win32-pgo
     attributes:
         enable-full-crashsymbols: true
     treeherder:
@@ -226,16 +260,50 @@ win64-plain/opt:
             - builds/taskcluster_sub_win64/plain_opt.py
     run-on-projects: [trunk]
     toolchains:
         - win64-clang-cl
         - win64-rust
         - win64-node
         - win64-cbindgen
 
+win64-dmd/opt:
+    description: "Win64 DMD Opt"
+    index:
+        product: firefox
+        job-name: win64-dmd-opt
+    treeherder:
+        platform: windows2012-64-dmd/opt
+        symbol: Bdmd
+        tier: 2
+    worker-type: aws-provisioner-v1/gecko-{level}-b-win2012
+    worker:
+        max-run-time: 7200
+        env:
+            TOOLTOOL_MANIFEST: "browser/config/tooltool-manifests/win64/releng.manifest"
+    run:
+        using: mozharness
+        options: [append-env-variables-from-configs]
+        script: mozharness/scripts/fx_desktop_build.py
+        config:
+            - builds/releng_base_firefox.py
+            - builds/taskcluster_base_windows.py
+            - builds/taskcluster_base_win64.py
+            - builds/taskcluster_sub_win64/opt.py
+        extra-config:
+            mozconfig_variant: 'opt-dmd'
+    run-on-projects: []
+    toolchains:
+        - win64-clang-cl
+        - win64-rust
+        - win64-rust-size
+        - win64-cbindgen
+        - win64-sccache
+        - win64-node
+
 win32-nightly/opt:
     description: "Win32 Nightly"
     index:
         product: firefox
         job-name: win32-opt
         type: nightly
     attributes:
         nightly: true
--- a/taskcluster/ci/source-test/wpt-manifest.yml
+++ b/taskcluster/ci/source-test/wpt-manifest.yml
@@ -15,17 +15,17 @@ upload:
     index:
         product: source
         job-name: manifest-upload
         rank: build_date
     run:
         using: run-task
         command: >
             cd /builds/worker/checkouts/gecko
-            && ./mach wpt-manifest-update --config testing/web-platform/wptrunner.ini
+            && ./mach wpt-manifest-update
             && tar -cvzf manifests.tar.gz -C testing/web-platform/ meta/MANIFEST.json mozilla/meta/MANIFEST.json
     worker:
         artifacts:
             - type: file
               path: /builds/worker/checkouts/gecko/manifests.tar.gz
               name: public/manifests.tar.gz
 
         max-run-time: 3600
--- a/taskcluster/taskgraph/target_tasks.py
+++ b/taskcluster/taskgraph/target_tasks.py
@@ -548,16 +548,26 @@ def target_tasks_nightly_desktop(full_ta
         set(target_tasks_nightly_win32(full_task_graph, parameters, graph_config))
         | set(target_tasks_nightly_win64(full_task_graph, parameters, graph_config))
         | set(target_tasks_nightly_macosx(full_task_graph, parameters, graph_config))
         | set(target_tasks_nightly_linux(full_task_graph, parameters, graph_config))
         | set(target_tasks_nightly_asan(full_task_graph, parameters, graph_config))
     )
 
 
+# Opt DMD builds should only run nightly
+@_target_task('nightly_dmd')
+def target_tasks_dmd(full_task_graph, parameters, graph_config):
+    """Target the DMD builds, which run nightly on the m-c branch."""
+    def filter(task):
+        platform = task.attributes.get('build_platform', '')
+        return platform.endswith('-dmd')  # e.g. win32-dmd, win64-dmd, macosx64-dmd
+    return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]
+
+
 # Run Searchfox analysis once daily.
 @_target_task('searchfox_index')
 def target_tasks_searchfox(full_task_graph, parameters, graph_config):
     """Select tasks required for indexing Firefox for Searchfox web site each day"""
     # For now we only do Linux and Mac debug builds. Windows builds
     # are currently broken (bug 1418415).
     return ['searchfox-linux64-searchfox/debug',
             'searchfox-macosx64-searchfox/debug']
--- a/testing/mozbase/mozinfo/mozinfo/mozinfo.py
+++ b/testing/mozbase/mozinfo/mozinfo/mozinfo.py
@@ -178,24 +178,23 @@ def sanitize(info):
         else:
             info["processor"] = "x86"
             info["bits"] = 32
 
 # method for updating information
 
 
 def update(new_info):
-    """Adds information from json file to the global symbol table.
-
-    Given the parameter new_info, this method will look for the file.
-    If found, the file is read into a buffer and assumed to be json.
+    """
+    Update the info.
 
     :param new_info: Either a dict containing the new info or a path/url
                      to a json file containing the new info.
     """
+
     PY3 = sys.version_info[0] == 3
     if PY3:
         string_types = str,
     else:
         string_types = basestring,
     if isinstance(new_info, string_types):
         # lazy import
         import mozfile
@@ -211,67 +210,49 @@ def update(new_info):
     # convenience data for os access
     for os_name in choices['os']:
         globals()['is' + os_name.title()] = info['os'] == os_name
     # unix is special
     if isLinux or isBsd:  # noqa
         globals()['isUnix'] = True
 
 
-def find_and_update_from_json(*dirs, **kwargs):
-    """Find a mozinfo.json file, load it, and update global symbol table.
-
-    This method will first check the relevant objdir directory for the
-    necessary mozinfo.json file, if the current script is being run from a
-    Mozilla objdir.
-
-    If the objdir directory did not supply the necessary data, this method
-    will then look for the required mozinfo.json file from the provided
-    tuple of directories.
+def find_and_update_from_json(*dirs):
+    """
+    Find a mozinfo.json file, load it, and update the info with the
+    contents.
 
-    If file is found, the global symbols table is updated via a helper method.
-
-    If no valid files are found, an exception is raised.
+    :param dirs: Directories in which to look for the file. They will be
+                 searched after first looking in the root of the objdir
+                 if the current script is being run from a Mozilla objdir.
 
-    :param tuple dirs: Directories in which to look for the file.
-    :param dict kwargs: optional values:
-                        raise_exception: if this value is provided, the default
-                                         behavior of raising an exception is
-                                         overridden.
-    :returns: EnvironmentError: default behavior.
-              None: if exception raising is suppressed.
-              json_path: string representation of path.
+    Returns the full path to mozinfo.json if it was found, or None otherwise.
     """
     # First, see if we're in an objdir
     try:
         from mozbuild.base import MozbuildObject, BuildEnvironmentNotFoundException
         from mozbuild.mozconfig import MozconfigFindException
         build = MozbuildObject.from_environment()
         json_path = _os.path.join(build.topobjdir, "mozinfo.json")
         if _os.path.isfile(json_path):
             update(json_path)
             return json_path
     except ImportError:
         pass
     except (BuildEnvironmentNotFoundException, MozconfigFindException):
         pass
 
-    raise_exception = kwargs.get('raise_exception', True)
-
     for d in dirs:
         d = _os.path.abspath(d)
         json_path = _os.path.join(d, "mozinfo.json")
         if _os.path.isfile(json_path):
             update(json_path)
             return json_path
 
-    if raise_exception:
-        raise EnvironmentError('{}: could not find any mozinfo.json.'.format(__name__))
-    else:
-        return None
+    return None
 
 
 def output_to_file(path):
     import json
     with open(path, 'w') as f:
         f.write(json.dumps(info))
 
 
--- a/testing/mozbase/mozinfo/tests/test.py
+++ b/testing/mozbase/mozinfo/tests/test.py
@@ -70,67 +70,24 @@ class TestMozinfo(unittest.TestCase):
         """Test that mozinfo.find_and_update_from_json can
         find mozinfo.json in a directory passed to it."""
         j = os.path.join(self.tempdir, "mozinfo.json")
         with open(j, "w") as f:
             f.write(json.dumps({"foo": "abcdefg"}))
         self.assertEqual(mozinfo.find_and_update_from_json(self.tempdir), j)
         self.assertEqual(mozinfo.info["foo"], "abcdefg")
 
-    def test_find_and_update_file_no_argument(self):
-        """Test that mozinfo.find_and_update_from_json can
-        handle not being given any arguments.
-        """
-        with self.assertRaises(EnvironmentError):
-            self.assertEqual(mozinfo.find_and_update_from_json())
-
     def test_find_and_update_file_invalid_json(self):
         """Test that mozinfo.find_and_update_from_json can
         handle invalid JSON"""
         j = os.path.join(self.tempdir, "mozinfo.json")
         with open(j, 'w') as f:
             f.write('invalid{"json":')
         self.assertRaises(ValueError, mozinfo.find_and_update_from_json, self.tempdir)
 
-    def test_find_and_update_file_raise_exception(self):
-        """Test that mozinfo.find_and_update_from_json raises
-        an IOError exception if a True boolean value is
-        provided as the only argument.
-        """
-        with self.assertRaises(EnvironmentError):
-            mozinfo.find_and_update_from_json(raise_exception=True)
-
-    def test_find_and_update_file_raise_exception_multiple_arguments(self):
-        """Test that mozinfo.find_and_update_from_json raises
-        an IOError exception if a True boolean value is
-        provided as last positional argument.
-        """
-        with self.assertRaises(EnvironmentError):
-            mozinfo.find_and_update_from_json(self.tempdir, raise_exception=True)
-
-    def test_find_and_update_file_suppress_exception(self):
-        """Test that mozinfo.find_and_update_from_json suppresses
-        an IOError exception if a False boolean value is
-        provided as the only argument.
-        """
-        self.assertEqual(mozinfo.find_and_update_from_json(
-            raise_exception=False), None)
-
-    def test_find_and_update_file_suppress_exception_multiple_arguments(self):
-        """Test that mozinfo.find_and_update_from_json suppresses
-        an IOError exception if a False boolean value is
-        provided as last positional argument.
-        """
-        j = os.path.join(self.tempdir, "mozinfo.json")
-        with open(j, "w") as f:
-            f.write(json.dumps({"foo": "abcdefg"}))
-        self.assertEqual(mozinfo.find_and_update_from_json(
-            self.tempdir, raise_exception=False), j)
-        self.assertEqual(mozinfo.info["foo"], "abcdefg")
-
     def test_find_and_update_file_mozbuild(self):
         """Test that mozinfo.find_and_update_from_json can
         find mozinfo.json using the mozbuild module."""
         j = os.path.join(self.tempdir, "mozinfo.json")
         with open(j, "w") as f:
             f.write(json.dumps({"foo": "123456"}))
         m = mock.MagicMock()
         # Mock the value of MozbuildObject.from_environment().topobjdir.
--- a/testing/mozbase/moztest/moztest/resolve.py
+++ b/testing/mozbase/moztest/moztest/resolve.py
@@ -445,24 +445,34 @@ class TestMetadata(object):
             return True
         return False
 
     def add_wpt_manifest_data(self):
         if self._wpt_loaded:
             return
 
         wpt_path = os.path.join(self._srcdir, "testing", "web-platform")
-        sys.path = [wpt_path] + sys.path
+        wptrunner_path = os.path.join(wpt_path, "tests", "tools", "wptrunner")
+        manifest_path = os.path.join(self._objdir, "_tests", "web-platform")
+
+        sys.path = [wpt_path, wptrunner_path] + sys.path
+
+        import manifestdownload
+        import wptrunner
+        from wptrunner.wptrunner import testloader
 
-        import manifestupdate
-        manifests = manifestupdate.run(self._srcdir, self._objdir, rebuild=False, download=True,
-                                       config_path=None, rewrite_config=True, update=True)
-        if not manifests:
-            print("Loading wpt manifest failed")
-            return
+        manifestdownload.run(manifest_path, self._srcdir)
+
+        kwargs = {"config": os.path.join(self._objdir, "_tests", "web-platform",
+                                         "wptrunner.local.ini"),
+                  "tests_root": None,
+                  "metadata_root": None}
+
+        wptrunner.wptcommandline.set_from_config(kwargs)
+        manifests = testloader.ManifestLoader(kwargs["test_paths"]).load()
 
         for manifest, data in manifests.iteritems():
             tests_root = data["tests_path"]
             for test_type, path, tests in manifest:
                 full_path = os.path.join(tests_root, path)
                 src_path = os.path.relpath(full_path, self._srcdir)
                 if test_type not in ["testharness", "reftest", "wdspec"]:
                     continue
--- a/testing/mozharness/mozharness/mozilla/building/buildbase.py
+++ b/testing/mozharness/mozharness/mozilla/building/buildbase.py
@@ -389,16 +389,17 @@ class BuildOptionParser(object):
         'android-checkstyle': 'builds/releng_sub_%s_configs/%s_checkstyle.py',
         'android-lint': 'builds/releng_sub_%s_configs/%s_lint.py',
         'android-findbugs': 'builds/releng_sub_%s_configs/%s_findbugs.py',
         'android-geckoview-docs': 'builds/releng_sub_%s_configs/%s_geckoview_docs.py',
         'valgrind': 'builds/releng_sub_%s_configs/%s_valgrind.py',
         'artifact': 'builds/releng_sub_%s_configs/%s_artifact.py',
         'debug-artifact': 'builds/releng_sub_%s_configs/%s_debug_artifact.py',
         'devedition': 'builds/releng_sub_%s_configs/%s_devedition.py',
+        'dmd': 'builds/releng_sub_%s_configs/%s_dmd.py',
         'tup': 'builds/releng_sub_%s_configs/%s_tup.py',
     }
     build_pool_cfg_file = 'builds/build_pool_specifics.py'
     branch_cfg_file = 'builds/branch_specifics.py'
 
     @classmethod
     def _query_pltfrm_and_bits(cls, target_option, options):
         """ determine platform and bits
--- a/testing/mozharness/mozharness/mozilla/testing/android.py
+++ b/testing/mozharness/mozharness/mozilla/testing/android.py
@@ -238,8 +238,33 @@ class AndroidMixin(object):
             for p in glob.glob(os.path.join(xre_dir, 'host-utils-*')):
                 if os.path.isdir(p) and os.path.isfile(os.path.join(p, 'xpcshell')):
                     xre_path = p
             if not xre_path:
                 self.fatal("xre path not found in %s" % xre_dir)
         else:
             self.fatal("configure hostutils_manifest_path!")
         return xre_path
+
+    def query_package_name(self):
+        if self.app_name is None:
+            # For convenience, assume geckoview.test/geckoview_example when install
+            # target looks like geckoview.
+            if 'androidTest' in self.installer_path:
+                self.app_name = 'org.mozilla.geckoview.test'
+            elif 'geckoview' in self.installer_path:
+                self.app_name = 'org.mozilla.geckoview_example'
+        if self.app_name is None:
+            # Find appname from package-name.txt - assumes download-and-extract
+            # has completed successfully.
+            # The app/package name will typically be org.mozilla.fennec,
+            # but org.mozilla.firefox for release builds, and there may be
+            # other variations. 'aapt dump badging <apk>' could be used as an
+            # alternative to package-name.txt, but introduces a dependency
+            # on aapt, found currently in the Android SDK build-tools component.
+            apk_dir = self.abs_dirs['abs_work_dir']
+            self.apk_path = os.path.join(apk_dir, self.installer_path)
+            unzip = self.query_exe("unzip")
+            package_path = os.path.join(apk_dir, 'package-name.txt')
+            unzip_cmd = [unzip, '-q', '-o', self.apk_path]
+            self.run_command(unzip_cmd, cwd=apk_dir, halt_on_failure=True)
+            self.app_name = str(self.read_from_file(package_path, verbose=True)).rstrip()
+        return self.app_name
--- a/testing/mozharness/scripts/android_emulator_unittest.py
+++ b/testing/mozharness/scripts/android_emulator_unittest.py
@@ -168,41 +168,16 @@ class AndroidEmulatorTest(TestingMixin, 
     def _query_tests_dir(self, test_suite):
         dirs = self.query_abs_dirs()
         try:
             test_dir = self.config["suite_definitions"][test_suite]["testsdir"]
         except Exception:
             test_dir = test_suite
         return os.path.join(dirs['abs_test_install_dir'], test_dir)
 
-    def _query_package_name(self):
-        if self.app_name is None:
-            # For convenience, assume geckoview.test/geckoview_example when install
-            # target looks like geckoview.
-            if 'androidTest' in self.installer_path:
-                self.app_name = 'org.mozilla.geckoview.test'
-            elif 'geckoview' in self.installer_path:
-                self.app_name = 'org.mozilla.geckoview_example'
-        if self.app_name is None:
-            # Find appname from package-name.txt - assumes download-and-extract
-            # has completed successfully.
-            # The app/package name will typically be org.mozilla.fennec,
-            # but org.mozilla.firefox for release builds, and there may be
-            # other variations. 'aapt dump badging <apk>' could be used as an
-            # alternative to package-name.txt, but introduces a dependency
-            # on aapt, found currently in the Android SDK build-tools component.
-            apk_dir = self.abs_dirs['abs_work_dir']
-            self.apk_path = os.path.join(apk_dir, self.installer_path)
-            unzip = self.query_exe("unzip")
-            package_path = os.path.join(apk_dir, 'package-name.txt')
-            unzip_cmd = [unzip, '-q', '-o', self.apk_path]
-            self.run_command(unzip_cmd, cwd=apk_dir, halt_on_failure=True)
-            self.app_name = str(self.read_from_file(package_path, verbose=True)).rstrip()
-        return self.app_name
-
     def _launch_emulator(self):
         env = self.query_env()
 
         # Write a default ddms.cfg to avoid unwanted prompts
         avd_home_dir = self.abs_dirs['abs_avds_dir']
         DDMS_FILE = os.path.join(avd_home_dir, "ddms.cfg")
         with open(DDMS_FILE, 'w') as f:
             f.write("pingOptIn=false\npingId=0\n")
@@ -371,17 +346,17 @@ class AndroidEmulatorTest(TestingMixin, 
             opt = option.split('=')[0]
             # override configured chunk options with script args, if specified
             if opt in ('--this-chunk', '--total-chunks'):
                 if user_paths or getattr(self, opt.replace('-', '_').strip('_'), None) is not None:
                     continue
 
             if '%(app)' in option:
                 # only query package name if requested
-                cmd.extend([option % {'app': self._query_package_name()}])
+                cmd.extend([option % {'app': self.query_package_name()}])
             else:
                 option = option % str_format_values
                 if option:
                     cmd.extend([option])
 
         if not (self.verify_enabled or self.per_test_coverage):
             if user_paths:
                 cmd.extend(user_paths.split(':'))
@@ -549,28 +524,17 @@ class AndroidEmulatorTest(TestingMixin, 
         """
         super(AndroidEmulatorTest, self).download_and_extract(
             suite_categories=self._query_suite_categories())
         dirs = self.query_abs_dirs()
         if self.test_suite and self.test_suite.startswith('robocop'):
             robocop_url = self.installer_url[:self.installer_url.rfind('/')] + '/robocop.apk'
             self.info("Downloading robocop...")
             self.download_file(robocop_url, 'robocop.apk', dirs['abs_work_dir'], error_level=FATAL)
-        self.rmtree(dirs['abs_xre_dir'])
-        self.mkdir_p(dirs['abs_xre_dir'])
-        if self.config["hostutils_manifest_path"]:
-            url = self._get_repo_url(self.config["hostutils_manifest_path"])
-            self._tooltool_fetch(url, dirs['abs_xre_dir'])
-            for p in glob.glob(os.path.join(dirs['abs_xre_dir'], 'host-utils-*')):
-                if os.path.isdir(p) and os.path.isfile(os.path.join(p, 'xpcshell')):
-                    self.xre_path = p
-            if not self.xre_path:
-                self.fatal("xre path not found in %s" % dirs['abs_xre_dir'])
-        else:
-            self.fatal("configure hostutils_manifest_path!")
+        self.xre_path = self.download_hostutils(dirs['abs_xre_dir'])
 
     def install(self):
         """
         Install APKs on the device.
         """
         install_needed = (not self.test_suite) or \
             self.config["suite_definitions"][self.test_suite].get("install")
         if install_needed is False:
--- a/testing/mozharness/scripts/android_hardware_unittest.py
+++ b/testing/mozharness/scripts/android_hardware_unittest.py
@@ -2,17 +2,16 @@
 # ***** BEGIN LICENSE BLOCK *****
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this file,
 # You can obtain one at http://mozilla.org/MPL/2.0/.
 # ***** END LICENSE BLOCK *****
 
 import copy
 import datetime
-import glob
 import os
 import re
 import sys
 import subprocess
 
 # load modules from parent dir
 sys.path.insert(1, os.path.dirname(sys.path[0]))
 
@@ -153,41 +152,16 @@ class AndroidHardwareTest(TestingMixin, 
     def _query_tests_dir(self):
         dirs = self.query_abs_dirs()
         try:
             test_dir = self.config["suite_definitions"][self.test_suite]["testsdir"]
         except Exception:
             test_dir = self.test_suite
         return os.path.join(dirs['abs_test_install_dir'], test_dir)
 
-    def _query_package_name(self):
-        if self.app_name is None:
-            # For convenience, assume geckoview.test/geckoview_example when install
-            # target looks like geckoview.
-            if 'androidTest' in self.installer_path:
-                self.app_name = 'org.mozilla.geckoview.test'
-            elif 'geckoview' in self.installer_path:
-                self.app_name = 'org.mozilla.geckoview_example'
-        if self.app_name is None:
-            # Find appname from package-name.txt - assumes download-and-extract
-            # has completed successfully.
-            # The app/package name will typically be org.mozilla.fennec,
-            # but org.mozilla.firefox for release builds, and there may be
-            # other variations. 'aapt dump badging <apk>' could be used as an
-            # alternative to package-name.txt, but introduces a dependency
-            # on aapt, found currently in the Android SDK build-tools component.
-            apk_dir = self.abs_dirs['abs_work_dir']
-            self.apk_path = os.path.join(apk_dir, self.installer_path)
-            unzip = self.query_exe("unzip")
-            package_path = os.path.join(apk_dir, 'package-name.txt')
-            unzip_cmd = [unzip, '-q', '-o', self.apk_path]
-            self.run_command(unzip_cmd, cwd=apk_dir, halt_on_failure=True)
-            self.app_name = str(self.read_from_file(package_path, verbose=True)).rstrip()
-        return self.app_name
-
     def _build_command(self):
         c = self.config
         dirs = self.query_abs_dirs()
 
         if self.test_suite not in self.config["suite_definitions"]:
             self.fatal("Key '%s' not defined in the config!" % self.test_suite)
 
         cmd = [
@@ -237,17 +211,17 @@ class AndroidHardwareTest(TestingMixin, 
             opt = option.split('=')[0]
             # override configured chunk options with script args, if specified
             if opt in ('--this-chunk', '--total-chunks'):
                 if user_paths or getattr(self, opt.replace('-', '_').strip('_'), None) is not None:
                     continue
 
             if '%(app)' in option:
                 # only query package name if requested
-                cmd.extend([option % {'app': self._query_package_name()}])
+                cmd.extend([option % {'app': self.query_package_name()}])
             else:
                 option = option % str_format_values
                 if option:
                     cmd.extend([option])
 
         if user_paths:
             cmd.extend(user_paths.split(':'))
         elif not self.verify_enabled:
@@ -332,28 +306,17 @@ class AndroidHardwareTest(TestingMixin, 
         """
         super(AndroidHardwareTest, self).download_and_extract(
             suite_categories=self._query_suite_categories())
         dirs = self.query_abs_dirs()
         if self.test_suite and self.test_suite.startswith('robocop'):
             robocop_url = self.installer_url[:self.installer_url.rfind('/')] + '/robocop.apk'
             self.info("Downloading robocop...")
             self.download_file(robocop_url, 'robocop.apk', dirs['abs_work_dir'], error_level=FATAL)
-        self.rmtree(dirs['abs_xre_dir'])
-        self.mkdir_p(dirs['abs_xre_dir'])
-        if self.config["hostutils_manifest_path"]:
-            url = self._get_repo_url(self.config["hostutils_manifest_path"])
-            self._tooltool_fetch(url, dirs['abs_xre_dir'])
-            for p in glob.glob(os.path.join(dirs['abs_xre_dir'], 'host-utils-*')):
-                if os.path.isdir(p) and os.path.isfile(os.path.join(p, 'xpcshell')):
-                    self.xre_path = p
-            if not self.xre_path:
-                self.fatal("xre path not found in %s" % dirs['abs_xre_dir'])
-        else:
-            self.fatal("configure hostutils_manifest_path!")
+        self.xre_path = self.download_hostutils(dirs['abs_xre_dir'])
 
     def install(self):
         """
         Install APKs on the device.
         """
         install_needed = (not self.test_suite) or \
             self.config["suite_definitions"][self.test_suite].get("install")
         if install_needed is False:
--- a/testing/web-platform/mach_commands.py
+++ b/testing/web-platform/mach_commands.py
@@ -268,16 +268,39 @@ testing/web-platform/tests for tests tha
             if ref_path:
                 path = "%s %s" % (path, ref_path)
             proc = subprocess.Popen("%s %s" % (editor, path), shell=True)
 
         if proc:
             proc.wait()
 
 
+class WPTManifestUpdater(MozbuildObject):
+    def setup_logging(self, **kwargs):
+        from wptrunner import wptlogging
+        logger = wptlogging.setup(kwargs, {"mach": sys.stdout})
+
+    def run_update(self, logger, rebuild=False, **kwargs):
+        import manifestupdate
+        wpt_dir = os.path.abspath(os.path.join(self.topsrcdir, 'testing', 'web-platform'))
+        config_dir = os.path.abspath(os.path.join(self.topobjdir, '_tests', 'web-platform'))
+        manifestupdate.update(logger, wpt_dir, rebuild, config_dir)
+
+
+class WPTManifestDownloader(MozbuildObject):
+    def setup_logging(self, **kwargs):
+        from wptrunner import wptlogging
+        logger = wptlogging.setup(kwargs, {"mach": sys.stdout})
+
+    def run_download(self, logger, manifest_update=True, force=False, **kwargs):
+        import manifestdownload
+        wpt_dir = os.path.abspath(os.path.join(self.topobjdir, '_tests', 'web-platform'))
+        manifestdownload.run(wpt_dir, self.topsrcdir, logger, force, manifest_update)
+
+
 def create_parser_update():
     from update import updatecommandline
     return updatecommandline.create_parser()
 
 
 def create_parser_create():
     import argparse
     p = argparse.ArgumentParser()
@@ -298,16 +321,20 @@ def create_parser_create():
     p.add_argument("path", action="store", help="Path to the test file")
     return p
 
 
 def create_parser_manifest_update():
     import manifestupdate
     return manifestupdate.create_parser()
 
+def create_parser_manifest_download():
+    import manifestdownload
+    return manifestdownload.create_parser()
+
 
 @CommandProvider
 class MachCommands(MachCommandBase):
     def setup(self):
         self._activate_virtualenv()
 
     @Command("web-platform-tests",
              category="testing",
@@ -325,16 +352,18 @@ class MachCommands(MachCommandBase):
                 params["include"].append(item["name"])
             del params["test_objects"]
 
         wpt_setup = self._spawn(WebPlatformTestsRunnerSetup)
         wpt_runner = WebPlatformTestsRunner(wpt_setup)
 
         logger = wpt_runner.setup_logging(**params)
 
+        self.wpt_manifest_download(logger, **params)
+        params["manifest_update"] = False
         return wpt_runner.run(logger, **params)
 
     @Command("wpt",
              category="testing",
              conditions=[conditions.is_firefox_or_android],
              parser=create_parser_wpt)
     def run_wpt(self, **params):
         return self.run_web_platform_tests(**params)
@@ -372,12 +401,24 @@ class MachCommands(MachCommandBase):
     def create_wpt(self, **params):
         return self.create_web_platform_test(**params)
 
     @Command("wpt-manifest-update",
              category="testing",
              parser=create_parser_manifest_update)
     def wpt_manifest_update(self, **params):
         self.setup()
-        wpt_setup = self._spawn(WebPlatformTestsRunnerSetup)
-        wpt_runner = WebPlatformTestsRunner(wpt_setup)
-        logger = wpt_runner.setup_logging(**params)
-        return 0 if wpt_runner.update_manifest(logger, **params) else 1
+        self.wpt_manifest_download(**params)
+        wpt_manifest_updater = self._spawn(WPTManifestUpdater)
+        logger = wpt_manifest_updater.setup_logging(**params)
+        self.wpt_manifest_download(logger, **params)
+        return wpt_manifest_updater.run_update(logger, **params)
+
+    @Command("wpt-manifest-download",
+             category="testing",
+             parser=create_parser_manifest_download)
+    def wpt_manifest_download(self, logger=None, **params):
+        self.setup()
+        if logger is None:  # invoked directly from mach rather than from another command
+            from wptrunner import wptlogging
+            logger = wptlogging.setup(params, {"mach": sys.stdout})
+        wpt_manifest_downloader = self._spawn(WPTManifestDownloader)
+        return wpt_manifest_downloader.run_download(logger, **params)
--- a/testing/web-platform/mach_commands_base.py
+++ b/testing/web-platform/mach_commands_base.py
@@ -17,31 +17,19 @@ class WebPlatformTestsRunner(object):
         self.setup = setup
 
     def setup_logging(self, **kwargs):
         from wptrunner import wptrunner
         return wptrunner.setup_logging(kwargs, {self.setup.default_log_type: sys.stdout})
 
     def run(self, logger, **kwargs):
         from wptrunner import wptrunner
-
-        if kwargs["manifest_update"] is not False:
-            self.update_manifest(logger)
-        kwargs["manifest_update"] = False
-
         if kwargs["product"] in ["firefox", None]:
             kwargs = self.setup.kwargs_firefox(kwargs)
         elif kwargs["product"] == "fennec":
             from wptrunner import wptcommandline
             kwargs = wptcommandline.check_args(self.setup.kwargs_common(kwargs))
         elif kwargs["product"] in ("chrome", "edge", "servo"):
             kwargs = self.setup.kwargs_wptrun(kwargs)
         else:
             raise ValueError("Unknown product %s" % kwargs["product"])
         result = wptrunner.start(**kwargs)
         return int(not result)
-
-    def update_manifest(self, logger, **kwargs):
-        import manifestupdate
-        return manifestupdate.run(logger=logger,
-                                  src_root=self.setup.topsrcdir,
-                                  obj_root=self.setup.topobjdir,
-                                  **kwargs)
--- a/testing/web-platform/manifestdownload.py
+++ b/testing/web-platform/manifestdownload.py
@@ -50,18 +50,16 @@ def should_download(logger, manifest_pat
         if mtime < datetime.now() - rebuild_time:
             return True
 
     logger.info("Skipping manifest download because existing file is recent")
     return False
 
 
 def taskcluster_url(logger, commits):
-    artifact_path = '/artifacts/public/manifests.tar.gz'
-
     cset_url = ('https://hg.mozilla.org/mozilla-central/json-pushes?'
                 'changeset={changeset}&version=2&tipsonly=1')
 
     tc_url = ('https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central.'
               'revision.{changeset}.source.manifest-upload')
 
     for revision in commits:
         req = None
@@ -90,73 +88,151 @@ def taskcluster_url(logger, commits):
 
         try:
             req = requests.get(tc_url.format(changeset=cset),
                                headers=HEADERS)
         except requests.exceptions.RequestException:
             return False
 
         if req.status_code == 200:
-            return tc_url.format(changeset=cset) + artifact_path
+            return tc_url.format(changeset=cset)
 
     logger.info("Can't find a commit-specific manifest so just using the most"
                 "recent one")
 
     return ("https://index.taskcluster.net/v1/task/gecko.v2.mozilla-central."
-            "latest.source.manifest-upload" +
-            artifact_path)
+            "latest.source.manifest-upload")
 
 
-def download_manifest(logger, test_paths, commits_func, url_func, force=False):
-    manifest_paths = [item["manifest_path"] for item in test_paths.itervalues()]
+def download_manifest(logger, wpt_dir, commits_func, url_func, force=False):
+    manifest_path = os.path.join(wpt_dir, "meta", "MANIFEST.json")
+    mozilla_manifest_path = os.path.join(wpt_dir, "mozilla", "meta", "MANIFEST.json")
 
-    if not force and not should_download(logger, manifest_paths):
+    if not force and not should_download(logger, [manifest_path, mozilla_manifest_path]):
         return True
 
     commits = commits_func()
     if not commits:
         return False
 
     url = url_func(logger, commits)
     if not url:
         logger.warning("No generated manifest found")
         return False
+    url += "/artifacts/public/manifests.tar.gz"
 
     logger.info("Downloading manifest from %s" % url)
     try:
         req = requests.get(url, headers=HEADERS)
     except Exception:
         logger.warning("Downloading pregenerated manifest failed")
         return False
 
     if req.status_code != 200:
         logger.warning("Downloading pregenerated manifest failed; got"
                         "HTTP status %d" % req.status_code)
         return False
 
     tar = tarfile.open(mode="r:gz", fileobj=StringIO(req.content))
-    for paths in test_paths.itervalues():
-        try:
-            member = tar.getmember(paths["manifest_rel_path"])
-        except KeyError:
-            logger.warning("Failed to find downloaded manifest %s" % paths["manifest_rel_path"])
-        try:
-            logger.debug("Unpacking %s to %s" % (member.name, paths["manifest_path"]))
-            src = tar.extractfile(member)
-            with open(paths["manifest_path"], "w") as dest:
-                dest.write(src.read())
-            src.close()
-        except IOError:
-            import traceback
-            logger.warning("Failed to decompress %s:\n%s" % (paths["manifest_rel_path"], traceback.format_exc()))
-            return False
+    try:
+        tar.extractall(path=wpt_dir)
+    except IOError:
+        logger.warning("Failed to decompress downloaded file")
+        return False
+
+    os.utime(manifest_path, None)
+    os.utime(mozilla_manifest_path, None)
+
+    logger.info("Manifest downloaded")
+    return True
+
+
+def create_parser():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-p", "--path", type=abs_path, help="Path to manifest file.")
+    parser.add_argument(
+        "--force", action="store_true",
+        help="Always download, even if the existing manifest is recent")
+    parser.add_argument(
+        "--no-manifest-update", action="store_false", dest="manifest_update",
+        default=True, help="Don't update the downloaded manifest")
+    return parser
+
+
+def download_from_taskcluster(logger, wpt_dir, repo_root, force=False):
+    return download_manifest(logger, wpt_dir, lambda: get_commits(logger, repo_root),
+                             taskcluster_url, force)
+
 
-        os.utime(paths["manifest_path"], None)
+def generate_config(obj_base_path):
+    """Generate the local wptrunner.ini file to use locally"""
+    import ConfigParser
+    here = os.path.split(os.path.abspath(__file__))[0]
+    config_path = os.path.join(here, 'wptrunner.ini')
+
+    if not os.path.exists(obj_base_path):
+        os.makedirs(obj_base_path)
+
+    path = os.path.join(obj_base_path, 'wptrunner.local.ini')
+
+    if os.path.exists(path):
+        return True
+
+    parser = ConfigParser.SafeConfigParser()
+    success = parser.read(config_path)
+    assert config_path in success, success
+
+    for name, path_prefix in [("upstream", ""),
+                              ("mozilla", "mozilla")]:
+        obj_path = os.path.join(obj_base_path, path_prefix)
+        src_path = os.path.join(here, path_prefix)
+        parser.set('manifest:%s' % name, 'manifest',
+                   os.path.join(obj_path, 'meta', 'MANIFEST.json'))
+
+        for key, dir_path in [("tests", "tests"), ("metadata", "meta")]:
+            parser.set("manifest:%s" % name, key, os.path.join(src_path, dir_path))
+
+    parser.set('paths', 'prefs', os.path.abspath(os.path.join(here, "..", 'profiles')))
+
+    with open(path, 'wb') as config_file:
+        parser.write(config_file)
 
     return True
 
 
-def download_from_taskcluster(logger, repo_root, test_paths, force=False):
-    return download_manifest(logger,
-                             test_paths,
-                             lambda: get_commits(logger, repo_root),
-                             taskcluster_url,
-                             force)
+def update_manifest(logger, config_dir, manifest_update=True):
+    if manifest_update:
+        logger.info("Updating manifests")
+        import manifestupdate
+        here = os.path.split(os.path.abspath(__file__))[0]
+        return manifestupdate.update(logger, here, config_dir=config_dir) == 0
+    else:
+        logger.info("Skipping manifest update")
+        return True
+
+def check_dirs(logger, success, wpt_dir):
+    if success:
+        return
+    else:
+        logger.info("Could not download manifests.")
+        logger.info("Generating from scratch instead.")
+        try:
+            os.mkdir(os.path.join(wpt_dir, "meta"))
+        except OSError:
+            pass
+        try:
+            os.makedirs(os.path.join(wpt_dir, "mozilla", "meta"))
+        except OSError:
+            pass
+
+
+def run(wpt_dir, repo_root, logger=None, force=False, manifest_update=True):
+    if not logger:
+        logger = logging.getLogger(__name__)
+        handler = logging.FileHandler(os.devnull)
+        logger.addHandler(handler)
+
+    success = download_from_taskcluster(logger, wpt_dir, repo_root, force)
+    check_dirs(logger, success, wpt_dir)
+    generate_config(wpt_dir)
+    success |= update_manifest(logger, wpt_dir, manifest_update)
+    return 0 if success else 1
--- a/testing/web-platform/manifestupdate.py
+++ b/testing/web-platform/manifestupdate.py
@@ -1,207 +1,76 @@
-import ConfigParser
 import argparse
 import imp
 import os
 import sys
+from collections import defaultdict
 
 from mozlog.structured import commandline
-from wptrunner.wptcommandline import set_from_config
-
-import manifestdownload
-from wptrunner import wptcommandline
+from wptrunner.wptcommandline import get_test_paths, set_from_config
 
 manifest = None
 
 
 def do_delayed_imports(wpt_dir):
     global manifest
-    imp.load_source("localpaths",
-                    os.path.join(wpt_dir, "tests", "tools", "localpaths.py"))
     sys.path.insert(0, os.path.join(wpt_dir, "tools", "manifest"))
     import manifest
 
 
+
 def create_parser():
     p = argparse.ArgumentParser()
     p.add_argument("--rebuild", action="store_true",
-                   help="Rebuild manifest from scratch")
-    download_group = p.add_mutually_exclusive_group()
-    download_group.add_argument(
-        "--download", dest="download", action="store_true", default=None,
-        help="Always download even if the local manifest is recent")
-    download_group.add_argument(
-        "--no-download", dest="download", action="store_false",
-        help="Don't try to download the manifest")
-    p.add_argument(
-        "--no-update", action="store_false", dest="update",
-        default=True, help="Just download the manifest, don't update")
-    p.add_argument(
-        "--config", action="store", dest="config_path", default=None,
-        help="Path to wptrunner config file")
-    p.add_argument(
-        "--rewrite-config", action="store_true", default=False,
-        help="Force the local configuration to be regenerated")
+                   help="Rebuild the manifest from scratch")
     commandline.add_logging_group(p)
 
     return p
 
 
-def ensure_kwargs(kwargs):
-    _kwargs = vars(create_parser().parse_args([]))
-    _kwargs.update(kwargs)
-    return _kwargs
+def update(logger, wpt_dir, rebuild=False, config_dir=None):
+    localpaths = imp.load_source("localpaths",
+                                 os.path.join(wpt_dir, "tests", "tools", "localpaths.py"))
+
+    if not config_dir:
+        config_dir = wpt_dir
+        config_name = "wptrunner.ini"
+    else:
+        if not os.path.exists(os.path.join(config_dir, 'wptrunner.local.ini')):
+            from manifestdownload import generate_config
+            generate_config(config_dir)
+        config_name = "wptrunner.local.ini"
+
+    kwargs = {"config": os.path.join(config_dir, config_name),
+              "tests_root": None,
+              "metadata_root": None}
+
+    set_from_config(kwargs)
+    do_delayed_imports(wpt_dir)
+
+    return _update(logger, kwargs["test_paths"], rebuild)
 
 
-def run(src_root, obj_root, logger=None, **kwargs):
-    kwargs = ensure_kwargs(kwargs)
-
-    if logger is None:
-        from wptrunner import wptlogging
-        logger = wptlogging.setup(kwargs, {"mach": sys.stdout})
-
-    src_wpt_dir = os.path.join(src_root, "testing", "web-platform")
-
-    do_delayed_imports(src_wpt_dir)
-
-    if not kwargs["config_path"]:
-        config_path = generate_config(logger,
-                                      src_root,
-                                      src_wpt_dir,
-                                      os.path.join(obj_root, "_tests", "web-platform"),
-                                      kwargs["rewrite_config"])
-    else:
-        config_path = kwargs["config_path"]
-
-    if not os.path.exists(config_path):
-        logger.critical("Config file %s does not exist" % config_path)
-        return None
-
-    logger.debug("Using config path %s" % config_path)
-
-    test_paths = wptcommandline.get_test_paths(
-        wptcommandline.config.read(config_path))
-
-    for paths in test_paths.itervalues():
-        if "manifest_path" not in paths:
-            paths["manifest_path"] = os.path.join(paths["metadata_path"],
-                                                  "MANIFEST.json")
-
-    ensure_manifest_directories(logger, test_paths)
-
-    local_config = read_local_config(src_wpt_dir)
-    for section in ["manifest:upstream", "manifest:mozilla"]:
-        url_base = local_config.get(section, "url_base")
-        manifest_rel_path = os.path.join(local_config.get(section, "metadata"),
-                                         "MANIFEST.json")
-        test_paths[url_base]["manifest_rel_path"] = manifest_rel_path
-
-    if not kwargs["rebuild"] and kwargs["download"] is not False:
-        force_download = False if kwargs["download"] is None else True
-        manifestdownload.download_from_taskcluster(logger,
-                                                   src_root,
-                                                   test_paths,
-                                                   force=force_download)
-    else:
-        logger.debug("Skipping manifest download")
-
-    if kwargs["update"] or kwargs["rebuild"]:
-        manifests = update(logger, src_wpt_dir, test_paths, rebuild=kwargs["rebuild"])
-    else:
-        logger.debug("Skipping manifest update")
-        manifests = load_manifests(test_paths)
-
-    return manifests
-
-
-def ensure_manifest_directories(logger, test_paths):
-    for paths in test_paths.itervalues():
-        manifest_dir = os.path.dirname(paths["manifest_path"])
-        if not os.path.exists(manifest_dir):
-            logger.info("Creating directory %s" % manifest_dir)
-            os.makedirs(manifest_dir)
-        elif not os.path.isdir(manifest_dir):
-            raise IOError("Manifest directory is a file")
-
-
-def read_local_config(wpt_dir):
-    src_config_path = os.path.join(wpt_dir, "wptrunner.ini")
-
-    parser = ConfigParser.SafeConfigParser()
-    success = parser.read(src_config_path)
-    assert src_config_path in success
-    return parser
-
-
-def generate_config(logger, repo_root, wpt_dir, dest_path, force_rewrite=False):
-    """Generate the local wptrunner.ini file to use locally"""
-    if not os.path.exists(dest_path):
-        os.makedirs(dest_path)
-
-    dest_config_path = os.path.join(dest_path, 'wptrunner.local.ini')
-
-    if not force_rewrite and os.path.exists(dest_config_path):
-        logger.debug("Config is up to date, not regenerating")
-        return dest_config_path
-
-    logger.info("Creating config file %s" % dest_config_path)
-
-    parser = read_local_config(wpt_dir)
-
-    for section in ["manifest:upstream", "manifest:mozilla"]:
-        meta_rel_path = parser.get(section, "metadata")
-        tests_rel_path = parser.get(section, "tests")
-
-        parser.set(section, "manifest",
-                   os.path.join(dest_path, meta_rel_path, 'MANIFEST.json'))
-        parser.set(section, "metadata", os.path.join(wpt_dir, meta_rel_path))
-        parser.set(section, "tests", os.path.join(wpt_dir, tests_rel_path))
-
-    parser.set('paths', 'prefs', os.path.abspath(os.path.join(wpt_dir, parser.get("paths", "prefs"))))
-
-    with open(dest_config_path, 'wb') as config_file:
-        parser.write(config_file)
-
-    return dest_config_path
-
-
-def update(logger, wpt_dir, test_paths, rebuild=False, config_dir=None):
-    rv = {}
-
+def _update(logger, test_paths, rebuild):
     for url_base, paths in test_paths.iteritems():
+        if "manifest_path" in paths:
+            manifest_path = paths["manifest_path"]
+        else:
+            manifest_path = os.path.join(paths["metadata_path"], "MANIFEST.json")
         m = None
-        manifest_path = paths["manifest_path"]
         if not rebuild and os.path.exists(manifest_path):
-            logger.info("Updating manifest %s" % manifest_path)
             try:
                 m = manifest.manifest.load(paths["tests_path"], manifest_path)
             except manifest.manifest.ManifestVersionMismatch:
                 logger.info("Manifest format changed, rebuilding")
         if m is None:
-            logger.info("Recreating manifest %s" % manifest_path)
             m = manifest.manifest.Manifest(url_base)
         manifest.update.update(paths["tests_path"], m, working_copy=True)
         manifest.manifest.write(m, manifest_path)
-
-        path_data = {"url_base": url_base}
-        path_data.update(paths)
-        rv[m] = path_data
-
-    return rv
-
-
-def load_manifests(test_paths):
-    rv = {}
-    for url_base, paths in test_paths.iteritems():
-        m = manifest.manifest.load(paths["tests_path"], manifest_path)
-        path_data = {"url_base": url_base}
-        path_data.update(paths)
-        rv[m] = path_data
-    return rv
+    return 0
 
 
 def log_error(logger, manifest_path, msg):
     logger.lint_error(path=manifest_path,
                       message=msg,
                       lineno=0,
                       source="",
                       linter="wpt-manifest")
--- a/testing/web-platform/meta/css/css-images/gradient/color-stops-parsing.html.ini
+++ b/testing/web-platform/meta/css/css-images/gradient/color-stops-parsing.html.ini
@@ -1,57 +1,9 @@
 [color-stops-parsing.html]
-  [linear-gradient(black 0% 50%, white) [ parsable \]]
-    expected: FAIL
-
-  [linear-gradient(black 0% 50%, white 50% 100%) [ parsable \]]
-    expected: FAIL
-
-  [linear-gradient(black 0% 50%, green 25% 75%, white 50% 100%) [ parsable \]]
-    expected: FAIL
-
-  [linear-gradient(black 0% calc(100% / 5), 25%, green 30% 60%, calc(100% * 3 / 4), white calc(100% - 20%) 100%) [ parsable \]]
-    expected: FAIL
-
-  [repeating-linear-gradient(black 0% 50%, white) [ parsable \]]
-    expected: FAIL
-
-  [repeating-linear-gradient(black 0% 50%, white 50% 100%) [ parsable \]]
-    expected: FAIL
-
-  [repeating-linear-gradient(black 0% 50%, green 25% 75%, white 50% 100%) [ parsable \]]
-    expected: FAIL
-
-  [repeating-linear-gradient(black 0% calc(100% / 5), 25%, green 30% 60%, calc(100% * 3 / 4), white calc(100% - 20%) 100%) [ parsable \]]
-    expected: FAIL
-
-  [radial-gradient(black 0% 50%, white) [ parsable \]]
-    expected: FAIL
-
-  [radial-gradient(black 0% 50%, white 50% 100%) [ parsable \]]
-    expected: FAIL
-
-  [radial-gradient(black 0% 50%, green 25% 75%, white 50% 100%) [ parsable \]]
-    expected: FAIL
-
-  [radial-gradient(black 0% calc(100% / 5), 25%, green 30% 60%, calc(100% * 3 / 4), white calc(100% - 20%) 100%) [ parsable \]]
-    expected: FAIL
-
-  [repeating-radial-gradient(black 0% 50%, white) [ parsable \]]
-    expected: FAIL
-
-  [repeating-radial-gradient(black 0% 50%, white 50% 100%) [ parsable \]]
-    expected: FAIL
-
-  [repeating-radial-gradient(black 0% 50%, green 25% 75%, white 50% 100%) [ parsable \]]
-    expected: FAIL
-
-  [repeating-radial-gradient(black 0% calc(100% / 5), 25%, green 30% 60%, calc(100% * 3 / 4), white calc(100% - 20%) 100%) [ parsable \]]
-    expected: FAIL
-
   [conic-gradient(black, white) [ parsable \]]
     expected: FAIL
 
   [conic-gradient(black 0, white) [ parsable \]]
     expected: FAIL
 
   [conic-gradient(black 0%, white) [ parsable \]]
     expected: FAIL
deleted file mode 100644
--- a/testing/web-platform/meta/css/css-images/multiple-position-color-stop-linear.html.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[multiple-position-color-stop-linear.html]
-  expected: FAIL
deleted file mode 100644
--- a/testing/web-platform/meta/css/css-images/multiple-position-color-stop-radial.html.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[multiple-position-color-stop-radial.html]
-  expected: FAIL
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/meta/encrypted-media/clearkey-mp4-unique-origin.https.html.ini
@@ -0,0 +1,4 @@
+[clearkey-mp4-unique-origin.https.html]
+  [Unique origin is unable to create MediaKeys]
+    expected: FAIL
+
--- a/testing/web-platform/meta/encrypted-media/encrypted-media-default-feature-policy.https.sub.html.ini
+++ b/testing/web-platform/meta/encrypted-media/encrypted-media-default-feature-policy.https.sub.html.ini
@@ -1,8 +1,7 @@
 [encrypted-media-default-feature-policy.https.sub.html]
   expected: TIMEOUT
   [Default "encrypted-media" feature policy ["self"\] allows same-origin iframes.]
     expected: TIMEOUT
 
-  [Feature policy "encrypted-media" can be enabled in cross-origin iframes using "allow" attribute.]
+  [Default "encrypted-media" feature policy ["self"\] disallows cross-origin iframes.]
     expected: FAIL
-
--- a/testing/web-platform/meta/feature-policy/payment-default-feature-policy.https.sub.html.ini
+++ b/testing/web-platform/meta/feature-policy/payment-default-feature-policy.https.sub.html.ini
@@ -7,10 +7,11 @@
     expected:
       if not e10s: FAIL
 
   [Default "payment" feature policy ["self"\] allowpaymentrequest=true allows same-origin iframes.]
     expected:
       if not e10s: FAIL
 
   [Default "payment" feature policy ["self"\] allowpaymentrequest=true allows cross-origin iframes.]
-    expected: FAIL
+    expected:
+      if not e10s: FAIL
 
deleted file mode 100644
--- a/testing/web-platform/meta/webvr/webvr-enabled-by-feature-policy-attribute.https.sub.html.ini
+++ /dev/null
@@ -1,4 +0,0 @@
-[webvr-enabled-by-feature-policy-attribute.https.sub.html]
-  [Feature-Policy allow="vr" attribute allows cross-origin iframe]
-    expected: FAIL
-
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/tests/css/css-images/multiple-position-color-stop-linear-2-ref.html
@@ -0,0 +1,5 @@
+<!doctype html>
+<meta charset=utf-8>
+<body>
+    <div style="background: linear-gradient(to bottom, red 0%, red 25%, blue 25%, blue 75%, red 75%, red 100%); width: 100px; height: 100px;"><br></div>
+</body>
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/tests/css/css-images/multiple-position-color-stop-linear-2.html
@@ -0,0 +1,9 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Linear gradient with a two position color stops</title>
+<link rel="help" href="https://drafts.csswg.org/css-images-4/#color-stop-syntax">
+<meta name="assert" content="Color stops with two positions are equivalent to two color stops with the same color">
+<link rel=match href=/css/css-images/multiple-position-color-stop-linear-2-ref.html>
+<body>
+    <div style="background: linear-gradient(to bottom, red 0% 25%, blue 25% 75%, red 75% 100%); width: 100px; height: 100px;"><br></div>
+</body>
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/tests/css/css-images/multiple-position-color-stop-radial-2-ref.html
@@ -0,0 +1,5 @@
+<!doctype html>
+<meta charset=utf-8>
+<body>
+    <div style="background: radial-gradient(center, red 0%, red 25%, blue 25%, blue 75%, red 75%, red 100%); width: 100px; height: 100px;"><br></div>
+</body>
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/tests/css/css-images/multiple-position-color-stop-radial-2.html
@@ -0,0 +1,9 @@
+<!doctype html>
+<meta charset=utf-8>
+<title>Radial gradient with a two position color stops</title>
+<link rel="help" href="https://drafts.csswg.org/css-images-4/#color-stop-syntax">
+<meta name="assert" content="Color stops with two positions are equivalent to two color stops with the same color">
+<link rel=match href=/css/css-images/multiple-position-color-stop-radial-2-ref.html>
+<body>
+    <div style="background: radial-gradient(center, red 0% 25%, blue 25% 75%, red 75% 100%); width: 100px; height: 100px;"><br></div>
+</body>
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/fennec.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/fennec.py
@@ -91,17 +91,17 @@ def browser_kwargs(test_type, run_info_d
             "device_serial": kwargs["device_serial"],
             "prefs_root": kwargs["prefs_root"],
             "extra_prefs": kwargs["extra_prefs"],
             "test_type": test_type,
             "debug_info": kwargs["debug_info"],
             "symbols_path": kwargs["symbols_path"],
             "stackwalk_binary": kwargs["stackwalk_binary"],
             "certutil_binary": kwargs["certutil_binary"],
-            "ca_certificate_path": kwargs["ssl_env"].ca_cert_path(),
+            "ca_certificate_path": config.ssl_config["ca_cert_path"],
             "stackfix_dir": kwargs["stackfix_dir"],
             "binary_args": kwargs["binary_args"],
             "timeout_multiplier": get_timeout_multiplier(test_type,
                                                          run_info_data,
                                                          **kwargs),
             "leak_check": kwargs["leak_check"],
             "stylo_threads": kwargs["stylo_threads"],
             "chaos_mode_flags": kwargs["chaos_mode_flags"],
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/config.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/config.py
@@ -1,47 +1,44 @@
 import ConfigParser
 import os
 import sys
 from collections import OrderedDict
 
 here = os.path.split(__file__)[0]
 
-
 class ConfigDict(dict):
     def __init__(self, base_path, *args, **kwargs):
         self.base_path = base_path
         dict.__init__(self, *args, **kwargs)
 
     def get_path(self, key, default=None):
         if key not in self:
             return default
         path = self[key]
         os.path.expanduser(path)
         return os.path.abspath(os.path.join(self.base_path, path))
 
-
 def read(config_path):
     config_path = os.path.abspath(config_path)
-    config_root = os.path.dirname(config_path)
+    config_root = os.path.split(config_path)[0]
     parser = ConfigParser.SafeConfigParser()
     success = parser.read(config_path)
     assert config_path in success, success
 
     subns = {"pwd": os.path.abspath(os.path.curdir)}
 
     rv = OrderedDict()
     for section in parser.sections():
         rv[section] = ConfigDict(config_root)
         for key in parser.options(section):
             rv[section][key] = parser.get(section, key, False, subns)
 
     return rv
 
-
 def path(argv=None):
     if argv is None:
         argv = []
     path = None
 
     for i, arg in enumerate(argv):
         if arg == "--config":
             if i + 1 < len(argv):
@@ -54,11 +51,10 @@ def path(argv=None):
     if path is None:
         if os.path.exists("wptrunner.ini"):
             path = os.path.abspath("wptrunner.ini")
         else:
             path = os.path.join(here, "..", "wptrunner.default.ini")
 
     return os.path.abspath(path)
 
-
 def load():
     return read(path(sys.argv))
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptcommandline.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptcommandline.py
@@ -361,16 +361,17 @@ def set_from_config(kwargs):
 
     if kwargs.get("manifest_path"):
         if "/" not in kwargs["test_paths"]:
             kwargs["test_paths"]["/"] = {}
         kwargs["test_paths"]["/"]["manifest_path"] = kwargs["manifest_path"]
 
     kwargs["suite_name"] = kwargs["config"].get("web-platform-tests", {}).get("name", "web-platform-tests")
 
+
     check_paths(kwargs)
 
 
 def get_test_paths(config):
     # Set up test_paths
     test_paths = OrderedDict()
 
     for section in config.iterkeys():
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
@@ -66,20 +66,19 @@ def get_run_info(metadata_root, product,
     return RunInfo(metadata_root, product, **kwargs)
 
 
 class RunInfo(dict):
     def __init__(self, metadata_root, product, debug,
                  browser_version=None,
                  browser_channel=None,
                  verify=None,
-                 extras=None,
-                 raise_exception=True):
+                 extras=None):
         import mozinfo
-        self._update_mozinfo(metadata_root, raise_exception=raise_exception)
+        self._update_mozinfo(metadata_root)
         self.update(mozinfo.info)
 
         from update.tree import GitTree
         try:
             # GitTree.__init__ throws if we are not in a git tree.
             rev = GitTree(log_error=False).rev
         except (OSError, subprocess.CalledProcessError):
             rev = None
@@ -98,30 +97,30 @@ class RunInfo(dict):
             self["browser_channel"] = browser_channel
 
         self["verify"] = verify
         if "wasm" not in self:
             self["wasm"] = False
         if extras is not None:
             self.update(extras)
 
-    def _update_mozinfo(self, metadata_root, **kwargs):
+    def _update_mozinfo(self, metadata_root):
         """Add extra build information from a mozinfo.json file in a parent
         directory"""
         import mozinfo
 
         path = metadata_root
         dirs = set()
         while path != os.path.expanduser('~'):
             if path in dirs:
                 break
             dirs.add(str(path))
             path = os.path.split(path)[0]
 
-        mozinfo.find_and_update_from_json(*dirs, **kwargs)
+        mozinfo.find_and_update_from_json(*dirs)
 
 
 class Test(object):
 
     result_cls = None
     subtest_result_cls = None
     test_type = None