Merge m-c to fx-team
author: Carsten "Tomcat" Book <cbook@mozilla.com>
Fri, 10 Oct 2014 15:06:16 +0200
changeset 233002 e1ae6cb6c6b131aadcf10ac9b3486694757bb576
parent 233001 984a9d8ac938b58e3e78c6a3573dfc55718eae00 (current diff)
parent 232968 097821fd89ed755f444f07ab501009855d996b2d (diff)
child 233003 320dd3495026132dda46ca14cae852070787209e
push id: 4187
push user: bhearsum@mozilla.com
push date: Fri, 28 Nov 2014 15:29:12 +0000
treeherder: mozilla-beta@f23cc6a30c11
milestone: 35.0a1
Merge m-c to fx-team
memory/replace/dmd/check_test_output.py
--- a/b2g/chrome/content/devtools/hud.js
+++ b/b2g/chrome/content/devtools/hud.js
@@ -51,18 +51,19 @@ let developerHUD = {
    * observed metrics with `target.register(metric)`, and keep them up-to-date
    * with `target.update(metric, message)` when necessary.
    */
   registerWatcher: function dwp_registerWatcher(watcher) {
     this._watchers.unshift(watcher);
   },
 
   init: function dwp_init() {
-    if (this._client)
+    if (this._client) {
       return;
+    }
 
     if (!DebuggerServer.initialized) {
       RemoteDebugger.initServer();
     }
 
     // We instantiate a local debugger connection so that watchers can use our
     // DebuggerClient to send requests to tab actors (e.g. the consoleActor).
     // Note the special usage of the private _serverConnection, which we need
@@ -86,36 +87,38 @@ let developerHUD = {
     }
 
     SettingsListener.observe('hud.logging', this._logging, enabled => {
       this._logging = enabled;
     });
   },
 
   uninit: function dwp_uninit() {
-    if (!this._client)
+    if (!this._client) {
       return;
+    }
 
     for (let frame of this._targets.keys()) {
       this.untrackFrame(frame);
     }
 
     AppFrames.removeObserver(this);
 
     this._client.close();
     delete this._client;
   },
 
   /**
    * This method will ask all registered watchers to track and update metrics
    * on an app frame.
    */
   trackFrame: function dwp_trackFrame(frame) {
-    if (this._targets.has(frame))
+    if (this._targets.has(frame)) {
       return;
+    }
 
     DebuggerServer.connectToChild(this._conn, frame).then(actor => {
       let target = new Target(frame, actor);
       this._targets.set(frame, target);
 
       for (let w of this._watchers) {
         w.trackTarget(target);
       }
@@ -334,56 +337,56 @@ let consoleWatcher = {
 
     switch (packet.type) {
 
       case 'pageError':
         let pageError = packet.pageError;
 
         if (pageError.warning || pageError.strict) {
           metric.name = 'warnings';
-          output += 'warning (';
+          output += 'Warning (';
         } else {
           metric.name = 'errors';
-          output += 'error (';
+          output += 'Error (';
         }
 
         if (this._security.indexOf(pageError.category) > -1) {
           metric.name = 'security';
         }
 
         let {errorMessage, sourceName, category, lineNumber, columnNumber} = pageError;
         output += category + '): "' + (errorMessage.initial || errorMessage) +
           '" in ' + sourceName + ':' + lineNumber + ':' + columnNumber;
         break;
 
       case 'consoleAPICall':
         switch (packet.message.level) {
 
           case 'error':
             metric.name = 'errors';
-            output += 'error (console)';
+            output += 'Error (console)';
             break;
 
           case 'warn':
             metric.name = 'warnings';
-            output += 'warning (console)';
+            output += 'Warning (console)';
             break;
 
           default:
             return;
         }
         break;
 
       case 'reflowActivity':
         metric.name = 'reflows';
 
         let {start, end, sourceURL, interruptible} = packet;
         metric.interruptible = interruptible;
         let duration = Math.round((end - start) * 100) / 100;
-        output += 'reflow: ' + duration + 'ms';
+        output += 'Reflow: ' + duration + 'ms';
         if (sourceURL) {
           output += ' ' + this.formatSourceURL(packet);
         }
         break;
 
       default:
         return;
     }
@@ -420,16 +423,17 @@ let eventLoopLagWatcher = {
 
     SettingsListener.observe('hud.jank', false, this.settingsListener.bind(this));
   },
 
   settingsListener: function(value) {
     if (this._active == value) {
       return;
     }
+
     this._active = value;
 
     // Toggle the state of existing fronts.
     let fronts = this._fronts;
     for (let target of fronts.keys()) {
       if (value) {
         fronts.get(target).start();
       } else {
@@ -441,17 +445,17 @@ let eventLoopLagWatcher = {
 
   trackTarget: function(target) {
     target.register('jank');
 
     let front = new EventLoopLagFront(this._client, target.actor);
     this._fronts.set(target, front);
 
     front.on('event-loop-lag', time => {
-      target.update({name: 'jank', value: time}, 'jank: ' + time + 'ms');
+      target.update({name: 'jank', value: time}, 'Jank: ' + time + 'ms');
     });
 
     if (this._active) {
       front.start();
     }
   },
 
   untrackTarget: function(target) {
@@ -495,17 +499,17 @@ let memoryWatcher = {
         watching[category] = watch;
         this.update();
       });
     }
   },
 
   update: function mw_update() {
     let watching = this._watching;
-    let active = watching.memory || watching.uss;
+    let active = watching.appmemory || watching.uss;
 
     if (this._active) {
       for (let target of this._fronts.keys()) {
         if (!watching.appmemory) target.clear({name: 'memory'});
         if (!watching.uss) target.clear({name: 'uss'});
         if (!active) clearTimeout(this._timers.get(target));
       }
     } else if (active) {
@@ -514,58 +518,69 @@ let memoryWatcher = {
       }
     }
     this._active = active;
   },
 
   measure: function mw_measure(target) {
     let watch = this._watching;
     let front = this._fronts.get(target);
+    let format = this.formatMemory;
 
     if (watch.uss) {
       front.residentUnique().then(value => {
-        target.update({name: 'uss', value: value});
+        target.update({name: 'uss', value: value}, 'USS: ' + format(value));
       }, err => {
         console.error(err);
       });
     }
 
     if (watch.appmemory) {
       front.measure().then(data => {
         let total = 0;
-        if (watch.jsobjects) {
-          total += parseInt(data.jsObjectsSize);
-        }
-        if (watch.jsstrings) {
-          total += parseInt(data.jsStringsSize);
-        }
-        if (watch.jsother) {
-          total += parseInt(data.jsOtherSize);
+        let details = [];
+
+        function item(name, condition, value) {
+          if (!condition) {
+            return;
+          }
+
+          let v = parseInt(value);
+          total += v;
+          details.push(name + ': ' + format(v));
         }
-        if (watch.dom) {
-          total += parseInt(data.domSize);
-        }
-        if (watch.style) {
-          total += parseInt(data.styleSize);
-        }
-        if (watch.other) {
-          total += parseInt(data.otherSize);
-        }
+
+        item('JS objects', watch.jsobjects, data.jsObjectsSize);
+        item('JS strings', watch.jsstrings, data.jsStringsSize);
+        item('JS other', watch.jsother, data.jsOtherSize);
+        item('DOM', watch.dom, data.domSize);
+        item('Style', watch.style, data.styleSize);
+        item('Other', watch.other, data.otherSize);
         // TODO Also count images size (bug #976007).
 
-        target.update({name: 'memory', value: total});
+        target.update({name: 'memory', value: total},
+          'App Memory: ' + format(total) + ' (' + details.join(', ') + ')');
       }, err => {
         console.error(err);
       });
     }
 
-    let timer = setTimeout(() => this.measure(target), 500);
+    let timer = setTimeout(() => this.measure(target), 800);
     this._timers.set(target, timer);
   },
 
+  formatMemory: function mw_formatMemory(bytes) {
+    var prefix = ['','K','M','G','T','P','E','Z','Y'];
+    var i = 0;
+    for (; bytes > 1024 && i < prefix.length; ++i) {
+      bytes /= 1024;
+    }
+    return (Math.round(bytes * 100) / 100) + ' ' + prefix[i] + 'B';
+  },
+
   trackTarget: function mw_trackTarget(target) {
     target.register('uss');
     target.register('memory');
     this._fronts.set(target, MemoryFront(this._client, target.actor));
     if (this._active) {
       this.measure(target);
     }
   },
--- a/b2g/config/dolphin/sources.xml
+++ b/b2g/config/dolphin/sources.xml
@@ -10,17 +10,17 @@
   <!--original fetch url was git://codeaurora.org/-->
   <remote fetch="https://git.mozilla.org/external/caf" name="caf"/>
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <!-- B2G specific things. -->
   <project name="platform_build" path="build" remote="b2g" revision="3a2947df41a480de1457a6dcdbf46ad0af70d8e0">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
-  <project name="gaia" path="gaia" remote="mozillaorg" revision="1036b544b7e102592bd9fab95cd9317329ac1293"/>
+  <project name="gaia" path="gaia" remote="mozillaorg" revision="cc5da7b055e2b06fdeb46fa94970550392ee571d"/>
   <project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="cc1f362ce43dce92ac786187ff4abf39060094bd"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="562d357b72279a9e35d4af5aeecc8e1ffa2f44f1"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="valgrind" path="external/valgrind" remote="b2g" revision="daa61633c32b9606f58799a3186395fd2bbb8d8c"/>
   <project name="vex" path="external/VEX" remote="b2g" revision="47f031c320888fe9f3e656602588565b52d43010"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="6ca2008ac50b163d31244ef9f036cb224f4f229b"/>
--- a/b2g/config/emulator-ics/sources.xml
+++ b/b2g/config/emulator-ics/sources.xml
@@ -14,17 +14,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="refs/tags/android-4.0.4_r2.1" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="84923f1940625c47ff4c1fdf01b10fde3b7d909e">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="1036b544b7e102592bd9fab95cd9317329ac1293"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="cc5da7b055e2b06fdeb46fa94970550392ee571d"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="cc1f362ce43dce92ac786187ff4abf39060094bd"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="cd88d860656c31c7da7bb310d6a160d0011b0961"/>
   <project name="platform_external_qemu" path="external/qemu" remote="b2g" revision="c058843242068d0df7c107e09da31b53d2e08fa6"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="562d357b72279a9e35d4af5aeecc8e1ffa2f44f1"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="6ca2008ac50b163d31244ef9f036cb224f4f229b"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="dd924f92906085b831bf1cbbc7484d3c043d613c"/>
--- a/b2g/config/emulator-jb/sources.xml
+++ b/b2g/config/emulator-jb/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <!-- B2G specific things. -->
   <project name="platform_build" path="build" remote="b2g" revision="8986df0f82e15ac2798df0b6c2ee3435400677ac">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
-  <project name="gaia" path="gaia" remote="mozillaorg" revision="1036b544b7e102592bd9fab95cd9317329ac1293"/>
+  <project name="gaia" path="gaia" remote="mozillaorg" revision="cc5da7b055e2b06fdeb46fa94970550392ee571d"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="cc1f362ce43dce92ac786187ff4abf39060094bd"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="562d357b72279a9e35d4af5aeecc8e1ffa2f44f1"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="6ca2008ac50b163d31244ef9f036cb224f4f229b"/>
   <project name="valgrind" path="external/valgrind" remote="b2g" revision="daa61633c32b9606f58799a3186395fd2bbb8d8c"/>
   <project name="vex" path="external/VEX" remote="b2g" revision="47f031c320888fe9f3e656602588565b52d43010"/>
   <!-- Stock Android things -->
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.1" path="prebuilts/clang/linux-x86/3.1" revision="5c45f43419d5582949284eee9cef0c43d866e03b"/>
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.2" path="prebuilts/clang/linux-x86/3.2" revision="3748b4168e7bd8d46457d4b6786003bc6a5223ce"/>
--- a/b2g/config/emulator-kk/sources.xml
+++ b/b2g/config/emulator-kk/sources.xml
@@ -10,17 +10,17 @@
   <!--original fetch url was git://codeaurora.org/-->
   <remote fetch="https://git.mozilla.org/external/caf" name="caf"/>
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <!-- B2G specific things. -->
   <project name="platform_build" path="build" remote="b2g" revision="3a2947df41a480de1457a6dcdbf46ad0af70d8e0">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
-  <project name="gaia" path="gaia" remote="mozillaorg" revision="1036b544b7e102592bd9fab95cd9317329ac1293"/>
+  <project name="gaia" path="gaia" remote="mozillaorg" revision="cc5da7b055e2b06fdeb46fa94970550392ee571d"/>
   <project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="cc1f362ce43dce92ac786187ff4abf39060094bd"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="562d357b72279a9e35d4af5aeecc8e1ffa2f44f1"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="valgrind" path="external/valgrind" remote="b2g" revision="daa61633c32b9606f58799a3186395fd2bbb8d8c"/>
   <project name="vex" path="external/VEX" remote="b2g" revision="47f031c320888fe9f3e656602588565b52d43010"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="6ca2008ac50b163d31244ef9f036cb224f4f229b"/>
--- a/b2g/config/emulator/sources.xml
+++ b/b2g/config/emulator/sources.xml
@@ -14,17 +14,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="refs/tags/android-4.0.4_r2.1" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="84923f1940625c47ff4c1fdf01b10fde3b7d909e">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="1036b544b7e102592bd9fab95cd9317329ac1293"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="cc5da7b055e2b06fdeb46fa94970550392ee571d"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="cc1f362ce43dce92ac786187ff4abf39060094bd"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="platform_hardware_ril" path="hardware/ril" remote="b2g" revision="cd88d860656c31c7da7bb310d6a160d0011b0961"/>
   <project name="platform_external_qemu" path="external/qemu" remote="b2g" revision="c058843242068d0df7c107e09da31b53d2e08fa6"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="562d357b72279a9e35d4af5aeecc8e1ffa2f44f1"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="6ca2008ac50b163d31244ef9f036cb224f4f229b"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="dd924f92906085b831bf1cbbc7484d3c043d613c"/>
--- a/b2g/config/flame-kk/sources.xml
+++ b/b2g/config/flame-kk/sources.xml
@@ -10,17 +10,17 @@
   <!--original fetch url was git://codeaurora.org/-->
   <remote fetch="https://git.mozilla.org/external/caf" name="caf"/>
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <!-- B2G specific things. -->
   <project name="platform_build" path="build" remote="b2g" revision="3a2947df41a480de1457a6dcdbf46ad0af70d8e0">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
-  <project name="gaia" path="gaia" remote="mozillaorg" revision="1036b544b7e102592bd9fab95cd9317329ac1293"/>
+  <project name="gaia" path="gaia" remote="mozillaorg" revision="cc5da7b055e2b06fdeb46fa94970550392ee571d"/>
   <project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="cc1f362ce43dce92ac786187ff4abf39060094bd"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="562d357b72279a9e35d4af5aeecc8e1ffa2f44f1"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="valgrind" path="external/valgrind" remote="b2g" revision="daa61633c32b9606f58799a3186395fd2bbb8d8c"/>
   <project name="vex" path="external/VEX" remote="b2g" revision="47f031c320888fe9f3e656602588565b52d43010"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="6ca2008ac50b163d31244ef9f036cb224f4f229b"/>
--- a/b2g/config/flame/sources.xml
+++ b/b2g/config/flame/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <!-- B2G specific things. -->
   <project name="platform_build" path="build" remote="b2g" revision="8986df0f82e15ac2798df0b6c2ee3435400677ac">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
   <project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
-  <project name="gaia" path="gaia" remote="mozillaorg" revision="1036b544b7e102592bd9fab95cd9317329ac1293"/>
+  <project name="gaia" path="gaia" remote="mozillaorg" revision="cc5da7b055e2b06fdeb46fa94970550392ee571d"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="cc1f362ce43dce92ac786187ff4abf39060094bd"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="562d357b72279a9e35d4af5aeecc8e1ffa2f44f1"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="6ca2008ac50b163d31244ef9f036cb224f4f229b"/>
   <project name="valgrind" path="external/valgrind" remote="b2g" revision="daa61633c32b9606f58799a3186395fd2bbb8d8c"/>
   <project name="vex" path="external/VEX" remote="b2g" revision="47f031c320888fe9f3e656602588565b52d43010"/>
   <!-- Stock Android things -->
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.1" path="prebuilts/clang/linux-x86/3.1" revision="e95b4ce22c825da44d14299e1190ea39a5260bde"/>
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.2" path="prebuilts/clang/linux-x86/3.2" revision="471afab478649078ad7c75ec6b252481a59e19b8"/>
--- a/b2g/config/gaia.json
+++ b/b2g/config/gaia.json
@@ -1,9 +1,9 @@
 {
     "git": {
         "git_revision": "", 
         "remote": "", 
         "branch": ""
     }, 
-    "revision": "ab9466a85acc108164bc17b9064387142b82d4da", 
+    "revision": "eeeae73691f91cd5042660b0f19c84747ebc7be2", 
     "repo_path": "/integration/gaia-central"
 }
--- a/b2g/config/hamachi/sources.xml
+++ b/b2g/config/hamachi/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="b2g/ics_strawberry" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="84923f1940625c47ff4c1fdf01b10fde3b7d909e">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="1036b544b7e102592bd9fab95cd9317329ac1293"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="cc5da7b055e2b06fdeb46fa94970550392ee571d"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="cc1f362ce43dce92ac786187ff4abf39060094bd"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="562d357b72279a9e35d4af5aeecc8e1ffa2f44f1"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="6ca2008ac50b163d31244ef9f036cb224f4f229b"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="6426040f1be4a844082c9769171ce7f5341a5528"/>
   <project name="platform/bionic" path="bionic" revision="d2eb6c7b6e1bc7643c17df2d9d9bcb1704d0b9ab"/>
--- a/b2g/config/helix/sources.xml
+++ b/b2g/config/helix/sources.xml
@@ -10,17 +10,17 @@
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <default remote="caf" revision="b2g/ics_strawberry" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="84923f1940625c47ff4c1fdf01b10fde3b7d909e">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="1036b544b7e102592bd9fab95cd9317329ac1293"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="cc5da7b055e2b06fdeb46fa94970550392ee571d"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="cc1f362ce43dce92ac786187ff4abf39060094bd"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="562d357b72279a9e35d4af5aeecc8e1ffa2f44f1"/>
   <project name="gonk-patches" path="patches" remote="b2g" revision="223a2421006e8f5da33f516f6891c87cae86b0f6"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="6426040f1be4a844082c9769171ce7f5341a5528"/>
   <project name="platform/bionic" path="bionic" revision="d2eb6c7b6e1bc7643c17df2d9d9bcb1704d0b9ab"/>
--- a/b2g/config/nexus-4/sources.xml
+++ b/b2g/config/nexus-4/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was https://git.mozilla.org/releases-->
   <remote fetch="https://git.mozilla.org/releases" name="mozillaorg"/>
   <!-- B2G specific things. -->
   <project name="platform_build" path="build" remote="b2g" revision="8986df0f82e15ac2798df0b6c2ee3435400677ac">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="fake-libdvm" path="dalvik" remote="b2g" revision="d50ae982b19f42f0b66d08b9eb306be81687869f"/>
-  <project name="gaia" path="gaia" remote="mozillaorg" revision="1036b544b7e102592bd9fab95cd9317329ac1293"/>
+  <project name="gaia" path="gaia" remote="mozillaorg" revision="cc5da7b055e2b06fdeb46fa94970550392ee571d"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="cc1f362ce43dce92ac786187ff4abf39060094bd"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="562d357b72279a9e35d4af5aeecc8e1ffa2f44f1"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="6ca2008ac50b163d31244ef9f036cb224f4f229b"/>
   <project name="valgrind" path="external/valgrind" remote="b2g" revision="daa61633c32b9606f58799a3186395fd2bbb8d8c"/>
   <project name="vex" path="external/VEX" remote="b2g" revision="47f031c320888fe9f3e656602588565b52d43010"/>
   <!-- Stock Android things -->
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.1" path="prebuilts/clang/linux-x86/3.1" revision="5c45f43419d5582949284eee9cef0c43d866e03b"/>
   <project groups="linux" name="platform/prebuilts/clang/linux-x86/3.2" path="prebuilts/clang/linux-x86/3.2" revision="3748b4168e7bd8d46457d4b6786003bc6a5223ce"/>
--- a/b2g/config/wasabi/sources.xml
+++ b/b2g/config/wasabi/sources.xml
@@ -12,17 +12,17 @@
   <!--original fetch url was git://github.com/apitrace/-->
   <remote fetch="https://git.mozilla.org/external/apitrace" name="apitrace"/>
   <default remote="caf" revision="ics_chocolate_rb4.2" sync-j="4"/>
   <!-- Gonk specific things and forks -->
   <project name="platform_build" path="build" remote="b2g" revision="84923f1940625c47ff4c1fdf01b10fde3b7d909e">
     <copyfile dest="Makefile" src="core/root.mk"/>
   </project>
   <project name="fake-dalvik" path="dalvik" remote="b2g" revision="ca1f327d5acc198bb4be62fa51db2c039032c9ce"/>
-  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="1036b544b7e102592bd9fab95cd9317329ac1293"/>
+  <project name="gaia.git" path="gaia" remote="mozillaorg" revision="cc5da7b055e2b06fdeb46fa94970550392ee571d"/>
   <project name="gonk-misc" path="gonk-misc" remote="b2g" revision="cc1f362ce43dce92ac786187ff4abf39060094bd"/>
   <project name="rilproxy" path="rilproxy" remote="b2g" revision="827214fcf38d6569aeb5c6d6f31cb296d1f09272"/>
   <project name="librecovery" path="librecovery" remote="b2g" revision="891e5069c0ad330d8191bf8c7b879c814258c89f"/>
   <project name="moztt" path="external/moztt" remote="b2g" revision="562d357b72279a9e35d4af5aeecc8e1ffa2f44f1"/>
   <project name="apitrace" path="external/apitrace" remote="apitrace" revision="6ca2008ac50b163d31244ef9f036cb224f4f229b"/>
   <project name="gonk-patches" path="patches" remote="b2g" revision="223a2421006e8f5da33f516f6891c87cae86b0f6"/>
   <!-- Stock Android things -->
   <project name="platform/abi/cpp" path="abi/cpp" revision="6426040f1be4a844082c9769171ce7f5341a5528"/>
--- a/content/media/MediaStreamGraph.cpp
+++ b/content/media/MediaStreamGraph.cpp
@@ -27,16 +27,18 @@
 #include <algorithm>
 #include "DOMMediaStream.h"
 #include "GeckoProfiler.h"
 #include "mozilla/unused.h"
 #ifdef MOZ_WEBRTC
 #include "AudioOutputObserver.h"
 #endif
 
+#include "webaudio/blink/HRTFDatabaseLoader.h"
+
 using namespace mozilla::layers;
 using namespace mozilla::dom;
 using namespace mozilla::gfx;
 
 namespace mozilla {
 
 #ifdef PR_LOGGING
 PRLogModuleInfo* gMediaStreamGraphLog;
@@ -2906,16 +2908,25 @@ MediaStreamGraphImpl::CollectReports(nsI
 
     nsPrintfCString streamPath("explicit/webaudio/audio-node/%s/stream-objects",
                                nodeType);
     REPORT(streamPath, usage.mStream,
            "Memory used by AudioNode stream objects (Web Audio).");
 
   }
 
+  size_t hrtfLoaders = WebCore::HRTFDatabaseLoader::sizeOfLoaders(MallocSizeOf);
+  if (hrtfLoaders) {
+
+    REPORT(NS_LITERAL_CSTRING(
+              "explicit/webaudio/audio-node/PannerNode/hrtf-databases"),
+           hrtfLoaders,
+           "Memory used by PannerNode databases (Web Audio).");
+  }
+
 #undef REPORT
 
   return NS_OK;
 }
 
 SourceMediaStream*
 MediaStreamGraph::CreateSourceStream(DOMMediaStream* aWrapper)
 {
--- a/content/media/webaudio/blink/HRTFDatabaseLoader.cpp
+++ b/content/media/webaudio/blink/HRTFDatabaseLoader.cpp
@@ -32,16 +32,21 @@
 using namespace mozilla;
 
 namespace WebCore {
 
 // Singleton
 nsTHashtable<HRTFDatabaseLoader::LoaderByRateEntry>*
     HRTFDatabaseLoader::s_loaderMap = nullptr;
 
+size_t HRTFDatabaseLoader::sizeOfLoaders(mozilla::MallocSizeOf aMallocSizeOf)
+{
+    return s_loaderMap ? s_loaderMap->SizeOfIncludingThis(aMallocSizeOf) : 0;
+}
+
 TemporaryRef<HRTFDatabaseLoader> HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(float sampleRate)
 {
     MOZ_ASSERT(NS_IsMainThread());
 
     RefPtr<HRTFDatabaseLoader> loader;
     
     if (!s_loaderMap) {
         s_loaderMap = new nsTHashtable<LoaderByRateEntry>();
--- a/content/media/webaudio/blink/HRTFDatabaseLoader.h
+++ b/content/media/webaudio/blink/HRTFDatabaseLoader.h
@@ -89,39 +89,48 @@ public:
 
     float databaseSampleRate() const { return m_databaseSampleRate; }
 
     static void shutdown();
     
     // Called in asynchronous loading thread.
     void load();
 
-    size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
+    // Sums the size of all cached database loaders.
+    static size_t sizeOfLoaders(mozilla::MallocSizeOf aMallocSizeOf);
 
 private:
     // Both constructor and destructor must be called from the main thread.
     explicit HRTFDatabaseLoader(float sampleRate);
     ~HRTFDatabaseLoader();
-    
+
+    size_t sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
+
     void ProxyRelease(); // any thread
     void MainThreadRelease(); // main thread only
     class ProxyReleaseEvent;
 
     // If it hasn't already been loaded, creates a new thread and initiates asynchronous loading of the default database.
     // This must be called from the main thread.
     void loadAsynchronously();
 
     // Map from sample-rate to loader.
     class LoaderByRateEntry : public nsFloatHashKey {
     public:
         explicit LoaderByRateEntry(KeyTypePointer aKey)
             : nsFloatHashKey(aKey)
             , mLoader() // so PutEntry() will zero-initialize
         {
         }
+
+        size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
+        {
+            return mLoader ? mLoader->sizeOfIncludingThis(aMallocSizeOf) : 0;
+        }
+
         HRTFDatabaseLoader* mLoader;
     };
 
     static PLDHashOperator shutdownEnumFunc(LoaderByRateEntry *entry,
                                             void* unused);
 
     // Keeps track of loaders on a per-sample-rate basis.
     static nsTHashtable<LoaderByRateEntry> *s_loaderMap; // singleton
--- a/content/media/webaudio/blink/HRTFPanner.cpp
+++ b/content/media/webaudio/blink/HRTFPanner.cpp
@@ -69,20 +69,17 @@ HRTFPanner::~HRTFPanner()
 {
     MOZ_COUNT_DTOR(HRTFPanner);
 }
 
 size_t HRTFPanner::sizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const
 {
     size_t amount = aMallocSizeOf(this);
 
-    if (m_databaseLoader) {
-        m_databaseLoader->sizeOfIncludingThis(aMallocSizeOf);
-    }
-
+    // NB: m_databaseLoader can be shared, so it is not measured here
     amount += m_convolverL1.sizeOfExcludingThis(aMallocSizeOf);
     amount += m_convolverR1.sizeOfExcludingThis(aMallocSizeOf);
     amount += m_convolverL2.sizeOfExcludingThis(aMallocSizeOf);
     amount += m_convolverR2.sizeOfExcludingThis(aMallocSizeOf);
     amount += m_delayLine.SizeOfExcludingThis(aMallocSizeOf);
     amount += m_tempL1.SizeOfExcludingThis(aMallocSizeOf);
     amount += m_tempL2.SizeOfExcludingThis(aMallocSizeOf);
     amount += m_tempR1.SizeOfExcludingThis(aMallocSizeOf);
--- a/dom/media/tests/mochitest/mochitest.ini
+++ b/dom/media/tests/mochitest/mochitest.ini
@@ -1,14 +1,15 @@
 [DEFAULT]
 skip-if = (os == 'win' && contentSandbox != 'off') # contentSandbox(Bug 1042735)
 support-files =
   head.js
   constraints.js
   mediaStreamPlayback.js
+  nonTrickleIce.js
   pc.js
   templates.js
   NetworkPreparationChromeScript.js
   blacksilence.js
   turnConfig.js
 
 [test_dataChannel_basicAudio.html]
 skip-if = toolkit == 'gonk' # Bug 962984 for debug, bug 963244 for opt
@@ -86,16 +87,22 @@ skip-if = toolkit == 'gonk' # b2g (Bug 1
 [test_peerConnection_bug1013809.html]
 skip-if = toolkit == 'gonk' # b2g emulator seems to be too slow (Bug 1016498 and 1008080)
 [test_peerConnection_bug1042791.html]
 skip-if = buildapp == 'b2g' || os == 'android' # bug 1043403
 [test_peerConnection_close.html]
 skip-if = toolkit == 'gonk' # b2g (Bug 1059867)
 [test_peerConnection_errorCallbacks.html]
 skip-if = toolkit == 'gonk' # b2g (Bug 1059867)
+[test_peerConnection_noTrickleAnswer.html]
+skip-if = toolkit == 'gonk' # b2g (Bug 1059867)
+[test_peerConnection_noTrickleOffer.html]
+skip-if = toolkit == 'gonk' # b2g (Bug 1059867)
+[test_peerConnection_noTrickleOfferAnswer.html]
+skip-if = toolkit == 'gonk' # b2g (Bug 1059867)
 [test_peerConnection_offerRequiresReceiveAudio.html]
 skip-if = toolkit == 'gonk' # b2g(Bug 960442, video support for WebRTC is disabled on b2g)
 [test_peerConnection_offerRequiresReceiveVideo.html]
 skip-if = toolkit == 'gonk' # b2g(Bug 960442, video support for WebRTC is disabled on b2g)
 [test_peerConnection_offerRequiresReceiveVideoAudio.html]
 skip-if = toolkit == 'gonk' # b2g(Bug 960442, video support for WebRTC is disabled on b2g)
 [test_peerConnection_replaceTrack.html]
 skip-if = toolkit == 'gonk' # b2g(Bug 960442, video support for WebRTC is disabled on b2g)
new file mode 100644
--- /dev/null
+++ b/dom/media/tests/mochitest/nonTrickleIce.js
@@ -0,0 +1,130 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function makeOffererNonTrickle(chain) {
+  chain.replace('PC_LOCAL_SETUP_ICE_HANDLER', [
+    ['PC_LOCAL_SETUP_NOTRICKLE_ICE_HANDLER',
+      function (test) {
+        test.pcLocalWaitingForEndOfTrickleIce = false;
+        // We need to install this callback before calling setLocalDescription
+        // otherwise we might miss callbacks
+        test.pcLocal.setupIceCandidateHandler(test, function () {
+            // We ignore ICE candidates because we want the full offer
+          } , function (label) {
+            if (test.pcLocalWaitingForEndOfTrickleIce) {
+              // This callback is needed for slow environments where ICE
+              // trickling has not finished before the other side needs the
+              // full SDP. In this case, this call to test.next() will complete
+              // the PC_REMOTE_WAIT_FOR_OFFER step (see below).
+              info("Looks like we were still waiting for Trickle to finish");
+              // TODO replace this with a Promise
+              test.next();
+            }
+          });
+        // We can't wait for trickle to finish here as it will only start once
+        // we have called setLocalDescription in the next step
+        test.next();
+      }
+    ]
+  ]);
+  chain.replace('PC_REMOTE_GET_OFFER', [
+    ['PC_REMOTE_WAIT_FOR_OFFER',
+      function (test) {
+        if (test.pcLocal.endOfTrickleIce) {
+          info("Trickle ICE finished already");
+          test.next();
+        } else {
+          info("Waiting for trickle ICE to finish");
+          test.pcLocalWaitingForEndOfTrickleIce = true;
+          // In this case we rely on the callback from
+          // PC_LOCAL_SETUP_NOTRICKLE_ICE_HANDLER above to proceed to the next
+          // step once trickle is finished.
+        }
+      }
+    ],
+    ['PC_REMOTE_GET_FULL_OFFER',
+      function (test) {
+        test._local_offer = test.pcLocal.localDescription;
+        test._offer_constraints = test.pcLocal.constraints;
+        test._offer_options = test.pcLocal.offerOptions;
+        test.next();
+      }
+    ]
+  ]);
+  chain.insertAfter('PC_REMOTE_SANE_REMOTE_SDP', [
+    ['PC_REMOTE_REQUIRE_REMOTE_SDP_CANDIDATES',
+      function (test) {
+        info("test.pcLocal.localDescription.sdp: " + JSON.stringify(test.pcLocal.localDescription.sdp));
+        info("test._local_offer.sdp" + JSON.stringify(test._local_offer.sdp));
+        ok(!test.localRequiresTrickleIce, "Local does NOT require trickle");
+        ok(test._local_offer.sdp.contains("a=candidate"), "offer has ICE candidates")
+        // TODO check for a=end-of-candidates once implemented
+        test.next();
+      }
+    ]
+  ]);
+}
+
+function makeAnswererNonTrickle(chain) {
+  chain.replace('PC_REMOTE_SETUP_ICE_HANDLER', [
+    ['PC_REMOTE_SETUP_NOTRICKLE_ICE_HANDLER',
+      function (test) {
+        test.pcRemoteWaitingForEndOfTrickleIce = false;
+        // We need to install this callback before calling setLocalDescription
+        // otherwise we might miss callbacks
+        test.pcRemote.setupIceCandidateHandler(test, function () {
+          // We ignore ICE candidates because we want the full answer
+          }, function (label) {
+            if (test.pcRemoteWaitingForEndOfTrickleIce) {
+              // This callback is needed for slow environments where ICE
+              // trickling has not finished before the other side needs the
+              // full SDP. In this case this callback will call the step after
+              // PC_LOCAL_WAIT_FOR_ANSWER
+              info("Looks like we were still waiting for Trickle to finish");
+              // TODO replace this with a Promise
+              test.next();
+            }
+          });
+        // We can't wait for trickle to finish here as it will only start once
+        // we have called setLocalDescription in the next step
+        test.next();
+      }
+    ]
+  ]);
+  chain.replace('PC_LOCAL_GET_ANSWER', [
+    ['PC_LOCAL_WAIT_FOR_ANSWER',
+      function (test) {
+        if (test.pcRemote.endOfTrickleIce) {
+          info("Trickle ICE finished already");
+          test.next();
+        } else {
+          info("Waiting for trickle ICE to finish");
+          test.pcRemoteWaitingForEndOfTrickleIce = true;
+          // In this case we rely on the callback from
+          // PC_REMOTE_SETUP_NOTRICKLE_ICE_HANDLER above to proceed to the next
+          // step once trickle is finished.
+        }
+      }
+    ],
+    ['PC_LOCAL_GET_FULL_ANSWER',
+      function (test) {
+        test._remote_answer = test.pcRemote.localDescription;
+        test._answer_constraints = test.pcRemote.constraints;
+        test.next();
+      }
+    ]
+  ]);
+  chain.insertAfter('PC_LOCAL_SANE_REMOTE_SDP', [
+    ['PC_LOCAL_REQUIRE_REMOTE_SDP_CANDIDATES',
+      function (test) {
+        info("test.pcRemote.localDescription.sdp: " + JSON.stringify(test.pcRemote.localDescription.sdp));
+        info("test._remote_answer.sdp" + JSON.stringify(test._remote_answer.sdp));
+        ok(!test.remoteRequiresTrickleIce, "Remote does NOT require trickle");
+        ok(test._remote_answer.sdp.contains("a=candidate"), "answer has ICE candidates")
+        // TODO check for a=end-of-candidates once implemented
+        test.next();
+      }
+    ]
+  ]);
+}
--- a/dom/media/tests/mochitest/pc.js
+++ b/dom/media/tests/mochitest/pc.js
@@ -220,16 +220,30 @@ CommandChain.prototype = {
     if (index > -1) {
       return this._commands.splice(0, index);
     }
 
     return null;
   },
 
   /**
+   * Replaces a single command.
+   *
+   * @param {string} id
+   *        Identifier of the command to be replaced
+   * @param {Array[]} commands
+   *        List of commands
+   * @returns {object[]} Removed commands
+   */
+  replace : function (id, commands) {
+    this.insertBefore(id, commands);
+    return this.remove(id);
+  },
+
+  /**
    * Replaces all commands after the specified one.
    *
    * @param {string} id
    *        Identifier of the command
    * @returns {object[]} Removed commands
    */
   replaceAfter : function (id, commands) {
     var oldCommands = this.removeAfter(id);
@@ -2080,39 +2094,43 @@ PeerConnectionWrapper.prototype = {
 
   /**
    * Setup a onicecandidate handler
    *
    * @param {object} test
    *        A PeerConnectionTest object to which the ice candidates gets
    *        forwarded.
    */
-  setupIceCandidateHandler : function PCW_setupIceCandidateHandler(test) {
+  setupIceCandidateHandler : function
+    PCW_setupIceCandidateHandler(test, candidateHandler, endHandler) {
     var self = this;
     self._local_ice_candidates = [];
     self._remote_ice_candidates = [];
     self._ice_candidates_to_add = [];
 
+    candidateHandler = candidateHandler || test.iceCandidateHandler.bind(test);
+    endHandler = endHandler || test.signalEndOfTrickleIce.bind(test);
+
     function iceCandidateCallback (anEvent) {
       info(self.label + ": received iceCandidateEvent");
       if (!anEvent.candidate) {
         info(self.label + ": received end of trickle ICE event");
         self.endOfTrickleIce = true;
-        test.signalEndOfTrickleIce(self.label);
+        endHandler(self.label);
       } else {
         if (self.endOfTrickleIce) {
           ok(false, "received ICE candidate after end of trickle");
         }
         info(self.label + ": iceCandidate = " + JSON.stringify(anEvent.candidate));
         ok(anEvent.candidate.candidate.length > 0, "ICE candidate contains candidate");
         // we don't support SDP MID's yet
         ok(anEvent.candidate.sdpMid.length === 0, "SDP MID has length zero");
         ok(typeof anEvent.candidate.sdpMLineIndex === 'number', "SDP MLine Index needs to exist");
         self._local_ice_candidates.push(anEvent.candidate);
-        test.iceCandidateHandler(self.label, anEvent.candidate);
+        candidateHandler(self.label, anEvent.candidate);
       }
     }
 
     self._pc.onicecandidate = iceCandidateCallback;
   },
 
   /**
    * Counts the amount of audio tracks in a given media constraint.
new file mode 100644
--- /dev/null
+++ b/dom/media/tests/mochitest/test_peerConnection_noTrickleAnswer.html
@@ -0,0 +1,31 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <script type="application/javascript" src="head.js"></script>
+  <script type="application/javascript" src="mediaStreamPlayback.js"></script>
+  <script type="application/javascript" src="nonTrickleIce.js"></script>
+  <script type="application/javascript" src="pc.js"></script>
+  <script type="application/javascript" src="templates.js"></script>
+  <script type="application/javascript" src="turnConfig.js"></script>
+</head>
+<body>
+<pre id="test">
+<script type="application/javascript">
+  createHTML({
+    bug: "1060102",
+    title: "Basic audio only SDP answer without trickle ICE"
+  });
+
+  var test;
+  runNetworkTest(function (options) {
+    test = new PeerConnectionTest(options);
+    makeAnswererNonTrickle(test.chain);
+    test.setMediaConstraints([{audio: true}], [{audio: true}]);
+    test.run();
+  });
+</script>
+</pre>
+</body>
+</html>
new file mode 100644
--- /dev/null
+++ b/dom/media/tests/mochitest/test_peerConnection_noTrickleOffer.html
@@ -0,0 +1,31 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <script type="application/javascript" src="head.js"></script>
+  <script type="application/javascript" src="mediaStreamPlayback.js"></script>
+  <script type="application/javascript" src="nonTrickleIce.js"></script>
+  <script type="application/javascript" src="pc.js"></script>
+  <script type="application/javascript" src="templates.js"></script>
+  <script type="application/javascript" src="turnConfig.js"></script>
+</head>
+<body>
+<pre id="test">
+<script type="application/javascript">
+  createHTML({
+    bug: "1060102",
+    title: "Basic audio only SDP offer without trickle ICE"
+  });
+
+  var test;
+  runNetworkTest(function (options) {
+    test = new PeerConnectionTest(options);
+    makeOffererNonTrickle(test.chain);
+    test.setMediaConstraints([{audio: true}], [{audio: true}]);
+    test.run();
+  });
+</script>
+</pre>
+</body>
+</html>
new file mode 100644
--- /dev/null
+++ b/dom/media/tests/mochitest/test_peerConnection_noTrickleOfferAnswer.html
@@ -0,0 +1,32 @@
+<!DOCTYPE HTML>
+<html>
+<head>
+  <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
+  <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
+  <script type="application/javascript" src="head.js"></script>
+  <script type="application/javascript" src="mediaStreamPlayback.js"></script>
+  <script type="application/javascript" src="nonTrickleIce.js"></script>
+  <script type="application/javascript" src="pc.js"></script>
+  <script type="application/javascript" src="templates.js"></script>
+  <script type="application/javascript" src="turnConfig.js"></script>
+</head>
+<body>
+<pre id="test">
+<script type="application/javascript">
+  createHTML({
+    bug: "1060102",
+    title: "Basic audio only SDP offer and answer without trickle ICE"
+  });
+
+  var test;
+  runNetworkTest(function (options) {
+    test = new PeerConnectionTest(options);
+    makeOffererNonTrickle(test.chain);
+    makeAnswererNonTrickle(test.chain);
+    test.setMediaConstraints([{audio: true}], [{audio: true}]);
+    test.run();
+  });
+</script>
+</pre>
+</body>
+</html>
--- a/dom/system/gonk/RadioInterfaceLayer.js
+++ b/dom/system/gonk/RadioInterfaceLayer.js
@@ -1155,27 +1155,35 @@ DataConnectionHandler.prototype = {
         return Ci.nsINetworkInterface.NETWORK_TYPE_MOBILE_IMS;
       case "dun":
         return Ci.nsINetworkInterface.NETWORK_TYPE_MOBILE_DUN;
       default:
         return Ci.nsINetworkInterface.NETWORK_TYPE_UNKNOWN;
      }
   },
 
+  _compareDataCallOptions: function(dataCall, newDataCall) {
+    return dataCall.apnProfile.apn == newDataCall.apn &&
+           dataCall.apnProfile.user == newDataCall.user &&
+           dataCall.apnProfile.password == newDataCall.password &&
+           dataCall.chappap == newDataCall.chappap &&
+           dataCall.pdptype == newDataCall.pdptype;
+  },
+
   _deliverDataCallMessage: function(name, args) {
     for (let i = 0; i < this._dataCalls.length; i++) {
       let datacall = this._dataCalls[i];
-      // Send message only to the DataCall that matches apn.
+      // Send message only to the DataCall that matches the data call options.
       // Currently, args always contain only one datacall info.
-      if (!args[0].apn || args[0].apn != datacall.apnProfile.apn) {
+      if (!this._compareDataCallOptions(datacall, args[0])) {
         continue;
       }
       // Do not deliver message to DataCall that contains cid but mistmaches
       // with the cid in the current message.
-      if (args[0].cid && datacall.linkInfo.cid &&
+      if (args[0].cid !== undefined && datacall.linkInfo.cid != null &&
           args[0].cid != datacall.linkInfo.cid) {
         continue;
       }
 
       try {
         let handler = datacall[name];
         if (typeof handler !== "function") {
           throw new Error("No handler for " + name);
@@ -1481,19 +1489,27 @@ DataConnectionHandler.prototype = {
 
   /**
    * Handle data errors.
    */
   handleDataCallError: function(message) {
     // Notify data call error only for data APN
     let networkInterface = this.dataNetworkInterfaces.get("default");
     if (networkInterface && networkInterface.enabled) {
-      let apnSetting = networkInterface.apnSetting;
-      if (message.apn == apnSetting.apn) {
-        gMobileConnectionService.notifyDataError(this.clientId, message);
+      let dataCall = networkInterface.dataCall;
+      // If there is a cid, compare cid; otherwise it is probably an error on
+      // data call setup.
+      if (message.cid !== undefined) {
+        if (message.cid == dataCall.linkInfo.cid) {
+          gMobileConnectionService.notifyDataError(this.clientId, message);
+        }
+      } else {
+        if (this._compareDataCallOptions(dataCall, message)) {
+          gMobileConnectionService.notifyDataError(this.clientId, message);
+        }
       }
     }
 
     this._deliverDataCallMessage("dataCallError", [message]);
   },
 
   /**
    * Handle data call state changes.
@@ -3889,16 +3905,22 @@ DataCall.prototype = {
   timer: null,
 
   // APN failed connections. Retry counter
   apnRetryCounter: 0,
 
   // Array to hold RILNetworkInterfaces that requested this DataCall.
   requestedNetworkIfaces: null,
 
+  // Holds the pdp type sent to ril worker.
+  pdptype: null,
+
+  // Holds the authentication type sent to ril worker.
+  chappap: null,
+
   dataCallError: function(message) {
     if (DEBUG) this.debug("Data call error on APN: " + message.apn);
     this.state = RIL.GECKO_NETWORK_STATE_DISCONNECTED;
     this.retry();
   },
 
   dataCallStateChanged: function(datacall) {
     if (DEBUG) {
@@ -4007,31 +4029,42 @@ DataCall.prototype = {
       if (this.requestedNetworkIfaces[i].type == type) {
         return true;
       }
     }
     return false;
   },
 
   canHandleApn: function(apnSetting) {
-    // TODO: compare authtype?
-    return (this.apnProfile.apn == apnSetting.apn &&
-            (this.apnProfile.user || '') == (apnSetting.user || '') &&
-            (this.apnProfile.password || '') == (apnSetting.password || ''));
+    let isIdentical = this.apnProfile.apn == apnSetting.apn &&
+                      (this.apnProfile.user || '') == (apnSetting.user || '') &&
+                      (this.apnProfile.password || '') == (apnSetting.password || '') &&
+                      (this.apnProfile.authType || '') == (apnSetting.authtype || '');
+
+    if (RILQUIRKS_HAVE_IPV6) {
+      isIdentical = isIdentical &&
+                    (this.apnProfile.protocol || '') == (apnSetting.protocol || '') &&
+                    (this.apnProfile.roaming_protocol || '') == (apnSetting.roaming_protocol || '');
+    }
+
+    return isIdentical;
   },
 
   reset: function() {
     this.linkInfo.cid = null;
     this.linkInfo.ifname = null;
     this.linkInfo.ips = [];
     this.linkInfo.prefixLengths = [];
     this.linkInfo.dnses = [];
     this.linkInfo.gateways = [];
 
     this.state = RIL.GECKO_NETWORK_STATE_UNKNOWN;
+
+    this.chappap = null;
+    this.pdptype = null;
   },
 
   connect: function(networkInterface) {
     if (DEBUG) this.debug("connect: " + networkInterface.type);
 
     if (this.requestedNetworkIfaces.indexOf(networkInterface) == -1) {
       this.requestedNetworkIfaces.push(networkInterface);
     }
@@ -4074,38 +4107,41 @@ DataCall.prototype = {
     if (dataInfo == null ||
         dataInfo.state != RIL.GECKO_MOBILE_CONNECTION_STATE_REGISTERED ||
         dataInfo.type == RIL.GECKO_MOBILE_CONNECTION_STATE_UNKNOWN) {
       return;
     }
 
     let radioTechType = dataInfo.type;
     let radioTechnology = RIL.GECKO_RADIO_TECH.indexOf(radioTechType);
-    let authType = RIL.RIL_DATACALL_AUTH_TO_GECKO.indexOf(this.apnProfile.authtype);
+    let authType = RIL.RIL_DATACALL_AUTH_TO_GECKO.indexOf(this.apnProfile.authType);
     // Use the default authType if the value in database is invalid.
     // For the case that user might not select the authentication type.
     if (authType == -1) {
       if (DEBUG) {
         this.debug("Invalid authType " + this.apnProfile.authtype);
       }
       authType = RIL.RIL_DATACALL_AUTH_TO_GECKO.indexOf(RIL.GECKO_DATACALL_AUTH_DEFAULT);
     }
+    this.chappap = authType;
+
     let pdpType = RIL.GECKO_DATACALL_PDP_TYPE_IP;
     if (RILQUIRKS_HAVE_IPV6) {
       pdpType = !dataInfo.roaming
               ? this.apnProfile.protocol
               : this.apnProfile.roaming_protocol;
       if (RIL.RIL_DATACALL_PDP_TYPES.indexOf(pdpType) < 0) {
         if (DEBUG) {
           this.debug("Invalid pdpType '" + pdpType + "', using '" +
                      RIL.GECKO_DATACALL_PDP_TYPE_DEFAULT + "'");
         }
         pdpType = RIL.GECKO_DATACALL_PDP_TYPE_DEFAULT;
       }
     }
+    this.pdptype = pdpType;
 
     let radioInterface = this.gRIL.getRadioInterface(this.clientId);
     radioInterface.sendWorkerMessage("setupDataCall", {
       radioTech: radioTechnology,
       apn: this.apnProfile.apn,
       user: this.apnProfile.user,
       passwd: this.apnProfile.password,
       chappap: authType,
--- a/dom/system/gonk/ril_worker.js
+++ b/dom/system/gonk/ril_worker.js
@@ -4123,20 +4123,18 @@ RilObject.prototype = {
     return "identical";
   },
 
   _processDataCallList: function(datacalls, newDataCallOptions) {
     // Check for possible PDP errors: We check earlier because the datacall
     // can be removed if is the same as the current one.
     for each (let newDataCall in datacalls) {
       if (newDataCall.status != DATACALL_FAIL_NONE) {
-        if (newDataCallOptions) {
-          newDataCall.apn = newDataCallOptions.apn;
-        }
-        this._sendDataCallError(newDataCall, newDataCall.status);
+        this._sendDataCallError(newDataCallOptions || newDataCall,
+                                newDataCall.status);
       }
     }
 
     for each (let currentDataCall in this.currentDataCalls) {
       let updatedDataCall;
       if (datacalls) {
         updatedDataCall = datacalls[currentDataCall.cid];
         delete datacalls[currentDataCall.cid];
--- a/js/src/doc/Debugger/Debugger.Memory.md
+++ b/js/src/doc/Debugger/Debugger.Memory.md
@@ -86,22 +86,35 @@ following accessor properties from its p
     value is `5000`.
 
 
 Function Properties of the `Debugger.Memory.prototype` Object
 -------------------------------------------------------------
 
 <code id='drain-alloc-log'>drainAllocationsLog()</code>
 :   When `trackingAllocationSites` is `true`, this method returns an array of
-    allocation sites (as [captured stacks][saved-frame]) for recent `Object`
-    allocations within the set of debuggees. Entries for objects allocated with
-    no JavaScript frames on the stack are `null`. *Recent* is defined as the
-    `maxAllocationsLogLength` most recent `Object` allocations since the last
-    call to `drainAllocationsLog`. Therefore, calling this method effectively
-    clears the log.
+    recent `Object` allocations within the set of debuggees. *Recent* is
+    defined as the `maxAllocationsLogLength` most recent `Object` allocations
+    since the last call to `drainAllocationsLog`. Therefore, calling this
+    method effectively clears the log.
+
+    Objects in the array are of the form:
+
+    <pre class='language-js'><code>
+    {
+      "timestamp": <i>timestamp</i>,
+      "frame": <i>allocationSite</i>
+    }
+    </code></pre>
+
+    Here <i>timestamp</i> is the timestamp of the event in units of
+    microseconds since the epoch and <i>allocationSite</i> is an
+    allocation site (as a [captured stack][saved-frame]).
+    <i>allocationSite</i> is `null` for objects allocated with no
+    JavaScript frames on the stack.
 
     When `trackingAllocationSites` is `false`, `drainAllocationsLog()` throws an
     `Error`.
 
 <code id='take-census'>takeCensus()</code>
 :   Carry out a census of the debuggee compartments' contents. A *census* is a
     complete traversal of the graph of all reachable memory items belonging to a
     particular `Debugger`'s debuggees. The census produces a count of those
--- a/js/src/doc/Debugger/Tutorial-Alloc-Log-Tree.md
+++ b/js/src/doc/Debugger/Tutorial-Alloc-Log-Tree.md
@@ -78,17 +78,17 @@ 3)  Enter the following code in the Scra
         // allocation counts. Note that stack entries are '===' if
         // they represent the same site with the same callers.
         var counts = new Map;
         for (let site of log) {
           // This is a kludge, necessary for now. The saved stacks
           // are new, and Firefox doesn't yet understand that they
           // are safe for chrome code to use, so we must tell it
           // so explicitly.
-          site = Components.utils.waiveXrays(site);
+          site = Components.utils.waiveXrays(site.frame);
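+          // (Each log entry is an object of the form { frame, timestamp };
+          // the allocation site we want is its frame.)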
 
           if (!counts.has(site))
             counts.set(site, 0);
           counts.set(site, counts.get(site) + 1);
         }
 
         // Walk from each site that allocated something up to the
         // root, computing allocation totals that include
--- a/js/src/jit-test/tests/debug/Memory-drainAllocationsLog-01.js
+++ b/js/src/jit-test/tests/debug/Memory-drainAllocationsLog-01.js
@@ -20,12 +20,12 @@ root.eval("(" + function immediate() {
 const allocs = dbg.memory.drainAllocationsLog();
 print(allocs.join("\n--------------------------------------------------------------------------\n"));
 print("Total number of allocations logged: " + allocs.length);
 
 let idx = -1;
 for (let object of root.tests) {
   let wrappedObject = wrappedRoot.makeDebuggeeValue(object);
   let allocSite = wrappedObject.allocationSite;
-  let newIdx = allocs.indexOf(allocSite);
+  let newIdx = allocs.map(x => x.frame).indexOf(allocSite);
   assertEq(newIdx > idx, true);
   idx = newIdx;
 }
--- a/js/src/jit-test/tests/debug/Memory-drainAllocationsLog-03.js
+++ b/js/src/jit-test/tests/debug/Memory-drainAllocationsLog-03.js
@@ -14,11 +14,11 @@ root.eval([
   "this.alloc4 = {};", // line 4
 ].join("\n"));
 
 const allocs = dbg.memory.drainAllocationsLog();
 
 // Should have stayed at the maximum length.
 assertEq(allocs.length, 3);
 // Should have kept the most recent allocation.
-assertEq(allocs[2].line, 4);
+assertEq(allocs[2].frame.line, 4);
 // Should have thrown away the oldest allocation.
-assertEq(allocs.map(x => x.line).indexOf(1), -1);
+assertEq(allocs.map(x => x.frame.line).indexOf(1), -1);
--- a/js/src/jit-test/tests/debug/Memory-drainAllocationsLog-08.js
+++ b/js/src/jit-test/tests/debug/Memory-drainAllocationsLog-08.js
@@ -16,15 +16,15 @@ root.eval([
 
 dbg.memory.trackingAllocationSites = true;
 
 root.doFirstAlloc();
 let allocs1 = dbg.memory.drainAllocationsLog();
 root.doSecondAlloc();
 let allocs2 = dbg.memory.drainAllocationsLog();
 
-let allocs1Lines = allocs1.map(x => x.line);
+let allocs1Lines = allocs1.map(x => x.frame.line);
 assertEq(allocs1Lines.indexOf(root.firstAllocLine) != -1, true);
 assertEq(allocs1Lines.indexOf(root.secondAllocLine) == -1, true);
 
-let allocs2Lines = allocs2.map(x => x.line);
+let allocs2Lines = allocs2.map(x => x.frame.line);
 assertEq(allocs2Lines.indexOf(root.secondAllocLine) != -1, true);
 assertEq(allocs2Lines.indexOf(root.firstAllocLine) == -1, true);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/debug/Memory-drainAllocationsLog-14.js
@@ -0,0 +1,47 @@
+// Test that drainAllocationsLog returns some timestamps.
+
+load(libdir + 'asserts.js');
+
+var allocTimes = [];
+
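+// dateNow() returns milliseconds since the epoch, while the log's timestamps
+// are in microseconds, hence the factor of 1000 below.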
+allocTimes.push(1000 * dateNow());
+
+const root = newGlobal();
+const dbg = new Debugger(root);
+
+dbg.memory.trackingAllocationSites = true;
+root.eval("this.alloc1 = {}");
+allocTimes.push(1000 * dateNow());
+root.eval("this.alloc2 = {}");
+allocTimes.push(1000 * dateNow());
+root.eval("this.alloc3 = {}");
+allocTimes.push(1000 * dateNow());
+root.eval("this.alloc4 = {}");
+allocTimes.push(1000 * dateNow());
+
+allocs = dbg.memory.drainAllocationsLog();
+assertEq(allocs.length >= 4, true);
+assertEq(allocs[0].timestamp >= allocTimes[0], true);
+var seenAlloc = 0;
+var lastIndexSeenAllocIncremented = 0;
+for (i = 1; i < allocs.length; ++i) {
+    assertEq(allocs[i].timestamp >= allocs[i - 1].timestamp, true);
+    // It isn't possible to exactly correlate the entries in the
+    // allocs array with the entries in allocTimes, because we can't
+    // control exactly how many allocations are done during the course
+    // of a given eval.  However, we can assume that there is some
+    // allocation recorded after each entry in allocTimes.  So, we
+    // track the allocTimes entry we've passed, and then after the
+    // loop assert that we've seen them all.  We also assert that a
+    // non-zero number of allocations has happened since the last seen
+    // increment.
+    while (seenAlloc < allocTimes.length
+           && allocs[i].timestamp >= allocTimes[seenAlloc]) {
+        assertEq(i - lastIndexSeenAllocIncremented > 0, true);
+        lastIndexSeenAllocIncremented = i;
+        ++seenAlloc;
+    }
+}
+// There should be one entry left in allocTimes, because we recorded a
+// time after the last possible allocation in the array.
+assertEq(seenAlloc, allocTimes.length - 1);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/ion/bug1079850.js
@@ -0,0 +1,5 @@
+function foo() {
+    for(__key in null)
+        var key=startTest(VERSION) ? this : this;
+    if (key !== undefined) {}
+} foo();
--- a/js/src/jit/TypePolicy.cpp
+++ b/js/src/jit/TypePolicy.cpp
@@ -861,20 +861,26 @@ FilterTypeSetPolicy::adjustInputs(TempAl
 
     // Output is a value, box the input.
     if (outputType == MIRType_Value) {
         MOZ_ASSERT(inputType != MIRType_Value);
         ins->replaceOperand(0, boxAt(alloc, ins, ins->getOperand(0)));
         return true;
     }
 
-    // The outputType should always be a subset of the inputType.
-    // So if types don't equal, the input type is definitely a MIRType_Value.
-    if (inputType != MIRType_Value)
-        MOZ_CRASH("Types should be in accordance.");
+    // The outputType should be a subset of the inputType; otherwise we are in
+    // code that has never executed yet. Bail so that the new type can be
+    // observed (if that has not happened already).
+    if (inputType != MIRType_Value) {
+        MBail *bail = MBail::New(alloc);
+        ins->block()->insertBefore(ins, bail);
+        bail->setDependency(ins->dependency());
+        ins->setDependency(bail);
+        ins->replaceOperand(0, boxAt(alloc, ins, ins->getOperand(0)));
+    }
 
     // We can't unbox a value to null/undefined/lazyargs. So keep output
     // also a value.
     // Note: Using setResultType shouldn't be done in TypePolicies,
     //       Here it is fine, since the type barrier has no uses.
     if (IsNullOrUndefined(outputType) || outputType == MIRType_MagicOptimizedArguments) {
         MOZ_ASSERT(!ins->hasDefUses());
         ins->setResultType(MIRType_Value);
--- a/js/src/vm/CommonPropertyNames.h
+++ b/js/src/vm/CommonPropertyNames.h
@@ -70,16 +70,17 @@
     macro(fieldOffsets, fieldOffsets, "fieldOffsets") \
     macro(fieldTypes, fieldTypes, "fieldTypes") \
     macro(fileName, fileName, "fileName") \
     macro(fix, fix, "fix") \
     macro(float32, float32, "float32") \
     macro(float32x4, float32x4, "float32x4") \
     macro(float64, float64, "float64") \
     macro(format, format, "format") \
+    macro(frame, frame, "frame") \
     macro(from, from, "from") \
     macro(get, get, "get") \
     macro(getInternals, getInternals, "getInternals") \
     macro(getOwnPropertyDescriptor, getOwnPropertyDescriptor, "getOwnPropertyDescriptor") \
     macro(getOwnPropertyNames, getOwnPropertyNames, "getOwnPropertyNames") \
     macro(getPropertyDescriptor, getPropertyDescriptor, "getPropertyDescriptor") \
     macro(global, global, "global") \
     macro(Handle, Handle, "Handle") \
@@ -170,16 +171,17 @@
     macro(source, source, "source") \
     macro(stack, stack, "stack") \
     macro(sticky, sticky, "sticky") \
     macro(strings, strings, "strings") \
     macro(StructType, StructType, "StructType") \
     macro(style, style, "style") \
     macro(test, test, "test") \
     macro(throw, throw_, "throw") \
+    macro(timestamp, timestamp, "timestamp") \
     macro(timeZone, timeZone, "timeZone") \
     macro(toGMTString, toGMTString, "toGMTString") \
     macro(toISOString, toISOString, "toISOString") \
     macro(toJSON, toJSON, "toJSON") \
     macro(toLocaleString, toLocaleString, "toLocaleString") \
     macro(toSource, toSource, "toSource") \
     macro(toString, toString, "toString") \
     macro(toUTCString, toUTCString, "toUTCString") \
--- a/js/src/vm/Debugger.cpp
+++ b/js/src/vm/Debugger.cpp
@@ -1509,47 +1509,47 @@ Debugger::slowPathOnNewGlobalObject(JSCo
             if (status != JSTRAP_CONTINUE && status != JSTRAP_RETURN)
                 break;
         }
     }
     MOZ_ASSERT(!cx->isExceptionPending());
 }
 
 /* static */ bool
-Debugger::slowPathOnLogAllocationSite(JSContext *cx, HandleSavedFrame frame,
+Debugger::slowPathOnLogAllocationSite(JSContext *cx, HandleSavedFrame frame, int64_t when,
                                       GlobalObject::DebuggerVector &dbgs)
 {
     MOZ_ASSERT(!dbgs.empty());
     mozilla::DebugOnly<Debugger **> begin = dbgs.begin();
 
     for (Debugger **dbgp = dbgs.begin(); dbgp < dbgs.end(); dbgp++) {
         // The set of debuggers had better not change while we're iterating,
         // such that the vector gets reallocated.
         MOZ_ASSERT(dbgs.begin() == begin);
 
         if ((*dbgp)->trackingAllocationSites &&
             (*dbgp)->enabled &&
-            !(*dbgp)->appendAllocationSite(cx, frame))
+            !(*dbgp)->appendAllocationSite(cx, frame, when))
         {
             return false;
         }
     }
 
     return true;
 }
 
 bool
-Debugger::appendAllocationSite(JSContext *cx, HandleSavedFrame frame)
+Debugger::appendAllocationSite(JSContext *cx, HandleSavedFrame frame, int64_t when)
 {
     AutoCompartment ac(cx, object);
     RootedObject wrapped(cx, frame);
     if (!cx->compartment()->wrap(cx, &wrapped))
         return false;
 
-    AllocationSite *allocSite = cx->new_<AllocationSite>(wrapped);
+    AllocationSite *allocSite = cx->new_<AllocationSite>(wrapped, when);
     if (!allocSite)
         return false;
 
     allocationsLog.insertBack(allocSite);
 
     if (allocationsLogLength >= maxAllocationsLogLength) {
         js_delete(allocationsLog.getFirst());
     } else {
--- a/js/src/vm/Debugger.h
+++ b/js/src/vm/Debugger.h
@@ -197,30 +197,31 @@ class Debugger : private mozilla::Linked
     HeapPtrNativeObject object;         /* The Debugger object. Strong reference. */
     GlobalObjectSet debuggees;          /* Debuggee globals. Cross-compartment weak references. */
     js::HeapPtrObject uncaughtExceptionHook; /* Strong reference. */
     bool enabled;
     JSCList breakpoints;                /* Circular list of all js::Breakpoints in this debugger */
 
     struct AllocationSite : public mozilla::LinkedListElement<AllocationSite>
     {
-        explicit AllocationSite(HandleObject frame) : frame(frame) {
+        AllocationSite(HandleObject frame, int64_t when) : frame(frame), when(when) {
             MOZ_ASSERT_IF(frame, UncheckedUnwrap(frame)->is<SavedFrame>());
         };
         RelocatablePtrObject frame;
+        int64_t when;
     };
     typedef mozilla::LinkedList<AllocationSite> AllocationSiteList;
 
     bool trackingAllocationSites;
     double allocationSamplingProbability;
     AllocationSiteList allocationsLog;
     size_t allocationsLogLength;
     size_t maxAllocationsLogLength;
     static const size_t DEFAULT_MAX_ALLOCATIONS_LOG_LENGTH = 5000;
-    bool appendAllocationSite(JSContext *cx, HandleSavedFrame frame);
+    bool appendAllocationSite(JSContext *cx, HandleSavedFrame frame, int64_t when);
     void emptyAllocationsLog();
 
     /*
      * If this Debugger is enabled, and has a onNewGlobalObject handler, then
      * this link is inserted into the circular list headed by
      * JSRuntime::onNewGlobalObjectWatchers. Otherwise, this is set to a
      * singleton cycle.
      */
@@ -371,17 +372,17 @@ class Debugger : private mozilla::Linked
 
     static JSTrapStatus slowPathOnEnterFrame(JSContext *cx, AbstractFramePtr frame);
     static bool slowPathOnLeaveFrame(JSContext *cx, AbstractFramePtr frame, bool ok);
     static JSTrapStatus slowPathOnExceptionUnwind(JSContext *cx, AbstractFramePtr frame);
     static void slowPathOnNewScript(JSContext *cx, HandleScript script,
                                     GlobalObject *compileAndGoGlobal);
     static void slowPathOnNewGlobalObject(JSContext *cx, Handle<GlobalObject *> global);
     static bool slowPathOnLogAllocationSite(JSContext *cx, HandleSavedFrame frame,
-                                            GlobalObject::DebuggerVector &dbgs);
+                                            int64_t when, GlobalObject::DebuggerVector &dbgs);
     static JSTrapStatus dispatchHook(JSContext *cx, MutableHandleValue vp, Hook which);
 
     JSTrapStatus fireDebuggerStatement(JSContext *cx, MutableHandleValue vp);
     JSTrapStatus fireExceptionUnwind(JSContext *cx, MutableHandleValue vp);
     JSTrapStatus fireEnterFrame(JSContext *cx, AbstractFramePtr frame, MutableHandleValue vp);
     JSTrapStatus fireNewGlobalObject(JSContext *cx, Handle<GlobalObject *> global, MutableHandleValue vp);
 
     /*
@@ -504,17 +505,17 @@ class Debugger : private mozilla::Linked
      *   pending exception.
      *
      * - JSTRAP_RETURN: Return from |frame|. onExceptionUnwind has cleared
      *   |cx|'s pending exception and set |frame|'s return value.
      */
     static inline JSTrapStatus onExceptionUnwind(JSContext *cx, AbstractFramePtr frame);
     static inline void onNewScript(JSContext *cx, HandleScript script, GlobalObject *compileAndGoGlobal);
     static inline void onNewGlobalObject(JSContext *cx, Handle<GlobalObject *> global);
-    static inline bool onLogAllocationSite(JSContext *cx, HandleSavedFrame frame);
+    static inline bool onLogAllocationSite(JSContext *cx, HandleSavedFrame frame, int64_t when);
     static JSTrapStatus onTrap(JSContext *cx, MutableHandleValue vp);
     static JSTrapStatus onSingleStep(JSContext *cx, MutableHandleValue vp);
     static bool handleBaselineOsr(JSContext *cx, InterpreterFrame *from, jit::BaselineFrame *to);
     static bool handleIonBailout(JSContext *cx, jit::RematerializedFrame *from, jit::BaselineFrame *to);
     static void propagateForcedReturn(JSContext *cx, AbstractFramePtr frame, HandleValue rval);
 
     /************************************* Functions for use by Debugger.cpp. */
 
@@ -827,22 +828,22 @@ Debugger::onNewGlobalObject(JSContext *c
 #ifdef DEBUG
     global->compartment()->firedOnNewGlobalObject = true;
 #endif
     if (!JS_CLIST_IS_EMPTY(&cx->runtime()->onNewGlobalObjectWatchers))
         Debugger::slowPathOnNewGlobalObject(cx, global);
 }
 
 bool
-Debugger::onLogAllocationSite(JSContext *cx, HandleSavedFrame frame)
+Debugger::onLogAllocationSite(JSContext *cx, HandleSavedFrame frame, int64_t when)
 {
     GlobalObject::DebuggerVector *dbgs = cx->global()->getDebuggers();
     if (!dbgs || dbgs->empty())
         return true;
-    return Debugger::slowPathOnLogAllocationSite(cx, frame, *dbgs);
+    return Debugger::slowPathOnLogAllocationSite(cx, frame, when, *dbgs);
 }
 
 extern bool
 EvaluateInEnv(JSContext *cx, Handle<Env*> env, HandleValue thisv, AbstractFramePtr frame,
               mozilla::Range<const char16_t> chars, const char *filename, unsigned lineno,
               MutableHandleValue rval);
 
 bool ReportObjectRequired(JSContext *cx);
--- a/js/src/vm/DebuggerMemory.cpp
+++ b/js/src/vm/DebuggerMemory.cpp
@@ -194,19 +194,31 @@ DebuggerMemory::drainAllocationsLog(JSCo
     size_t length = dbg->allocationsLogLength;
 
     RootedArrayObject result(cx, NewDenseFullyAllocatedArray(cx, length));
     if (!result)
         return false;
     result->ensureDenseInitializedLength(cx, 0, length);
 
     for (size_t i = 0; i < length; i++) {
-        Debugger::AllocationSite *allocSite = dbg->allocationsLog.popFirst();
-        result->setDenseElement(i, ObjectOrNullValue(allocSite->frame));
-        js_delete(allocSite);
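+        // Each entry in the returned array is a plain object of the form
+        // { "frame": ..., "timestamp": ... }, as documented for
+        // drainAllocationsLog() in Debugger.Memory.md.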
+        RootedObject obj(cx, NewBuiltinClassInstance(cx, &JSObject::class_));
+        if (!obj)
+            return false;
+
+        mozilla::UniquePtr<Debugger::AllocationSite, JS::DeletePolicy<Debugger::AllocationSite> >
+            allocSite(dbg->allocationsLog.popFirst());
+        RootedValue frame(cx, ObjectOrNullValue(allocSite->frame));
+        if (!JSObject::defineProperty(cx, obj, cx->names().frame, frame))
+            return false;
+
+        RootedValue timestampValue(cx, NumberValue(allocSite->when));
+        if (!JSObject::defineProperty(cx, obj, cx->names().timestamp, timestampValue))
+            return false;
+
+        result->setDenseElement(i, ObjectValue(*obj));
     }
 
     dbg->allocationsLogLength = 0;
     args.rval().setObject(*result);
     return true;
 }
 
 /* static */ bool
--- a/js/src/vm/SavedStacks.cpp
+++ b/js/src/vm/SavedStacks.cpp
@@ -11,16 +11,17 @@
 #include <math.h>
 
 #include "jsapi.h"
 #include "jscompartment.h"
 #include "jsfriendapi.h"
 #include "jshashutil.h"
 #include "jsmath.h"
 #include "jsnum.h"
+#include "prmjtime.h"
 
 #include "gc/Marking.h"
 #include "js/Vector.h"
 #include "vm/Debugger.h"
 #include "vm/GlobalObject.h"
 #include "vm/StringBuffer.h"
 
 #include "jscntxtinlines.h"
@@ -803,17 +804,17 @@ SavedStacksMetadataCallback(JSContext *c
                                                 std::log(notSamplingProb));
     }
 
     RootedSavedFrame frame(cx);
     if (!stacks.saveCurrentStack(cx, &frame))
         return false;
     *pmetadata = frame;
 
-    return Debugger::onLogAllocationSite(cx, frame);
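+    // PRMJ_Now() returns microseconds since the epoch, which is the unit the
+    // allocation log's |timestamp| field is documented to use.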
+    return Debugger::onLogAllocationSite(cx, frame, PRMJ_Now());
 }
 
 #ifdef JS_CRASH_DIAGNOSTICS
 void
 CompartmentChecker::check(SavedStacks *stacks)
 {
     if (&compartment->savedStacks() != stacks) {
         printf("*** Compartment SavedStacks mismatch: %p vs. %p\n",
--- a/memory/replace/dmd/DMD.cpp
+++ b/memory/replace/dmd/DMD.cpp
@@ -1397,28 +1397,26 @@ Init(const malloc_table_t* aMallocTable)
 
   gMallocTable = aMallocTable;
 
   // DMD is controlled by the |DMD| environment variable.
   // - If it's unset or empty or "0", DMD doesn't run.
   // - Otherwise, the contents dictate DMD's behaviour.
 
   char* e = getenv("DMD");
-  StatusMsg("$DMD = '%s'\n", e);
 
   if (!e || strcmp(e, "") == 0 || strcmp(e, "0") == 0) {
-    StatusMsg("DMD is not enabled\n");
     return;
   }
 
+  StatusMsg("$DMD = '%s'\n", e);
+
   // Parse $DMD env var.
   gOptions = InfallibleAllocPolicy::new_<Options>(e);
 
-  StatusMsg("DMD is enabled\n");
-
 #ifdef XP_MACOSX
   // On Mac OS X we need to call StackWalkInitCriticalAddress() very early
   // (prior to the creation of any mutexes, apparently) otherwise we can get
   // hangs when getting stack traces (bug 821577).  But
   // StackWalkInitCriticalAddress() isn't exported from xpcom/, so instead we
   // just call NS_StackWalk, because that calls StackWalkInitCriticalAddress().
   // See the comment above StackWalkInitCriticalAddress() for more details.
   (void)NS_StackWalk(NopStackWalkCallback, /* skipFrames */ 0,
@@ -1440,29 +1438,37 @@ Init(const malloc_table_t* aMallocTable)
     gBlockTable = InfallibleAllocPolicy::new_<BlockTable>();
     gBlockTable->init(8192);
   }
 
   if (gOptions->IsTestMode()) {
     // Do all necessary allocations before setting gIsDMDRunning so those
     // allocations don't show up in our results.  Once gIsDMDRunning is set we
     // are intercepting malloc et al. in earnest.
+    //
+    // These files are written to $CWD. It would probably be better to write
+    // them to "TmpD" using the directory service, but that would require
+    // linking DMD with XPCOM.
     auto f1 = MakeUnique<FpWriteFunc>(OpenOutputFile("full1.json"));
     auto f2 = MakeUnique<FpWriteFunc>(OpenOutputFile("full2.json"));
     auto f3 = MakeUnique<FpWriteFunc>(OpenOutputFile("full3.json"));
     auto f4 = MakeUnique<FpWriteFunc>(OpenOutputFile("full4.json"));
     gIsDMDRunning = true;
 
     StatusMsg("running test mode...\n");
     RunTestMode(Move(f1), Move(f2), Move(f3), Move(f4));
-    StatusMsg("finished test mode\n");
-    exit(0);
+    StatusMsg("finished test mode; DMD is now disabled again\n");
+
+    // Continue running so that the xpcshell test can complete, but DMD no
+    // longer needs to be running.
+    gIsDMDRunning = false;
+
+  } else {
+    gIsDMDRunning = true;
   }
-
-  gIsDMDRunning = true;
 }
 
 //---------------------------------------------------------------------------
 // DMD reporting and unreporting
 //---------------------------------------------------------------------------
 
 static void
 ReportHelper(const void* aPtr, bool aReportedOnAlloc)
@@ -1830,243 +1836,259 @@ AnalyzeReports(JSONWriter& aWriter)
 }
 
 //---------------------------------------------------------------------------
 // Testing
 //---------------------------------------------------------------------------
 
 // This function checks that heap blocks that have the same stack trace but
 // different (or no) reporters get aggregated separately.
-void foo()
+void Foo(int aSeven)
 {
-   char* a[6];
-   for (int i = 0; i < 6; i++) {
-      a[i] = (char*) malloc(128 - 16*i);
-   }
+  char* a[6];
+  for (int i = 0; i < aSeven - 1; i++) {
+    a[i] = (char*) replace_malloc(128 - 16*i);
+  }
 
-   for (int i = 0; i <= 1; i++)
-      Report(a[i]);                     // reported
-   Report(a[2]);                        // reported
-   Report(a[3]);                        // reported
-   // a[4], a[5] unreported
+  for (int i = 0; i < aSeven - 5; i++) {
+    Report(a[i]);                   // reported
+  }
+  Report(a[2]);                     // reported
+  Report(a[3]);                     // reported
+  // a[4], a[5] unreported
 }
 
 // This stops otherwise-unused variables from being optimized away.
 static void
-UseItOrLoseIt(void* a)
+UseItOrLoseIt(void* aPtr, int aSeven)
 {
   char buf[64];
-  sprintf(buf, "%p\n", a);
-  fwrite(buf, 1, strlen(buf) + 1, stderr);
+  int n = sprintf(buf, "%p\n", aPtr);
+  if (n == 20 + aSeven) {
+    fprintf(stderr, "well, that is surprising");
+  }
 }
 
-// The output from this should be tested with check_test_output.py.  It's been
-// tested on Linux64, and probably will give different results on other
-// platforms.
+// The output from this function feeds into DMD's xpcshell test.
 static void
 RunTestMode(UniquePtr<FpWriteFunc> aF1, UniquePtr<FpWriteFunc> aF2,
             UniquePtr<FpWriteFunc> aF3, UniquePtr<FpWriteFunc> aF4)
 {
+  // This test relies on the compiler not doing various optimizations, such as
+  // eliding unused replace_malloc() calls or unrolling loops with fixed
+  // iteration counts. So we want a constant value that the compiler can't
+  // determine statically, and we use that in various ways to prevent the above
+  // optimizations from happening.
+  //
+  // This code always sets |seven| to the value 7. It works because we know
+  // that "--mode=test" must be within the DMD environment variable if we reach
+  // here, but the compiler almost certainly does not.
+  //
+  char* env = getenv("DMD");
+  char* p1 = strstr(env, "--mode=t");
+  char* p2 = strstr(p1, "test");
+  int seven = p2 - p1;
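+  // ("--mode=" is seven characters long, so the "test" found by the second
+  //  strstr() starts seven characters after p1, making p2 - p1 equal to 7.)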
+
   // The first part of this test requires sampling to be disabled.
   gOptions->SetSampleBelowSize(1);
 
   //---------
 
   // AnalyzeReports 1.  Zero for everything.
   JSONWriter writer1(Move(aF1));
   AnalyzeReports(writer1);
 
   //---------
 
   // AnalyzeReports 2: 1 freed, 9 out of 10 unreported.
   // AnalyzeReports 3: still present and unreported.
   int i;
-  char* a;
-  for (i = 0; i < 10; i++) {
-      a = (char*) malloc(100);
-      UseItOrLoseIt(a);
+  char* a = nullptr;
+  for (i = 0; i < seven + 3; i++) {
+      a = (char*) replace_malloc(100);
+      UseItOrLoseIt(a, seven);
   }
-  free(a);
+  replace_free(a);
 
-  // Min-sized block.
+  // Note: 8 bytes is the smallest requested size that gives consistent
+  // behaviour across all platforms with jemalloc.
   // AnalyzeReports 2: reported.
   // AnalyzeReports 3: thrice-reported.
-  char* a2 = (char*) malloc(0);
+  char* a2 = (char*) replace_malloc(8);
   Report(a2);
 
-  // Operator new[].
   // AnalyzeReports 2: reported.
   // AnalyzeReports 3: reportedness carries over, due to ReportOnAlloc.
-  char* b = new char[10];
+  char* b = (char*) replace_malloc(10);
   ReportOnAlloc(b);
 
   // ReportOnAlloc, then freed.
   // AnalyzeReports 2: freed, irrelevant.
   // AnalyzeReports 3: freed, irrelevant.
-  char* b2 = new char;
+  char* b2 = (char*) replace_malloc(1);
   ReportOnAlloc(b2);
-  free(b2);
+  replace_free(b2);
 
   // AnalyzeReports 2: reported 4 times.
   // AnalyzeReports 3: freed, irrelevant.
-  char* c = (char*) calloc(10, 3);
+  char* c = (char*) replace_calloc(10, 3);
   Report(c);
-  for (int i = 0; i < 3; i++) {
+  for (int i = 0; i < seven - 4; i++) {
     Report(c);
   }
 
   // AnalyzeReports 2: ignored.
   // AnalyzeReports 3: irrelevant.
   Report((void*)(intptr_t)i);
 
   // jemalloc rounds this up to 8192.
   // AnalyzeReports 2: reported.
   // AnalyzeReports 3: freed.
-  char* e = (char*) malloc(4096);
-  e = (char*) realloc(e, 4097);
+  char* e = (char*) replace_malloc(4096);
+  e = (char*) replace_realloc(e, 4097);
   Report(e);
 
   // First realloc is like malloc;  second realloc is shrinking.
   // AnalyzeReports 2: reported.
   // AnalyzeReports 3: re-reported.
-  char* e2 = (char*) realloc(nullptr, 1024);
-  e2 = (char*) realloc(e2, 512);
+  char* e2 = (char*) replace_realloc(nullptr, 1024);
+  e2 = (char*) replace_realloc(e2, 512);
   Report(e2);
 
   // First realloc is like malloc;  second realloc creates a min-sized block.
   // XXX: on Windows, second realloc frees the block.
   // AnalyzeReports 2: reported.
   // AnalyzeReports 3: freed, irrelevant.
-  char* e3 = (char*) realloc(nullptr, 1023);
-//e3 = (char*) realloc(e3, 0);
+  char* e3 = (char*) replace_realloc(nullptr, 1023);
+//e3 = (char*) replace_realloc(e3, 0);
   MOZ_ASSERT(e3);
   Report(e3);
 
   // AnalyzeReports 2: freed, irrelevant.
   // AnalyzeReports 3: freed, irrelevant.
-  char* f = (char*) malloc(64);
-  free(f);
+  char* f = (char*) replace_malloc(64);
+  replace_free(f);
 
   // AnalyzeReports 2: ignored.
   // AnalyzeReports 3: irrelevant.
   Report((void*)(intptr_t)0x0);
 
   // AnalyzeReports 2: mixture of reported and unreported.
   // AnalyzeReports 3: all unreported.
-  foo();
-  foo();
+  Foo(seven);
+  Foo(seven);
 
   // AnalyzeReports 2: twice-reported.
   // AnalyzeReports 3: twice-reported.
-  char* g1 = (char*) malloc(77);
+  char* g1 = (char*) replace_malloc(77);
   ReportOnAlloc(g1);
   ReportOnAlloc(g1);
 
   // AnalyzeReports 2: twice-reported.
   // AnalyzeReports 3: once-reported.
-  char* g2 = (char*) malloc(78);
+  char* g2 = (char*) replace_malloc(78);
   Report(g2);
   ReportOnAlloc(g2);
 
   // AnalyzeReports 2: twice-reported.
   // AnalyzeReports 3: once-reported.
-  char* g3 = (char*) malloc(79);
+  char* g3 = (char*) replace_malloc(79);
   ReportOnAlloc(g3);
   Report(g3);
 
   // All the odd-ball ones.
   // AnalyzeReports 2: all unreported.
   // AnalyzeReports 3: all freed, irrelevant.
   // XXX: no memalign on Mac
 //void* x = memalign(64, 65);           // rounds up to 128
-//UseItOrLoseIt(x);
+//UseItOrLoseIt(x, seven);
   // XXX: posix_memalign doesn't work on B2G
 //void* y;
 //posix_memalign(&y, 128, 129);         // rounds up to 256
-//UseItOrLoseIt(y);
+//UseItOrLoseIt(y, seven);
   // XXX: valloc doesn't work on Windows.
 //void* z = valloc(1);                  // rounds up to 4096
-//UseItOrLoseIt(z);
+//UseItOrLoseIt(z, seven);
 //aligned_alloc(64, 256);               // XXX: C11 only
 
   // AnalyzeReports 2.
   JSONWriter writer2(Move(aF2));
   AnalyzeReports(writer2);
 
   //---------
 
   Report(a2);
   Report(a2);
-  free(c);
-  free(e);
+  replace_free(c);
+  replace_free(e);
   Report(e2);
-  free(e3);
-//free(x);
-//free(y);
-//free(z);
+  replace_free(e3);
+//replace_free(x);
+//replace_free(y);
+//replace_free(z);
 
   // AnalyzeReports 3.
   JSONWriter writer3(Move(aF3));
   AnalyzeReports(writer3);
 
   //---------
 
   // Clear all knowledge of existing blocks to give us a clean slate.
   gBlockTable->clear();
 
   gOptions->SetSampleBelowSize(128);
 
   char* s;
 
   // This equals the sample size, and so is reported exactly.  It should be
   // listed before records of the same size that are sampled.
-  s = (char*) malloc(128);
-  UseItOrLoseIt(s);
+  s = (char*) replace_malloc(128);
+  UseItOrLoseIt(s, seven);
 
   // This exceeds the sample size, and so is reported exactly.
-  s = (char*) malloc(144);
-  UseItOrLoseIt(s);
+  s = (char*) replace_malloc(144);
+  UseItOrLoseIt(s, seven);
 
   // These together constitute exactly one sample.
-  for (int i = 0; i < 16; i++) {
-    s = (char*) malloc(8);
-    UseItOrLoseIt(s);
+  for (int i = 0; i < seven + 9; i++) {
+    s = (char*) replace_malloc(8);
+    UseItOrLoseIt(s, seven);
   }
   MOZ_ASSERT(gSmallBlockActualSizeCounter == 0);
 
   // These fall 8 bytes short of a full sample.
-  for (int i = 0; i < 15; i++) {
-    s = (char*) malloc(8);
-    UseItOrLoseIt(s);
+  for (int i = 0; i < seven + 8; i++) {
+    s = (char*) replace_malloc(8);
+    UseItOrLoseIt(s, seven);
   }
   MOZ_ASSERT(gSmallBlockActualSizeCounter == 120);
 
   // This exceeds the sample size, and so is recorded exactly.
-  s = (char*) malloc(256);
-  UseItOrLoseIt(s);
+  s = (char*) replace_malloc(256);
+  UseItOrLoseIt(s, seven);
   MOZ_ASSERT(gSmallBlockActualSizeCounter == 120);
 
-  // This gets more than to a full sample from the |i < 15| loop above.
+  // Together with the |i < seven + 8| loop above, this gets us past a full
+  // sample.
-  s = (char*) malloc(96);
-  UseItOrLoseIt(s);
+  s = (char*) replace_malloc(96);
+  UseItOrLoseIt(s, seven);
   MOZ_ASSERT(gSmallBlockActualSizeCounter == 88);
 
   // This gets to another full sample.
-  for (int i = 0; i < 5; i++) {
-    s = (char*) malloc(8);
-    UseItOrLoseIt(s);
+  for (int i = 0; i < seven - 2; i++) {
+    s = (char*) replace_malloc(8);
+    UseItOrLoseIt(s, seven);
   }
   MOZ_ASSERT(gSmallBlockActualSizeCounter == 0);
 
   // This allocates 16, 32, ..., 128 bytes, which results in a heap block
   // record that contains a mix of sample and non-sampled blocks, and so should
   // be printed with '~' signs.
-  for (int i = 1; i <= 8; i++) {
-    s = (char*) malloc(i * 16);
-    UseItOrLoseIt(s);
+  for (int i = 1; i <= seven + 1; i++) {
+    s = (char*) replace_malloc(i * 16);
+    UseItOrLoseIt(s, seven);
   }
   MOZ_ASSERT(gSmallBlockActualSizeCounter == 64);
 
   // At the end we're 64 bytes into the current sample so we report ~1,424
   // bytes of allocation overall, which is 64 less than the real value 1,488.
 
   // AnalyzeReports 4.
   JSONWriter writer4(Move(aF4));
deleted file mode 100755
--- a/memory/replace/dmd/check_test_output.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#! /usr/bin/python
-#
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-"""This script takes the file produced by DMD's test mode and checks its
-correctness.
-
-It produces the following output files: $TMP/full-{fixed,filtered,diff}.dmd.
-
-It runs the appropriate fix* script to get nice stack traces.  It also
-filters out platform-specific details from the test output file.
-
-Note: you must run this from the same directory that you invoked DMD's test
-mode, otherwise the fix* script will not work properly, because some of the
-paths in the test output are relative.
-
-"""
-
-from __future__ import print_function
-
-import os
-import platform
-import re
-import subprocess
-import sys
-import tempfile
-
-def test(src_dir, kind, options, i):
-    # Filenames
-    tmp_dir = tempfile.gettempdir()
-    in_name        = os.path.join(src_dir, "full{:d}.json".format(i))
-    fixed_name     = os.path.join(tmp_dir, "full-{:}-fixed{:d}.json".format(kind, i))
-    converted_name = os.path.join(tmp_dir, "full-{:}-converted{:d}.txt".format(kind, i))
-    filtered_name  = os.path.join(tmp_dir, "full-{:}-filtered{:d}.txt".format(kind, i))
-    diff_name      = os.path.join(tmp_dir, "full-{:}-diff{:d}.txt".format(kind, i))
-    expected_name  = os.path.join(src_dir, "memory", "replace", "dmd", "test", "full-{:}-expected{:d}.txt".format(kind, i))
-
-    # Fix stack traces
-
-    sys_name = platform.system()
-    fix = os.path.join(src_dir, "tools", "rb")
-    if sys_name == "Linux":
-        fix = os.path.join(fix, "fix_linux_stack.py")
-    elif sys_name == "Darwin":
-        fix = os.path.join(fix, "fix_macosx_stack.py")
-    else:
-        print("unhandled platform: " + sys_name, file=sys.stderr)
-        sys.exit(1)
-
-    subprocess.call(fix, stdin=open(in_name, "r"),
-                         stdout=open(fixed_name, "w"))
-
-    # Convert from JSON
-
-    convert = [os.path.join(src_dir, "memory", "replace", "dmd", "dmd.py")] + \
-               options + ['--no-fix-stacks', fixed_name]
-    subprocess.call(convert, stdout=open(converted_name, "w"))
-
-    # Filter output
-
-    # In heap block records we filter out most stack frames.  The only thing
-    # we leave behind is a "DMD.cpp" entry if we see one or more frames that
-    # have DMD.cpp in them.  There is simply too much variation to do anything
-    # better than that.
-
-    with open(converted_name, "r") as fin, \
-         open(filtered_name, "w") as fout:
-
-        test_frame_re = re.compile(r".*(DMD.cpp)")
-
-        for line in fin:
-            if re.match(r"  (Allocated at {|Reported( again)? at {)", line):
-                # It's a heap block record.
-                print(line, end='', file=fout)
-
-                # Filter the stack trace -- print a single line if we see one
-                # or more frames involving DMD.cpp.
-                seen_DMD_frame = False
-                for frame in fin:
-                    if re.match(r"    ", frame):
-                        m = test_frame_re.match(frame)
-                        if m:
-                            seen_DMD_frame = True
-                    else:
-                        # We're past the stack trace.
-                        if seen_DMD_frame:
-                            print("    ... DMD.cpp", file=fout)
-                        print(frame, end='', file=fout)
-                        break
-
-            else:
-                # A line that needs no special handling.  Copy it through.
-                print(line, end='', file=fout)
-
-    # Compare with expected output
-
-    ret = subprocess.call(["diff", "-u", expected_name, filtered_name],
-                          stdout=open(diff_name, "w"))
-
-    if ret == 0:
-        print("TEST-PASS | {:} {:d} | ok".format(kind, i))
-    else:
-        print("TEST-UNEXPECTED-FAIL | {:} {:d} | mismatch".format(kind, i))
-        print("Output files:")
-        print("- " + fixed_name);
-        print("- " + converted_name);
-        print("- " + filtered_name);
-        print("- " + diff_name);
-
-
-def main():
-    if (len(sys.argv) != 2):
-        print("usage:", sys.argv[0], "<topsrcdir>")
-        sys.exit(1)
-
-    src_dir = sys.argv[1]
-
-    ntests = 4
-    for i in range(1, ntests+1):
-        test(src_dir, "reports", [], i)
-        test(src_dir, "heap", ["--ignore-reports"], i)
-
-
-if __name__ == "__main__":
-    main()
--- a/memory/replace/dmd/dmd.py
+++ b/memory/replace/dmd/dmd.py
@@ -97,17 +97,18 @@ def parseCommandLine():
         return value
 
     description = '''
 Analyze heap data produced by DMD.
 If no files are specified, read from stdin.
 Write to stdout unless -o/--output is specified.
 Stack traces are fixed to show function names, filenames and line numbers
 unless --no-fix-stacks is specified; stack fixing modifies the original file
-and may take some time.
+and may take some time. If specified, the BREAKPAD_SYMBOLS_PATH environment
+variable is used to find breakpad symbols for stack fixing.
 '''
     p = argparse.ArgumentParser(description=description)
 
     p.add_argument('-o', '--output', type=argparse.FileType('w'),
                    help='output file; stdout if unspecified')
 
     p.add_argument('-f', '--max-frames', type=range_1_24,
                    help='maximum number of frames to consider in each trace')
@@ -124,32 +125,37 @@ and may take some time.
                    help='ignore allocation functions at the start of traces')
 
     p.add_argument('-b', '--show-all-block-sizes', action='store_true',
                    help='show individual block sizes for each record')
 
     p.add_argument('--no-fix-stacks', action='store_true',
                    help='do not fix stacks')
 
+    p.add_argument('--filter-stacks-for-testing', action='store_true',
+                   help='filter stack traces; only useful for testing purposes')
+
     p.add_argument('input_file', type=argparse.FileType('r'))
 
     return p.parse_args(sys.argv[1:])
 
 
 # Fix stacks if necessary: first write the output to a tempfile, then replace
 # the original file with it.
 def fixStackTraces(args):
     # This append() call is needed to make the import statements work when this
     # script is installed as a symlink.
     sys.path.append(os.path.dirname(__file__))
 
-    # XXX: should incorporate fix_stack_using_bpsyms.py here as well, like in
-    #      testing/mochitests/runtests.py
+    bpsyms = os.environ.get('BREAKPAD_SYMBOLS_PATH', None)
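+    # For example, running with BREAKPAD_SYMBOLS_PATH=/path/to/symbols set in
+    # the environment makes stacks get fixed via fix_stack_using_bpsyms.py
+    # rather than the platform-specific fix scripts below.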
     sysname = platform.system()
-    if sysname == 'Linux':
+    if bpsyms and os.path.exists(bpsyms):
+        import fix_stack_using_bpsyms as fixModule
+        fix = lambda line: fixModule.fixSymbols(line, bpsyms)
+    elif sysname == 'Linux':
         import fix_linux_stack as fixModule
         fix = lambda line: fixModule.fixSymbols(line)
     elif sysname == 'Darwin':
         import fix_macosx_stack as fixModule
         fix = lambda line: fixModule.fixSymbols(line)
     else:
         fix = None  # there is no fix script for Windows
 
@@ -299,20 +305,36 @@ def main():
     def plural(n):
         return '' if n == 1 else 's'
 
     # Prints to stdout, or to file if -o/--output was specified.
     def out(*arguments, **kwargs):
         print(*arguments, file=args.output, **kwargs)
 
     def printStack(traceTable, frameTable, traceKey):
+        frameKeys = traceTable[traceKey]
+        fmt = '    #{:02d}{:}'
+
+        if args.filter_stacks_for_testing:
+            # If any frame has "DMD.cpp" or "replace_malloc.c" in its
+            # description -- as should be the case for every stack trace when
+            # running DMD in test mode -- we replace the entire trace with a
+            # single, predictable frame. There is too much variation in the
+            # stack traces across different machines and platforms to do more
+            # specific matching.
+            for frameKey in frameKeys:
+                frameDesc = frameTable[frameKey]
+                if 'DMD.cpp' in frameDesc or 'replace_malloc.c' in frameDesc:
+                    out(fmt.format(1, ': ... DMD.cpp ...'))
+                    return
+
         # The frame number is always '#00' (see DMD.h for why), so we have to
         # replace that with the correct frame number.
         for n, frameKey in enumerate(traceTable[traceKey], start=1):
-            out('    #{:02d}{:}'.format(n, frameTable[frameKey][3:]))
+            out(fmt.format(n, frameTable[frameKey][3:]))
 
     def printRecords(recordKind, records, heapUsableSize):
         RecordKind = recordKind.capitalize()
         out(separator)
         numRecords = len(records)
         cmpRecords = sortByChoices[args.sort_by]
         sortedRecords = sorted(records.values(), cmp=cmpRecords, reverse=True)
         kindBlocks = 0
--- a/memory/replace/dmd/moz.build
+++ b/memory/replace/dmd/moz.build
@@ -27,8 +27,13 @@ if CONFIG['MOZ_OPTIMIZE']:
     DEFINES['MOZ_OPTIMIZE'] = True
 
 DISABLE_STL_WRAPPING = True
 
 if CONFIG['OS_ARCH'] == 'WINNT':
     OS_LIBS += [
         'dbghelp',
     ]
+
+XPCSHELL_TESTS_MANIFESTS += [
+    'test/xpcshell.ini',
+]
+
--- a/memory/replace/dmd/test/full-heap-expected2.txt
+++ b/memory/replace/dmd/test/full-heap-expected2.txt
@@ -7,116 +7,116 @@ Invocation {
 
 #-----------------------------------------------------------------
 
 Live {
   1 block in heap block record 1 of 12
   8,192 bytes (4,097 requested / 4,095 slop)
   67.77% of the heap (67.77% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 2 of 12
   1,024 bytes (1,023 requested / 1 slop)
   8.47% of the heap (76.24% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   9 blocks in heap block record 3 of 12
   1,008 bytes (900 requested / 108 slop)
   8.34% of the heap (84.58% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   6 blocks in heap block record 4 of 12
   528 bytes (528 requested / 0 slop)
   4.37% of the heap (88.95% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   6 blocks in heap block record 5 of 12
   528 bytes (528 requested / 0 slop)
   4.37% of the heap (93.32% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 6 of 12
   512 bytes (512 requested / 0 slop)
   4.24% of the heap (97.55% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 7 of 12
   80 bytes (79 requested / 1 slop)
   0.66% of the heap (98.21% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 8 of 12
   80 bytes (78 requested / 2 slop)
   0.66% of the heap (98.87% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 9 of 12
   80 bytes (77 requested / 3 slop)
   0.66% of the heap (99.54% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 10 of 12
   32 bytes (30 requested / 2 slop)
   0.26% of the heap (99.80% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 11 of 12
   16 bytes (10 requested / 6 slop)
   0.13% of the heap (99.93% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 12 of 12
-  8 bytes (0 requested / 8 slop)
+  8 bytes (8 requested / 0 slop)
   0.07% of the heap (100.00% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 #-----------------------------------------------------------------
 
 Summary {
   Total: 12,088 bytes in 30 blocks
 }
--- a/memory/replace/dmd/test/full-heap-expected3.txt
+++ b/memory/replace/dmd/test/full-heap-expected3.txt
@@ -7,89 +7,89 @@ Invocation {
 
 #-----------------------------------------------------------------
 
 Live {
   9 blocks in heap block record 1 of 9
   1,008 bytes (900 requested / 108 slop)
   35.49% of the heap (35.49% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   6 blocks in heap block record 2 of 9
   528 bytes (528 requested / 0 slop)
   18.59% of the heap (54.08% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   6 blocks in heap block record 3 of 9
   528 bytes (528 requested / 0 slop)
   18.59% of the heap (72.68% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 4 of 9
   512 bytes (512 requested / 0 slop)
   18.03% of the heap (90.70% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 5 of 9
   80 bytes (79 requested / 1 slop)
   2.82% of the heap (93.52% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 6 of 9
   80 bytes (78 requested / 2 slop)
   2.82% of the heap (96.34% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 7 of 9
   80 bytes (77 requested / 3 slop)
   2.82% of the heap (99.15% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 8 of 9
   16 bytes (10 requested / 6 slop)
   0.56% of the heap (99.72% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 9 of 9
-  8 bytes (0 requested / 8 slop)
+  8 bytes (8 requested / 0 slop)
   0.28% of the heap (100.00% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 #-----------------------------------------------------------------
 
 Summary {
   Total: 2,840 bytes in 27 blocks
 }
--- a/memory/replace/dmd/test/full-heap-expected4.txt
+++ b/memory/replace/dmd/test/full-heap-expected4.txt
@@ -7,71 +7,71 @@ Invocation {
 
 #-----------------------------------------------------------------
 
 Live {
   ~4 blocks in heap block record 1 of 7
   ~512 bytes (~512 requested / ~0 slop)
   35.96% of the heap (35.96% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 2 of 7
   256 bytes (256 requested / 0 slop)
   17.98% of the heap (53.93% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 3 of 7
   144 bytes (144 requested / 0 slop)
   10.11% of the heap (64.04% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   1 block in heap block record 4 of 7
   128 bytes (128 requested / 0 slop)
   8.99% of the heap (73.03% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   ~1 block in heap block record 5 of 7
   ~128 bytes (~128 requested / ~0 slop)
   8.99% of the heap (82.02% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   ~1 block in heap block record 6 of 7
   ~128 bytes (~128 requested / ~0 slop)
   8.99% of the heap (91.01% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Live {
   ~1 block in heap block record 7 of 7
   ~128 bytes (~128 requested / ~0 slop)
   8.99% of the heap (100.00% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 #-----------------------------------------------------------------
 
 Summary {
   Total: ~1,424 bytes in ~10 blocks
 }
--- a/memory/replace/dmd/test/full-reports-expected2.txt
+++ b/memory/replace/dmd/test/full-reports-expected2.txt
@@ -8,248 +8,248 @@ Invocation {
 #-----------------------------------------------------------------
 
 Twice-reported {
   1 block in heap block record 1 of 4
   80 bytes (79 requested / 1 slop)
   0.66% of the heap (0.66% cumulative)
   29.41% of twice-reported (29.41% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported again at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Twice-reported {
   1 block in heap block record 2 of 4
   80 bytes (78 requested / 2 slop)
   0.66% of the heap (1.32% cumulative)
   29.41% of twice-reported (58.82% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported again at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Twice-reported {
   1 block in heap block record 3 of 4
   80 bytes (77 requested / 3 slop)
   0.66% of the heap (1.99% cumulative)
   29.41% of twice-reported (88.24% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported again at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Twice-reported {
   1 block in heap block record 4 of 4
   32 bytes (30 requested / 2 slop)
   0.26% of the heap (2.25% cumulative)
   11.76% of twice-reported (100.00% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported again at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 #-----------------------------------------------------------------
 
 Unreported {
   9 blocks in heap block record 1 of 3
   1,008 bytes (900 requested / 108 slop)
   8.34% of the heap (8.34% cumulative)
   81.82% of unreported (81.82% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Unreported {
   2 blocks in heap block record 2 of 3
   112 bytes (112 requested / 0 slop)
   0.93% of the heap (9.27% cumulative)
   9.09% of unreported (90.91% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Unreported {
   2 blocks in heap block record 3 of 3
   112 bytes (112 requested / 0 slop)
   0.93% of the heap (10.19% cumulative)
   9.09% of unreported (100.00% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 #-----------------------------------------------------------------
 
 Once-reported {
   1 block in heap block record 1 of 11
   8,192 bytes (4,097 requested / 4,095 slop)
   67.77% of the heap (67.77% cumulative)
   77.40% of once-reported (77.40% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   1 block in heap block record 2 of 11
   1,024 bytes (1,023 requested / 1 slop)
   8.47% of the heap (76.24% cumulative)
   9.67% of once-reported (87.07% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   1 block in heap block record 3 of 11
   512 bytes (512 requested / 0 slop)
   4.24% of the heap (80.48% cumulative)
   4.84% of once-reported (91.91% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   2 blocks in heap block record 4 of 11
   240 bytes (240 requested / 0 slop)
   1.99% of the heap (82.46% cumulative)
   2.27% of once-reported (94.18% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   2 blocks in heap block record 5 of 11
   240 bytes (240 requested / 0 slop)
   1.99% of the heap (84.45% cumulative)
   2.27% of once-reported (96.45% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   1 block in heap block record 6 of 11
   96 bytes (96 requested / 0 slop)
   0.79% of the heap (85.24% cumulative)
   0.91% of once-reported (97.35% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   1 block in heap block record 7 of 11
   96 bytes (96 requested / 0 slop)
   0.79% of the heap (86.04% cumulative)
   0.91% of once-reported (98.26% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   1 block in heap block record 8 of 11
   80 bytes (80 requested / 0 slop)
   0.66% of the heap (86.70% cumulative)
   0.76% of once-reported (99.02% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   1 block in heap block record 9 of 11
   80 bytes (80 requested / 0 slop)
   0.66% of the heap (87.36% cumulative)
   0.76% of once-reported (99.77% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   1 block in heap block record 10 of 11
   16 bytes (10 requested / 6 slop)
   0.13% of the heap (87.49% cumulative)
   0.15% of once-reported (99.92% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   1 block in heap block record 11 of 11
-  8 bytes (0 requested / 8 slop)
+  8 bytes (8 requested / 0 slop)
   0.07% of the heap (87.56% cumulative)
   0.08% of once-reported (100.00% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 #-----------------------------------------------------------------
 
 Summary {
   Total:                12,088 bytes (100.00%) in      30 blocks (100.00%)
   Unreported:            1,232 bytes ( 10.19%) in      13 blocks ( 43.33%)
--- a/memory/replace/dmd/test/full-reports-expected3.txt
+++ b/memory/replace/dmd/test/full-reports-expected3.txt
@@ -8,125 +8,125 @@ Invocation {
 #-----------------------------------------------------------------
 
 Twice-reported {
   1 block in heap block record 1 of 2
   80 bytes (77 requested / 3 slop)
   2.82% of the heap (2.82% cumulative)
   90.91% of twice-reported (90.91% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported again at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Twice-reported {
   1 block in heap block record 2 of 2
-  8 bytes (0 requested / 8 slop)
+  8 bytes (8 requested / 0 slop)
   0.28% of the heap (3.10% cumulative)
   9.09% of twice-reported (100.00% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported again at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 #-----------------------------------------------------------------
 
 Unreported {
   9 blocks in heap block record 1 of 3
   1,008 bytes (900 requested / 108 slop)
   35.49% of the heap (35.49% cumulative)
   48.84% of unreported (48.84% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Unreported {
   6 blocks in heap block record 2 of 3
   528 bytes (528 requested / 0 slop)
   18.59% of the heap (54.08% cumulative)
   25.58% of unreported (74.42% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Unreported {
   6 blocks in heap block record 3 of 3
   528 bytes (528 requested / 0 slop)
   18.59% of the heap (72.68% cumulative)
   25.58% of unreported (100.00% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 #-----------------------------------------------------------------
 
 Once-reported {
   1 block in heap block record 1 of 4
   512 bytes (512 requested / 0 slop)
   18.03% of the heap (18.03% cumulative)
   74.42% of once-reported (74.42% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   1 block in heap block record 2 of 4
   80 bytes (79 requested / 1 slop)
   2.82% of the heap (20.85% cumulative)
   11.63% of once-reported (86.05% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   1 block in heap block record 3 of 4
   80 bytes (78 requested / 2 slop)
   2.82% of the heap (23.66% cumulative)
   11.63% of once-reported (97.67% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Once-reported {
   1 block in heap block record 4 of 4
   16 bytes (10 requested / 6 slop)
   0.56% of the heap (24.23% cumulative)
   2.33% of once-reported (100.00% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
   Reported at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 #-----------------------------------------------------------------
 
 Summary {
   Total:                 2,840 bytes (100.00%) in      27 blocks (100.00%)
   Unreported:            2,064 bytes ( 72.68%) in      21 blocks ( 77.78%)
--- a/memory/replace/dmd/test/full-reports-expected4.txt
+++ b/memory/replace/dmd/test/full-reports-expected4.txt
@@ -12,77 +12,77 @@ Invocation {
 #-----------------------------------------------------------------
 
 Unreported {
   ~4 blocks in heap block record 1 of 7
   ~512 bytes (~512 requested / ~0 slop)
   35.96% of the heap (35.96% cumulative)
   35.96% of unreported (35.96% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Unreported {
   1 block in heap block record 2 of 7
   256 bytes (256 requested / 0 slop)
   17.98% of the heap (53.93% cumulative)
   17.98% of unreported (53.93% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Unreported {
   1 block in heap block record 3 of 7
   144 bytes (144 requested / 0 slop)
   10.11% of the heap (64.04% cumulative)
   10.11% of unreported (64.04% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Unreported {
   1 block in heap block record 4 of 7
   128 bytes (128 requested / 0 slop)
   8.99% of the heap (73.03% cumulative)
   8.99% of unreported (73.03% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Unreported {
   ~1 block in heap block record 5 of 7
   ~128 bytes (~128 requested / ~0 slop)
   8.99% of the heap (82.02% cumulative)
   8.99% of unreported (82.02% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Unreported {
   ~1 block in heap block record 6 of 7
   ~128 bytes (~128 requested / ~0 slop)
   8.99% of the heap (91.01% cumulative)
   8.99% of unreported (91.01% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 Unreported {
   ~1 block in heap block record 7 of 7
   ~128 bytes (~128 requested / ~0 slop)
   8.99% of the heap (100.00% cumulative)
   8.99% of unreported (100.00% cumulative)
   Allocated at {
-    ... DMD.cpp
+    #01: ... DMD.cpp ...
   }
 }
 
 #-----------------------------------------------------------------
 
 # no once-reported heap blocks
 
 #-----------------------------------------------------------------
new file mode 100644
--- /dev/null
+++ b/memory/replace/dmd/test/test_dmd.js
@@ -0,0 +1,87 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*-*/
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+const {classes: Cc, interfaces: Ci, utils: Cu} = Components;
+
+Cu.import("resource://gre/modules/FileUtils.jsm");
+
+// The xpcshell test harness sets PYTHON so we can read it here.
+let gEnv = Cc["@mozilla.org/process/environment;1"]
+             .getService(Ci.nsIEnvironment);
+let gPythonName = gEnv.get("PYTHON");
+
+// If we're testing locally, the script is in "CurProcD". Otherwise, it is in
+// another location that we have to find.
+let gDmdScriptFile = FileUtils.getFile("CurProcD", ["dmd.py"]);
+if (!gDmdScriptFile.exists()) {
+  gDmdScriptFile = FileUtils.getFile("CurWorkD", []);
+  while (gDmdScriptFile.path.contains("xpcshell")) {
+    gDmdScriptFile = gDmdScriptFile.parent;
+  }
+  gDmdScriptFile.append("bin");
+  gDmdScriptFile.append("dmd.py");
+}
+
+function test(aJsonFile, aKind, aOptions, aN) {
+  // DMD writes the JSON files to CurWorkD, so we do likewise here with
+  // |actualFile| for consistency. It is removed once we've finished.
+  let expectedFile =
+    FileUtils.getFile("CurWorkD",
+                      ["full-" + aKind + "-expected" + aN + ".txt"]);
+  let actualFile =
+    FileUtils.getFile("CurWorkD",
+                      ["full-" + aKind + "-actual"   + aN + ".txt"]);
+
+  // Run dmd.py on the JSON file, producing |actualFile|.
+
+  let pythonFile = new FileUtils.File(gPythonName);
+  let pythonProcess = Cc["@mozilla.org/process/util;1"]
+                        .createInstance(Components.interfaces.nsIProcess);
+  pythonProcess.init(pythonFile);
+
+  let args = [
+    gDmdScriptFile.path,
+    "--filter-stacks-for-testing",
+    "-o", actualFile.path
+  ];
+  args = args.concat(aOptions);
+  args.push(aJsonFile.path);
+
+  pythonProcess.run(/* blocking = */true, args, args.length);
+
+  // Compare |expectedFile| with |actualFile|. Differences are printed to
+  // stdout.
+
+  let diffFile = new FileUtils.File("/usr/bin/diff");
+  let diffProcess = Cc["@mozilla.org/process/util;1"]
+                      .createInstance(Components.interfaces.nsIProcess);
+  // XXX: this doesn't work on Windows (bug 1076446).
+  diffProcess.init(diffFile);
+
+  args = ["-u", expectedFile.path, actualFile.path];
+  diffProcess.run(/* blocking = */true, args, args.length);
+  let success = diffProcess.exitValue == 0;
+  ok(success, aKind + " " + aN);
+
+  actualFile.remove(true);
+}
+
+function run_test() {
+  // These tests do full end-to-end testing of DMD, i.e. both the C++ code that
+  // generates the JSON output, and the script that post-processes that output.
+  // The test relies on DMD's test mode executing beforehand, in order to
+  // produce the relevant JSON files.
+  //
+  // Run these synchronously, because test() updates the full*.json files
+  // in-place (to fix stacks) when it runs dmd.py, and that's not safe to do
+  // asynchronously.
+  for (let i = 1; i <= 4; i++) {
+    let jsonFile = FileUtils.getFile("CurWorkD", ["full" + i + ".json"]);
+    test(jsonFile, "heap", ["--ignore-reports"], i);
+    test(jsonFile, "reports", [], i);
+    jsonFile.remove(true);
+  }
+}
new file mode 100644
--- /dev/null
+++ b/memory/replace/dmd/test/xpcshell.ini
@@ -0,0 +1,16 @@
+[DEFAULT]
+support-files =
+  full-heap-expected1.txt
+  full-heap-expected2.txt
+  full-heap-expected3.txt
+  full-heap-expected4.txt
+  full-reports-expected1.txt
+  full-reports-expected2.txt
+  full-reports-expected3.txt
+  full-reports-expected4.txt
+
+# Bug 1077230 explains why this test is disabled on Mac 10.6.
+# Bug 1076446 is open for getting this test working on Windows.
+[test_dmd.js]
+dmd = true
+run-if = os == 'linux' || os == 'mac' && os_version != '10.6'
--- a/netwerk/dns/nsDNSService2.cpp
+++ b/netwerk/dns/nsDNSService2.cpp
@@ -577,24 +577,21 @@ nsDNSService::Init()
         mIPv4OnlyDomains = ipv4OnlyDomains; // exchanges buffer ownership
         mDisableIPv6 = disableIPv6;
 
         // Disable prefetching either by explicit preference or if a manual proxy is configured 
         mDisablePrefetch = disablePrefetch || (proxyType == nsIProtocolProxyService::PROXYCONFIG_MANUAL);
 
         mLocalDomains.Clear();
         if (localDomains) {
-            nsAdoptingString domains;
-            domains.AssignASCII(nsDependentCString(localDomains).get());
-            nsCharSeparatedTokenizer tokenizer(domains, ',',
-                                               nsCharSeparatedTokenizerTemplate<>::SEPARATOR_OPTIONAL);
+            nsCCharSeparatedTokenizer tokenizer(localDomains, ',',
+                                                nsCCharSeparatedTokenizer::SEPARATOR_OPTIONAL);
 
             while (tokenizer.hasMoreTokens()) {
-                const nsSubstring& domain = tokenizer.nextToken();
-                mLocalDomains.PutEntry(nsDependentCString(NS_ConvertUTF16toUTF8(domain).get()));
+                mLocalDomains.PutEntry(tokenizer.nextToken());
             }
         }
         mNotifyResolution = notifyResolution;
         if (mNotifyResolution) {
             mObserverService =
               new nsMainThreadPtrHolder<nsIObserverService>(obs);
         }
     }
@@ -643,19 +640,36 @@ nsDNSService::GetPrefetchEnabled(bool *o
 
 NS_IMETHODIMP
 nsDNSService::SetPrefetchEnabled(bool inVal)
 {
     mDisablePrefetch = !inVal;
     return NS_OK;
 }
 
+static inline bool PreprocessHostname(bool              aLocalDomain,
+                                      const nsACString &aInput,
+                                      nsIIDNService    *aIDN,
+                                      nsACString       &aACE)
+{
+    if (aLocalDomain) {
+        aACE.AssignLiteral("localhost");
+        return true;
+    }
+
+    if (!aIDN || IsASCII(aInput)) {
+        aACE = aInput;
+        return true;
+    }
+
+    return IsUTF8(aInput) && NS_SUCCEEDED(aIDN->ConvertUTF8toACE(aInput, aACE));
+}
 
 NS_IMETHODIMP
-nsDNSService::AsyncResolve(const nsACString  &hostname,
+nsDNSService::AsyncResolve(const nsACString  &aHostname,
                            uint32_t           flags,
                            nsIDNSListener    *listener,
                            nsIEventTarget    *target_,
                            nsICancelable    **result)
 {
     // grab reference to global host resolver and IDN service.  beware
     // simultaneous shutdown!!
     nsRefPtr<nsHostResolver> res;
@@ -665,74 +679,60 @@ nsDNSService::AsyncResolve(const nsACStr
     {
         MutexAutoLock lock(mLock);
 
         if (mDisablePrefetch && (flags & RESOLVE_SPECULATE))
             return NS_ERROR_DNS_LOOKUP_QUEUE_FULL;
 
         res = mResolver;
         idn = mIDN;
-        localDomain = mLocalDomains.GetEntry(hostname);
+        localDomain = mLocalDomains.GetEntry(aHostname);
     }
 
     if (mNotifyResolution) {
         NS_DispatchToMainThread(new NotifyDNSResolution(mObserverService,
-                                                        hostname));
+                                                        aHostname));
     }
 
     if (!res)
         return NS_ERROR_OFFLINE;
 
     if (mOffline)
         flags |= RESOLVE_OFFLINE;
 
-    const nsACString *hostPtr = &hostname;
-
-    nsAutoCString strLocalhost(NS_LITERAL_CSTRING("localhost"));
-    if (localDomain) {
-        hostPtr = &strLocalhost;
-    }
-
-    nsresult rv;
-    nsAutoCString hostACE;
-    if (idn && !IsASCII(*hostPtr)) {
-        if (IsUTF8(*hostPtr) &&
-            NS_SUCCEEDED(idn->ConvertUTF8toACE(*hostPtr, hostACE))) {
-            hostPtr = &hostACE;
-        } else {
-            return NS_ERROR_FAILURE;
-        }
-    }
+    nsCString hostname;
+    if (!PreprocessHostname(localDomain, aHostname, idn, hostname))
+        return NS_ERROR_FAILURE;
 
     // make sure JS callers get notification on the main thread
     nsCOMPtr<nsIXPConnectWrappedJS> wrappedListener = do_QueryInterface(listener);
     if (wrappedListener && !target) {
         nsCOMPtr<nsIThread> mainThread;
         NS_GetMainThread(getter_AddRefs(mainThread));
         target = do_QueryInterface(mainThread);
     }
 
     if (target) {
       listener = new DNSListenerProxy(listener, target);
     }
 
-    uint16_t af = GetAFForLookup(*hostPtr, flags);
+    uint16_t af = GetAFForLookup(hostname, flags);
 
     nsDNSAsyncRequest *req =
-            new nsDNSAsyncRequest(res, *hostPtr, listener, flags, af);
+            new nsDNSAsyncRequest(res, hostname, listener, flags, af);
     if (!req)
         return NS_ERROR_OUT_OF_MEMORY;
     NS_ADDREF(*result = req);
 
-    MOZ_EVENT_TRACER_NAME_OBJECT(req, hostname.BeginReading());
+    MOZ_EVENT_TRACER_NAME_OBJECT(req, aHostname.BeginReading());
     MOZ_EVENT_TRACER_WAIT(req, "net::dns::lookup");
 
     // addref for resolver; will be released when OnLookupComplete is called.
     NS_ADDREF(req);
-    rv = res->ResolveHost(req->mHost.get(), flags, af, req);
+    nsresult rv = res->ResolveHost(req->mHost.get(), flags, af, req);
     if (NS_FAILED(rv)) {
         NS_RELEASE(req);
         NS_RELEASE(*result);
     }
     return rv;
 }
 
 NS_IMETHODIMP
@@ -740,109 +740,89 @@ nsDNSService::CancelAsyncResolve(const n
                                  uint32_t           aFlags,
                                  nsIDNSListener    *aListener,
                                  nsresult           aReason)
 {
     // grab reference to global host resolver and IDN service.  beware
     // simultaneous shutdown!!
     nsRefPtr<nsHostResolver> res;
     nsCOMPtr<nsIIDNService> idn;
+    bool localDomain = false;
     {
         MutexAutoLock lock(mLock);
 
         if (mDisablePrefetch && (aFlags & RESOLVE_SPECULATE))
             return NS_ERROR_DNS_LOOKUP_QUEUE_FULL;
 
         res = mResolver;
         idn = mIDN;
+        localDomain = mLocalDomains.GetEntry(aHostname);
     }
     if (!res)
         return NS_ERROR_OFFLINE;
 
-    nsCString hostname(aHostname);
-
-    nsAutoCString hostACE;
-    if (idn && !IsASCII(aHostname)) {
-        if (IsUTF8(aHostname) &&
-            NS_SUCCEEDED(idn->ConvertUTF8toACE(aHostname, hostACE))) {
-            hostname = hostACE;
-        } else {
-            return NS_ERROR_FAILURE;
-        }
-    }
+    nsCString hostname;
+    if (!PreprocessHostname(localDomain, aHostname, idn, hostname))
+        return NS_ERROR_FAILURE;
 
     uint16_t af = GetAFForLookup(hostname, aFlags);
 
     res->CancelAsyncRequest(hostname.get(), aFlags, af, aListener, aReason);
     return NS_OK;
 }
 
 NS_IMETHODIMP
-nsDNSService::Resolve(const nsACString &hostname,
+nsDNSService::Resolve(const nsACString &aHostname,
                       uint32_t          flags,
                       nsIDNSRecord    **result)
 {
     // grab reference to global host resolver and IDN service.  beware
     // simultaneous shutdown!!
     nsRefPtr<nsHostResolver> res;
     nsCOMPtr<nsIIDNService> idn;
     bool localDomain = false;
     {
         MutexAutoLock lock(mLock);
         res = mResolver;
         idn = mIDN;
-        localDomain = mLocalDomains.GetEntry(hostname);
+        localDomain = mLocalDomains.GetEntry(aHostname);
     }
 
     if (mNotifyResolution) {
         NS_DispatchToMainThread(new NotifyDNSResolution(mObserverService,
-                                                        hostname));
+                                                        aHostname));
     }
 
     NS_ENSURE_TRUE(res, NS_ERROR_OFFLINE);
 
     if (mOffline)
         flags |= RESOLVE_OFFLINE;
 
-    const nsACString *hostPtr = &hostname;
-
-    nsAutoCString strLocalhost(NS_LITERAL_CSTRING("localhost"));
-    if (localDomain) {
-        hostPtr = &strLocalhost;
-    }
-
-    nsresult rv;
-    nsAutoCString hostACE;
-    if (idn && !IsASCII(*hostPtr)) {
-        if (IsUTF8(*hostPtr) &&
-            NS_SUCCEEDED(idn->ConvertUTF8toACE(*hostPtr, hostACE))) {
-            hostPtr = &hostACE;
-        } else {
-            return NS_ERROR_FAILURE;
-        }
-    }
+    nsCString hostname;
+    if (!PreprocessHostname(localDomain, aHostname, idn, hostname))
+        return NS_ERROR_FAILURE;
 
     //
     // sync resolve: since the host resolver only works asynchronously, we need
     // to use a mutex and a condvar to wait for the result.  however, since the
     // result may be in the resolvers cache, we might get called back recursively
     // on the same thread.  so, our mutex needs to be re-entrant.  in other words,
     // we need to use a monitor! ;-)
     //
     
     PRMonitor *mon = PR_NewMonitor();
     if (!mon)
         return NS_ERROR_OUT_OF_MEMORY;
 
     PR_EnterMonitor(mon);
     nsDNSSyncRequest syncReq(mon);
 
-    uint16_t af = GetAFForLookup(*hostPtr, flags);
+    uint16_t af = GetAFForLookup(hostname, flags);
 
-    rv = res->ResolveHost(PromiseFlatCString(*hostPtr).get(), flags, af, &syncReq);
+    nsresult rv = res->ResolveHost(hostname.get(), flags, af, &syncReq);
     if (NS_SUCCEEDED(rv)) {
         // wait for result
         while (!syncReq.mDone)
             PR_Wait(mon, PR_INTERVAL_NO_TIMEOUT);
 
         if (NS_FAILED(syncReq.mStatus))
             rv = syncReq.mStatus;
         else {
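
The nsDNSService2.cpp change above collapses three copies of the same hostname normalization (AsyncResolve, CancelAsyncResolve, Resolve) into the new PreprocessHostname helper: a hostname on the local-domains list becomes "localhost", ASCII input (or input with no IDN service available) passes through unchanged, and anything else must be valid UTF-8 that converts to ACE, otherwise the caller returns NS_ERROR_FAILURE. A minimal standalone sketch of that decision logic, using std::string and a caller-supplied converter in place of Gecko's nsACString and nsIIDNService (the names below are illustrative, not Gecko APIs):

    #include <functional>
    #include <string>

    // Stand-in for nsIIDNService::ConvertUTF8toACE; returns false on failure.
    using AceConverter = std::function<bool(const std::string&, std::string&)>;

    static bool IsAsciiOnly(const std::string& s) {
      for (unsigned char c : s) {
        if (c > 0x7f) {
          return false;
        }
      }
      return true;
    }

    // Mirrors the control flow of the new PreprocessHostname().
    bool PreprocessHostnameSketch(bool localDomain, const std::string& input,
                                  const AceConverter* idn, std::string& out) {
      if (localDomain) {
        out = "localhost";        // local-domains pref hit
        return true;
      }
      if (!idn || IsAsciiOnly(input)) {
        out = input;              // nothing to convert
        return true;
      }
      // The real helper also checks IsUTF8(input) before converting; that
      // validation is assumed to live inside the converter here.
      return (*idn)(input, out);
    }

Each of the three call sites then reduces to a single early return on failure, as shown in the hunks above.
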
--- a/python/mozbuild/mozbuild/frontend/emitter.py
+++ b/python/mozbuild/mozbuild/frontend/emitter.py
@@ -663,16 +663,19 @@ class TreeMetadataEmitter(LoggingMixin):
             if is_framework:
                 if soname:
                     raise SandboxValidationError(
                         'IS_FRAMEWORK conflicts with SONAME. '
                         'Please remove one.', context)
                 shared_lib = True
                 shared_args['variant'] = SharedLibrary.FRAMEWORK
 
+            if not static_lib and not shared_lib:
+                static_lib = True
+
             if static_name:
                 if not static_lib:
                     raise SandboxValidationError(
                         'STATIC_LIBRARY_NAME requires FORCE_STATIC_LIB',
                         context)
                 static_args['real_name'] = static_name
 
             if shared_name:
@@ -683,19 +686,16 @@ class TreeMetadataEmitter(LoggingMixin):
                 shared_args['real_name'] = shared_name
 
             if soname:
                 if not shared_lib:
                     raise SandboxValidationError(
                         'SONAME requires FORCE_SHARED_LIB', context)
                 shared_args['soname'] = soname
 
-            if not static_lib and not shared_lib:
-                static_lib = True
-
             # If both a shared and a static library are created, only the
             # shared library is meant to be a SDK library.
             if context.get('SDK_LIBRARY'):
                 if shared_lib:
                     shared_args['is_sdk'] = True
                 elif static_lib:
                     static_args['is_sdk'] = True
 
--- a/security/manager/ssl/tests/unit/tlsserver/lib/OCSPCommon.cpp
+++ b/security/manager/ssl/tests/unit/tlsserver/lib/OCSPCommon.cpp
@@ -147,17 +147,17 @@ GetOCSPResponseForType(OCSPResponseType 
       break;
   }
   if (aORT == ORTSkipResponseBytes) {
     context.skipResponseBytes = true;
   }
   if (aORT == ORTExpired || aORT == ORTExpiredFreshCA ||
       aORT == ORTRevokedOld || aORT == ORTUnknownOld) {
     context.thisUpdate = oldNow;
-    context.nextUpdate = oldNow + 10;
+    context.nextUpdate = oldNow + Time::ONE_DAY_IN_SECONDS;
   }
   if (aORT == ORTLongValidityAlmostExpired) {
     context.thisUpdate = now - (320 * Time::ONE_DAY_IN_SECONDS);
   }
   if (aORT == ORTAncientAlmostExpired) {
     context.thisUpdate = now - (640 * Time::ONE_DAY_IN_SECONDS);
   }
   if (aORT == ORTRevoked || aORT == ORTRevokedOld) {
--- a/security/pkix/test/gtest/pkixbuild_tests.cpp
+++ b/security/pkix/test/gtest/pkixbuild_tests.cpp
@@ -35,18 +35,16 @@ using namespace mozilla::pkix::test;
 typedef ScopedPtr<CERTCertificate, CERT_DestroyCertificate>
           ScopedCERTCertificate;
 typedef ScopedPtr<CERTCertList, CERT_DestroyCertList> ScopedCERTCertList;
 
 static ByteString
 CreateCert(const char* issuerCN,
            const char* subjectCN,
            EndEntityOrCA endEntityOrCA,
-           /*optional*/ TestKeyPair* issuerKey,
-           /*out*/ ScopedTestKeyPair& subjectKey,
            /*out*/ ScopedCERTCertificate* subjectCert = nullptr)
 {
   static long serialNumberValue = 0;
   ++serialNumberValue;
   ByteString serialNumber(CreateEncodedSerialNumber(serialNumberValue));
   EXPECT_FALSE(ENCODING_FAILED(serialNumber));
 
   ByteString issuerDER(CNToDERName(issuerCN));
@@ -55,23 +53,22 @@ CreateCert(const char* issuerCN,
   ByteString extensions[2];
   if (endEntityOrCA == EndEntityOrCA::MustBeCA) {
     extensions[0] =
       CreateEncodedBasicConstraints(true, nullptr,
                                     ExtensionCriticality::Critical);
     EXPECT_FALSE(ENCODING_FAILED(extensions[0]));
   }
 
+  ScopedTestKeyPair reusedKey(CloneReusedKeyPair());
   ByteString certDER(CreateEncodedCertificate(
-                       v3, sha256WithRSAEncryption,
-                       serialNumber, issuerDER,
-                       oneDayBeforeNow, oneDayAfterNow,
-                       subjectDER, extensions, issuerKey,
-                       sha256WithRSAEncryption,
-                       subjectKey));
+                       v3, sha256WithRSAEncryption, serialNumber, issuerDER,
+                       oneDayBeforeNow, oneDayAfterNow, subjectDER,
+                       *reusedKey, extensions, *reusedKey,
+                       sha256WithRSAEncryption));
   EXPECT_FALSE(ENCODING_FAILED(certDER));
   if (subjectCert) {
     SECItem certDERItem = {
       siBuffer,
       const_cast<uint8_t*>(certDER.data()),
       static_cast<unsigned int>(certDER.length())
     };
     *subjectCert = CERT_NewTempCertificate(CERT_GetDefaultCertDB(),
@@ -95,17 +92,17 @@ public:
 
     static_assert(MOZILLA_PKIX_ARRAY_LENGTH(names) ==
                     MOZILLA_PKIX_ARRAY_LENGTH(certChainTail),
                   "mismatch in sizes of names and certChainTail arrays");
 
     for (size_t i = 0; i < MOZILLA_PKIX_ARRAY_LENGTH(names); ++i) {
       const char* issuerName = i == 0 ? names[0] : names[i-1];
       (void) CreateCert(issuerName, names[i], EndEntityOrCA::MustBeCA,
-                        leafCAKey.get(), leafCAKey, &certChainTail[i]);
+                        &certChainTail[i]);
     }
 
     return true;
   }
 
 private:
   virtual Result GetCertTrust(EndEntityOrCA, const CertPolicyId&,
                               Input candidateCert,
@@ -187,17 +184,16 @@ private:
     return TestCheckPublicKey(subjectPublicKeyInfo);
   }
 
   // We hold references to CERTCertificates in the cert chain tail so that
   // CERT_CreateSubjectCertList can find them.
   ScopedCERTCertificate certChainTail[7];
 
 public:
-  ScopedTestKeyPair leafCAKey;
   CERTCertificate* GetLeafCACert() const
   {
     return certChainTail[MOZILLA_PKIX_ARRAY_LENGTH(certChainTail) - 1].get();
   }
 };
 
 class pkixbuild : public ::testing::Test
 {
@@ -233,64 +229,58 @@ TEST_F(pkixbuild, MaxAcceptableCertChain
                              EndEntityOrCA::MustBeCA,
                              KeyUsage::noParticularKeyUsageRequired,
                              KeyPurposeId::id_kp_serverAuth,
                              CertPolicyId::anyPolicy,
                              nullptr/*stapledOCSPResponse*/));
   }
 
   {
-    ScopedTestKeyPair unusedKeyPair;
     ScopedCERTCertificate cert;
     ByteString certDER(CreateCert("CA7", "Direct End-Entity",
-                                  EndEntityOrCA::MustBeEndEntity,
-                                  trustDomain.leafCAKey.get(), unusedKeyPair));
+                                  EndEntityOrCA::MustBeEndEntity));
     ASSERT_FALSE(ENCODING_FAILED(certDER));
     Input certDERInput;
     ASSERT_EQ(Success, certDERInput.Init(certDER.data(), certDER.length()));
     ASSERT_EQ(Success,
               BuildCertChain(trustDomain, certDERInput, Now(),
                              EndEntityOrCA::MustBeEndEntity,
                              KeyUsage::noParticularKeyUsageRequired,
                              KeyPurposeId::id_kp_serverAuth,
                              CertPolicyId::anyPolicy,
                              nullptr/*stapledOCSPResponse*/));
   }
 }
 
 TEST_F(pkixbuild, BeyondMaxAcceptableCertChainLength)
 {
   static char const* const caCertName = "CA Too Far";
-  ScopedTestKeyPair caKeyPair;
 
   // We need a CERTCertificate for caCert so that the trustdomain's FindIssuer
   // method can find it through the NSS cert DB.
   ScopedCERTCertificate caCert;
 
   {
     ByteString certDER(CreateCert("CA7", caCertName, EndEntityOrCA::MustBeCA,
-                                  trustDomain.leafCAKey.get(), caKeyPair,
                                   &caCert));
     ASSERT_FALSE(ENCODING_FAILED(certDER));
     Input certDERInput;
     ASSERT_EQ(Success, certDERInput.Init(certDER.data(), certDER.length()));
     ASSERT_EQ(Result::ERROR_UNKNOWN_ISSUER,
               BuildCertChain(trustDomain, certDERInput, Now(),
                              EndEntityOrCA::MustBeCA,
                              KeyUsage::noParticularKeyUsageRequired,
                              KeyPurposeId::id_kp_serverAuth,
                              CertPolicyId::anyPolicy,
                              nullptr/*stapledOCSPResponse*/));
   }
 
   {
-    ScopedTestKeyPair unusedKeyPair;
     ByteString certDER(CreateCert(caCertName, "End-Entity Too Far",
-                                  EndEntityOrCA::MustBeEndEntity,
-                                  caKeyPair.get(), unusedKeyPair));
+                                  EndEntityOrCA::MustBeEndEntity));
     ASSERT_FALSE(ENCODING_FAILED(certDER));
     Input certDERInput;
     ASSERT_EQ(Success, certDERInput.Init(certDER.data(), certDER.length()));
     ASSERT_EQ(Result::ERROR_UNKNOWN_ISSUER,
               BuildCertChain(trustDomain, certDERInput, Now(),
                              EndEntityOrCA::MustBeEndEntity,
                              KeyUsage::noParticularKeyUsageRequired,
                              KeyPurposeId::id_kp_serverAuth,
@@ -378,35 +368,33 @@ public:
 
 private:
   ByteString rootDER;
 };
 
 TEST_F(pkixbuild, NoRevocationCheckingForExpiredCert)
 {
   const char* rootCN = "Root CA";
-  ScopedTestKeyPair rootKey;
   ByteString rootDER(CreateCert(rootCN, rootCN, EndEntityOrCA::MustBeCA,
-                                nullptr, rootKey, nullptr));
+                                nullptr));
   EXPECT_FALSE(ENCODING_FAILED(rootDER));
   ExpiredCertTrustDomain expiredCertTrustDomain(rootDER);
 
   ByteString serialNumber(CreateEncodedSerialNumber(100));
   EXPECT_FALSE(ENCODING_FAILED(serialNumber));
   ByteString issuerDER(CNToDERName(rootCN));
   ByteString subjectDER(CNToDERName("Expired End-Entity Cert"));
-  ScopedTestKeyPair unusedSubjectKey;
+  ScopedTestKeyPair reusedKey(CloneReusedKeyPair());
   ByteString certDER(CreateEncodedCertificate(
                        v3, sha256WithRSAEncryption,
                        serialNumber, issuerDER,
                        oneDayBeforeNow - Time::ONE_DAY_IN_SECONDS,
                        oneDayBeforeNow,
-                       subjectDER, nullptr, rootKey.get(),
-                       sha256WithRSAEncryption,
-                       unusedSubjectKey));
+                       subjectDER, *reusedKey, nullptr, *reusedKey,
+                       sha256WithRSAEncryption));
   EXPECT_FALSE(ENCODING_FAILED(certDER));
 
   Input cert;
   ASSERT_EQ(Success, cert.Init(certDER.data(), certDER.length()));
   ASSERT_EQ(Result::ERROR_EXPIRED_CERTIFICATE,
             BuildCertChain(expiredCertTrustDomain, cert, Now(),
                            EndEntityOrCA::MustBeEndEntity,
                            KeyUsage::noParticularKeyUsageRequired,
--- a/security/pkix/test/gtest/pkixcert_extension_tests.cpp
+++ b/security/pkix/test/gtest/pkixcert_extension_tests.cpp
@@ -26,45 +26,42 @@
 #include "pkixgtest.h"
 #include "pkixtestutil.h"
 
 using namespace mozilla::pkix;
 using namespace mozilla::pkix::test;
 
 // Creates a self-signed certificate with the given extensions.
 static ByteString
-CreateCert(const char* subjectCN,
-           const ByteString* extensions, // empty-string-terminated array
-           /*out*/ ScopedTestKeyPair& subjectKey)
+CreateCertWithExtensions(const char* subjectCN,
+                         const ByteString* extensions)
 {
   static long serialNumberValue = 0;
   ++serialNumberValue;
   ByteString serialNumber(CreateEncodedSerialNumber(serialNumberValue));
   EXPECT_FALSE(ENCODING_FAILED(serialNumber));
   ByteString issuerDER(CNToDERName(subjectCN));
   EXPECT_FALSE(ENCODING_FAILED(issuerDER));
   ByteString subjectDER(CNToDERName(subjectCN));
   EXPECT_FALSE(ENCODING_FAILED(subjectDER));
+  ScopedTestKeyPair subjectKey(CloneReusedKeyPair());
   return CreateEncodedCertificate(v3, sha256WithRSAEncryption,
                                   serialNumber, issuerDER,
                                   oneDayBeforeNow, oneDayAfterNow,
-                                  subjectDER, extensions,
-                                  nullptr,
-                                  sha256WithRSAEncryption,
-                                  subjectKey);
+                                  subjectDER, *subjectKey, extensions,
+                                  *subjectKey,
+                                  sha256WithRSAEncryption);
 }
 
 // Creates a self-signed certificate with the given extension.
 static ByteString
-CreateCert(const char* subjectStr,
-           const ByteString& extension,
-           /*out*/ ScopedTestKeyPair& subjectKey)
+CreateCertWithOneExtension(const char* subjectStr, const ByteString& extension)
 {
   const ByteString extensions[] = { extension, ByteString() };
-  return CreateCert(subjectStr, extensions, subjectKey);
+  return CreateCertWithExtensions(subjectStr, extensions);
 }
 
 class TrustEverythingTrustDomain : public TrustDomain
 {
 private:
   virtual Result GetCertTrust(EndEntityOrCA, const CertPolicyId&,
                               Input candidateCert,
                               /*out*/ TrustLevel& trustLevel)
@@ -131,18 +128,17 @@ TEST_F(pkixcert_extension, UnknownCritic
         0x85, 0x1a, 0x85, 0x1a, 0x01, 0x83, 0x74, 0x09, 0x03,
       0x01, 0x01, 0xff, // BOOLEAN (length = 1) TRUE
       0x04, 0x00 // OCTET STRING (length = 0)
   };
   static const ByteString
     unknownCriticalExtension(unknownCriticalExtensionBytes,
                              sizeof(unknownCriticalExtensionBytes));
   const char* certCN = "Cert With Unknown Critical Extension";
-  ScopedTestKeyPair key;
-  ByteString cert(CreateCert(certCN, unknownCriticalExtension, key));
+  ByteString cert(CreateCertWithOneExtension(certCN, unknownCriticalExtension));
   ASSERT_FALSE(ENCODING_FAILED(cert));
   Input certInput;
   ASSERT_EQ(Success, certInput.Init(cert.data(), cert.length()));
   ASSERT_EQ(Result::ERROR_UNKNOWN_CRITICAL_EXTENSION,
             BuildCertChain(trustDomain, certInput, Now(),
                            EndEntityOrCA::MustBeEndEntity,
                            KeyUsage::noParticularKeyUsageRequired,
                            KeyPurposeId::anyExtendedKeyUsage,
@@ -161,18 +157,18 @@ TEST_F(pkixcert_extension, UnknownNonCri
         0x2b, 0x06, 0x01, 0x04, 0x01, 0xeb, 0x49, 0x85, 0x1a,
         0x85, 0x1a, 0x85, 0x1a, 0x01, 0x83, 0x74, 0x09, 0x03,
       0x04, 0x00 // OCTET STRING (length = 0)
   };
   static const ByteString
     unknownNonCriticalExtension(unknownNonCriticalExtensionBytes,
                                 sizeof(unknownNonCriticalExtensionBytes));
   const char* certCN = "Cert With Unknown NonCritical Extension";
-  ScopedTestKeyPair key;
-  ByteString cert(CreateCert(certCN, unknownNonCriticalExtension, key));
+  ByteString cert(CreateCertWithOneExtension(certCN,
+                                             unknownNonCriticalExtension));
   ASSERT_FALSE(ENCODING_FAILED(cert));
   Input certInput;
   ASSERT_EQ(Success, certInput.Init(cert.data(), cert.length()));
   ASSERT_EQ(Success,
             BuildCertChain(trustDomain, certInput, Now(),
                            EndEntityOrCA::MustBeEndEntity,
                            KeyUsage::noParticularKeyUsageRequired,
                            KeyPurposeId::anyExtendedKeyUsage,
@@ -192,18 +188,18 @@ TEST_F(pkixcert_extension, WrongOIDCriti
         0x2b, 0x06, 0x06, 0x01, 0x05, 0x05, 0x07, 0x01, 0x01,
       0x01, 0x01, 0xff, // BOOLEAN (length = 1) TRUE
       0x04, 0x00 // OCTET STRING (length = 0)
   };
   static const ByteString
     wrongOIDCriticalExtension(wrongOIDCriticalExtensionBytes,
                               sizeof(wrongOIDCriticalExtensionBytes));
   const char* certCN = "Cert With Critical Wrong OID Extension";
-  ScopedTestKeyPair key;
-  ByteString cert(CreateCert(certCN, wrongOIDCriticalExtension, key));
+  ByteString cert(CreateCertWithOneExtension(certCN,
+                                             wrongOIDCriticalExtension));
   ASSERT_FALSE(ENCODING_FAILED(cert));
   Input certInput;
   ASSERT_EQ(Success, certInput.Init(cert.data(), cert.length()));
   ASSERT_EQ(Result::ERROR_UNKNOWN_CRITICAL_EXTENSION,
             BuildCertChain(trustDomain, certInput, Now(),
                            EndEntityOrCA::MustBeEndEntity,
                            KeyUsage::noParticularKeyUsageRequired,
                            KeyPurposeId::anyExtendedKeyUsage,
@@ -225,18 +221,17 @@ TEST_F(pkixcert_extension, CriticalAIAEx
       0x01, 0x01, 0xff, // BOOLEAN (length = 1) TRUE
       0x04, 0x02, // OCTET STRING (length = 2)
         0x30, 0x00, // SEQUENCE (length = 0)
   };
   static const ByteString
     criticalAIAExtension(criticalAIAExtensionBytes,
                          sizeof(criticalAIAExtensionBytes));
   const char* certCN = "Cert With Critical AIA Extension";
-  ScopedTestKeyPair key;
-  ByteString cert(CreateCert(certCN, criticalAIAExtension, key));
+  ByteString cert(CreateCertWithOneExtension(certCN, criticalAIAExtension));
   ASSERT_FALSE(ENCODING_FAILED(cert));
   Input certInput;
   ASSERT_EQ(Success, certInput.Init(cert.data(), cert.length()));
   ASSERT_EQ(Success,
             BuildCertChain(trustDomain, certInput, Now(),
                            EndEntityOrCA::MustBeEndEntity,
                            KeyUsage::noParticularKeyUsageRequired,
                            KeyPurposeId::anyExtendedKeyUsage,
@@ -255,18 +250,18 @@ TEST_F(pkixcert_extension, UnknownCritic
         0x55, 0x1d, 0x37, // 2.5.29.55
       0x01, 0x01, 0xff, // BOOLEAN (length = 1) TRUE
       0x04, 0x00 // OCTET STRING (length = 0)
   };
   static const ByteString
     unknownCriticalCEExtension(unknownCriticalCEExtensionBytes,
                                sizeof(unknownCriticalCEExtensionBytes));
   const char* certCN = "Cert With Unknown Critical id-ce Extension";
-  ScopedTestKeyPair key;
-  ByteString cert(CreateCert(certCN, unknownCriticalCEExtension, key));
+  ByteString cert(CreateCertWithOneExtension(certCN,
+                                             unknownCriticalCEExtension));
   ASSERT_FALSE(ENCODING_FAILED(cert));
   Input certInput;
   ASSERT_EQ(Success, certInput.Init(cert.data(), cert.length()));
   ASSERT_EQ(Result::ERROR_UNKNOWN_CRITICAL_EXTENSION,
             BuildCertChain(trustDomain, certInput, Now(),
                            EndEntityOrCA::MustBeEndEntity,
                            KeyUsage::noParticularKeyUsageRequired,
                            KeyPurposeId::anyExtendedKeyUsage,
@@ -285,18 +280,17 @@ TEST_F(pkixcert_extension, KnownCritical
       0x01, 0x01, 0xff, // BOOLEAN (length = 1) TRUE
       0x04, 0x03, // OCTET STRING (length = 3)
         0x02, 0x01, 0x00, // INTEGER (length = 1, value = 0)
   };
   static const ByteString
     criticalCEExtension(criticalCEExtensionBytes,
                         sizeof(criticalCEExtensionBytes));
   const char* certCN = "Cert With Known Critical id-ce Extension";
-  ScopedTestKeyPair key;
-  ByteString cert(CreateCert(certCN, criticalCEExtension, key));
+  ByteString cert(CreateCertWithOneExtension(certCN, criticalCEExtension));
   ASSERT_FALSE(ENCODING_FAILED(cert));
   Input certInput;
   ASSERT_EQ(Success, certInput.Init(cert.data(), cert.length()));
   ASSERT_EQ(Success,
             BuildCertChain(trustDomain, certInput, Now(),
                            EndEntityOrCA::MustBeEndEntity,
                            KeyUsage::noParticularKeyUsageRequired,
                            KeyPurposeId::anyExtendedKeyUsage,
@@ -314,18 +308,17 @@ TEST_F(pkixcert_extension, DuplicateSubj
       0x04, 15, // OCTET STRING (length = 15)
         0x30, 13, // GeneralNames (SEQUENCE) (length = 13)
           0x82, 11, // [2] (dNSName) (length = 11)
             'e', 'x', 'a', 'm', 'p', 'l', 'e', '.', 'c', 'o', 'm'
   };
   static const ByteString DER(DER_BYTES, sizeof(DER_BYTES));
   static const ByteString extensions[] = { DER, DER, ByteString() };
   static const char* certCN = "Cert With Duplicate subjectAltName";
-  ScopedTestKeyPair key;
-  ByteString cert(CreateCert(certCN, extensions, key));
+  ByteString cert(CreateCertWithExtensions(certCN, extensions));
   ASSERT_FALSE(ENCODING_FAILED(cert));
   Input certInput;
   ASSERT_EQ(Success, certInput.Init(cert.data(), cert.length()));
   ASSERT_EQ(Result::ERROR_EXTENSION_VALUE_INVALID,
             BuildCertChain(trustDomain, certInput, Now(),
                            EndEntityOrCA::MustBeEndEntity,
                            KeyUsage::noParticularKeyUsageRequired,
                            KeyPurposeId::anyExtendedKeyUsage,
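
A recurring detail in these helpers is that the extensions argument is not a counted array: per the (now removed) comment on the old CreateCert signature, it is an empty-string-terminated array, so { extension, ByteString() } passes one extension and { DER, DER, ByteString() } in the DuplicateSubjectAltName test above passes two. A small standalone illustration of walking such an array, with std::string standing in for ByteString:

    #include <cstddef>
    #include <string>

    // Counts entries in an array terminated by an empty string, the same
    // convention the pkix test helpers use with ByteString() as the sentinel.
    static std::size_t CountTerminatedExtensions(const std::string* extensions) {
      std::size_t n = 0;
      if (extensions) {
        while (!extensions[n].empty()) {
          ++n;
        }
      }
      return n;
    }

    // Usage (illustrative):
    //   const std::string exts[] = { derBlob, derBlob, std::string() };
    //   CountTerminatedExtensions(exts) == 2
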
--- a/security/pkix/test/gtest/pkixcert_signature_algorithm_tests.cpp
+++ b/security/pkix/test/gtest/pkixcert_signature_algorithm_tests.cpp
@@ -11,18 +11,16 @@
 using namespace mozilla::pkix;
 using namespace mozilla::pkix::test;
 
 static ByteString
 CreateCert(const char* issuerCN,
            const char* subjectCN,
            EndEntityOrCA endEntityOrCA,
            const ByteString& signatureAlgorithm,
-           /*optional*/ TestKeyPair* issuerKey,
-           /*out*/ ScopedTestKeyPair& subjectKey,
            /*out*/ ByteString& subjectDER)
 {
   static long serialNumberValue = 0;
   ++serialNumberValue;
   ByteString serialNumber(CreateEncodedSerialNumber(serialNumberValue));
   EXPECT_FALSE(ENCODING_FAILED(serialNumber));
 
   ByteString issuerDER(CNToDERName(issuerCN));
@@ -33,22 +31,23 @@ CreateCert(const char* issuerCN,
   ByteString extensions[2];
   if (endEntityOrCA == EndEntityOrCA::MustBeCA) {
     extensions[0] =
       CreateEncodedBasicConstraints(true, nullptr,
                                     ExtensionCriticality::Critical);
     EXPECT_FALSE(ENCODING_FAILED(extensions[0]));
   }
 
+  ScopedTestKeyPair reusedKey(CloneReusedKeyPair());
   ByteString certDER(CreateEncodedCertificate(v3, signatureAlgorithm,
-                                              serialNumber,
-                                              issuerDER, oneDayBeforeNow,
-                                              oneDayAfterNow, subjectDER,
-                                              extensions, issuerKey,
-                                              signatureAlgorithm, subjectKey));
+                                              serialNumber, issuerDER,
+                                              oneDayBeforeNow, oneDayAfterNow,
+                                              subjectDER, *reusedKey,
+                                              extensions, *reusedKey,
+                                              signatureAlgorithm));
   EXPECT_FALSE(ENCODING_FAILED(certDER));
   return certDER;
 }
 
 class AlgorithmTestsTrustDomain : public TrustDomain
 {
 public:
   AlgorithmTestsTrustDomain(const ByteString& rootDER,
@@ -201,54 +200,48 @@ class pkixcert_IsValidChainForAlgorithm
   , public ::testing::WithParamInterface<ChainValidity>
 {
 };
 
 TEST_P(pkixcert_IsValidChainForAlgorithm, IsValidChainForAlgorithm)
 {
   const ChainValidity& chainValidity(GetParam());
   const char* rootCN = "CN=Root";
-  ScopedTestKeyPair rootKey;
   ByteString rootSubjectDER;
   ByteString rootEncoded(
     CreateCert(rootCN, rootCN, EndEntityOrCA::MustBeCA,
-               chainValidity.rootSignatureAlgorithm,
-               nullptr, rootKey, rootSubjectDER));
+               chainValidity.rootSignatureAlgorithm, rootSubjectDER));
   EXPECT_FALSE(ENCODING_FAILED(rootEncoded));
   EXPECT_FALSE(ENCODING_FAILED(rootSubjectDER));
 
   const char* issuerCN = rootCN;
-  TestKeyPair* issuerKey = rootKey.get();
 
   const char* intermediateCN = "CN=Intermediate";
-  ScopedTestKeyPair intermediateKey;
   ByteString intermediateSubjectDER;
   ByteString intermediateEncoded;
   if (chainValidity.optionalIntermediateSignatureAlgorithm != NO_INTERMEDIATE) {
     intermediateEncoded =
       CreateCert(rootCN, intermediateCN, EndEntityOrCA::MustBeCA,
                  chainValidity.optionalIntermediateSignatureAlgorithm,
-                 rootKey.get(), intermediateKey, intermediateSubjectDER);
+                 intermediateSubjectDER);
     EXPECT_FALSE(ENCODING_FAILED(intermediateEncoded));
     EXPECT_FALSE(ENCODING_FAILED(intermediateSubjectDER));
     issuerCN = intermediateCN;
-    issuerKey = intermediateKey.get();
   }
 
   AlgorithmTestsTrustDomain trustDomain(rootEncoded, rootSubjectDER,
                                         intermediateEncoded,
                                         intermediateSubjectDER);
 
   const char* endEntityCN = "CN=End Entity";
-  ScopedTestKeyPair endEntityKey;
   ByteString endEntitySubjectDER;
   ByteString endEntityEncoded(
     CreateCert(issuerCN, endEntityCN, EndEntityOrCA::MustBeEndEntity,
                chainValidity.endEntitySignatureAlgorithm,
-               issuerKey, endEntityKey, endEntitySubjectDER));
+               endEntitySubjectDER));
   EXPECT_FALSE(ENCODING_FAILED(endEntityEncoded));
   EXPECT_FALSE(ENCODING_FAILED(endEntitySubjectDER));
 
   Input endEntity;
   ASSERT_EQ(Success, endEntity.Init(endEntityEncoded.data(),
                                     endEntityEncoded.length()));
   Result expectedResult = chainValidity.isValid
                         ? Success
--- a/security/pkix/test/gtest/pkixocsp_VerifyEncodedOCSPResponse.cpp
+++ b/security/pkix/test/gtest/pkixocsp_VerifyEncodedOCSPResponse.cpp
@@ -418,23 +418,23 @@ protected:
 
     const ByteString extensions[] = {
       signerEKUDER
         ? CreateEncodedEKUExtension(*signerEKUDER,
                                     ExtensionCriticality::NotCritical)
         : ByteString(),
       ByteString()
     };
-    ScopedTestKeyPair signerKeyPair;
+    ScopedTestKeyPair signerKeyPair(GenerateKeyPair());
     ByteString signerDER(CreateEncodedCertificate(
-                           ++rootIssuedCount, rootName,
-                           oneDayBeforeNow, oneDayAfterNow, certSubjectName,
-                           certSignatureAlgorithm,
+                           ++rootIssuedCount, certSignatureAlgorithm,
+                           rootName, oneDayBeforeNow, oneDayAfterNow,
+                           certSubjectName, *signerKeyPair,
                            signerEKUDER ? extensions : nullptr,
-                           rootKeyPair.get(), signerKeyPair));
+                           *rootKeyPair));
     EXPECT_FALSE(ENCODING_FAILED(signerDER));
     if (signerDEROut) {
       *signerDEROut = signerDER;
     }
 
     ByteString signerNameDER;
     if (signerName) {
       signerNameDER = CNToDERName(signerName);
@@ -446,45 +446,42 @@ protected:
                                                oneDayBeforeNow,
                                                oneDayBeforeNow,
                                                &oneDayAfterNow,
                                                sha256WithRSAEncryption,
                                                certs);
   }
 
   static ByteString CreateEncodedCertificate(uint32_t serialNumber,
+                                             const ByteString& signatureAlg,
                                              const char* issuer,
                                              time_t notBefore,
                                              time_t notAfter,
                                              const char* subject,
-                                             const ByteString& signatureAlg,
+                                             const TestKeyPair& subjectKeyPair,
                                 /*optional*/ const ByteString* extensions,
-                                /*optional*/ TestKeyPair* signerKeyPair,
-                                     /*out*/ ScopedTestKeyPair& keyPair)
+                                             const TestKeyPair& signerKeyPair)
   {
     ByteString serialNumberDER(CreateEncodedSerialNumber(serialNumber));
     if (ENCODING_FAILED(serialNumberDER)) {
       return ByteString();
     }
     ByteString issuerDER(CNToDERName(issuer));
     if (ENCODING_FAILED(issuerDER)) {
       return ByteString();
     }
     ByteString subjectDER(CNToDERName(subject));
     if (ENCODING_FAILED(subjectDER)) {
       return ByteString();
     }
     return ::mozilla::pkix::test::CreateEncodedCertificate(
-                                    v3,
-                                    signatureAlg,
-                                    serialNumberDER, issuerDER, notBefore,
-                                    notAfter, subjectDER, extensions,
-                                    signerKeyPair,
-                                    signatureAlg,
-                                    keyPair);
+                                    v3, signatureAlg, serialNumberDER,
+                                    issuerDER, notBefore, notAfter,
+                                    subjectDER, subjectKeyPair, extensions,
+                                    signerKeyPair, signatureAlg);
   }
 
   static const Input OCSPSigningEKUDER;
 };
 
 /*static*/ const Input pkixocsp_VerifyEncodedResponse_DelegatedResponder::
   OCSPSigningEKUDER(tlv_id_kp_OCSPSigning);
 
@@ -572,23 +569,23 @@ TEST_F(pkixocsp_VerifyEncodedResponse_De
   static const char* signerName = "good_indirect_expired";
 
   const ByteString extensions[] = {
     CreateEncodedEKUExtension(OCSPSigningEKUDER,
                               ExtensionCriticality::NotCritical),
     ByteString()
   };
 
-  ScopedTestKeyPair signerKeyPair;
+  ScopedTestKeyPair signerKeyPair(GenerateKeyPair());
   ByteString signerDER(CreateEncodedCertificate(
-                          ++rootIssuedCount, rootName,
+                          ++rootIssuedCount, sha256WithRSAEncryption, rootName,
                           now - (10 * Time::ONE_DAY_IN_SECONDS),
                           now - (2 * Time::ONE_DAY_IN_SECONDS),
-                          signerName, sha256WithRSAEncryption, extensions,
-                          rootKeyPair.get(), signerKeyPair));
+                          signerName, *signerKeyPair, extensions,
+                          *rootKeyPair));
   ASSERT_FALSE(ENCODING_FAILED(signerDER));
 
   ByteString certs[] = { signerDER, ByteString() };
   ByteString responseString(
                CreateEncodedOCSPSuccessfulResponse(
                          OCSPResponseContext::good, *endEntityCertID,
                          signerName, *signerKeyPair, oneDayBeforeNow,
                          oneDayBeforeNow, &oneDayAfterNow,
@@ -608,23 +605,24 @@ TEST_F(pkixocsp_VerifyEncodedResponse_De
   static const char* signerName = "good_indirect_future";
 
   const ByteString extensions[] = {
     CreateEncodedEKUExtension(OCSPSigningEKUDER,
                               ExtensionCriticality::NotCritical),
     ByteString()
   };
 
-  ScopedTestKeyPair signerKeyPair;
+  ScopedTestKeyPair signerKeyPair(GenerateKeyPair());
   ByteString signerDER(CreateEncodedCertificate(
-                         ++rootIssuedCount, rootName,
+                         ++rootIssuedCount, sha256WithRSAEncryption,
+                         rootName,
                          now + (2 * Time::ONE_DAY_IN_SECONDS),
                          now + (10 * Time::ONE_DAY_IN_SECONDS),
-                         signerName, sha256WithRSAEncryption, extensions,
-                         rootKeyPair.get(), signerKeyPair));
+                         signerName, *signerKeyPair, extensions,
+                         *rootKeyPair));
   ASSERT_FALSE(ENCODING_FAILED(signerDER));
 
   ByteString certs[] = { signerDER, ByteString() };
   ByteString responseString(
                CreateEncodedOCSPSuccessfulResponse(
                          OCSPResponseContext::good, *endEntityCertID,
                          signerName, *signerKeyPair, oneDayBeforeNow,
                          oneDayBeforeNow, &oneDayAfterNow,
@@ -714,21 +712,21 @@ TEST_F(pkixocsp_VerifyEncodedResponse_De
   ASSERT_TRUE(unknownKeyPair);
 
   // Delegated responder cert signed by unknown issuer
   const ByteString extensions[] = {
     CreateEncodedEKUExtension(OCSPSigningEKUDER,
                               ExtensionCriticality::NotCritical),
     ByteString()
   };
-  ScopedTestKeyPair signerKeyPair;
+  ScopedTestKeyPair signerKeyPair(GenerateKeyPair());
   ByteString signerDER(CreateEncodedCertificate(
-                         1, subCAName, oneDayBeforeNow, oneDayAfterNow,
-                         signerName, sha256WithRSAEncryption, extensions,
-                         unknownKeyPair.get(), signerKeyPair));
+                         1, sha256WithRSAEncryption, subCAName,
+                         oneDayBeforeNow, oneDayAfterNow, signerName,
+                         *signerKeyPair, extensions, *unknownKeyPair));
   ASSERT_FALSE(ENCODING_FAILED(signerDER));
 
   // OCSP response signed by that delegated responder
   ByteString certs[] = { signerDER, ByteString() };
   ByteString responseString(
                CreateEncodedOCSPSuccessfulResponse(
                          OCSPResponseContext::good, *endEntityCertID,
                          signerName, *signerKeyPair, oneDayBeforeNow,
@@ -754,35 +752,34 @@ TEST_F(pkixocsp_VerifyEncodedResponse_De
   static const char* subCAName = "good_indirect_subca_1_first sub-CA";
   static const char* signerName = "good_indirect_subca_1_first OCSP signer";
 
   // sub-CA of root (root is the direct issuer of endEntity)
   const ByteString subCAExtensions[] = {
     CreateEncodedBasicConstraints(true, 0, ExtensionCriticality::NotCritical),
     ByteString()
   };
-  ScopedTestKeyPair subCAKeyPair;
+  ScopedTestKeyPair subCAKeyPair(GenerateKeyPair());
   ByteString subCADER(CreateEncodedCertificate(
-                        ++rootIssuedCount, rootName,
-                        oneDayBeforeNow, oneDayAfterNow,
-                        subCAName, sha256WithRSAEncryption,
-                        subCAExtensions, rootKeyPair.get(), subCAKeyPair));
+                        ++rootIssuedCount, sha256WithRSAEncryption, rootName,
+                        oneDayBeforeNow, oneDayAfterNow, subCAName,
+                        *subCAKeyPair, subCAExtensions, *rootKeyPair));
   ASSERT_FALSE(ENCODING_FAILED(subCADER));
 
   // Delegated responder cert signed by that sub-CA
   const ByteString extensions[] = {
     CreateEncodedEKUExtension(OCSPSigningEKUDER,
                               ExtensionCriticality::NotCritical),
     ByteString(),
   };
-  ScopedTestKeyPair signerKeyPair;
+  ScopedTestKeyPair signerKeyPair(GenerateKeyPair());
   ByteString signerDER(CreateEncodedCertificate(
-                         1, subCAName, oneDayBeforeNow, oneDayAfterNow,
-                         signerName, sha256WithRSAEncryption, extensions,
-                         subCAKeyPair.get(), signerKeyPair));
+                         1, sha256WithRSAEncryption, subCAName,
+                         oneDayBeforeNow, oneDayAfterNow, signerName,
+                         *signerKeyPair, extensions, *subCAKeyPair));
   ASSERT_FALSE(ENCODING_FAILED(signerDER));
 
   // OCSP response signed by the delegated responder issued by the sub-CA
   // that is trying to impersonate the root.
   ByteString certs[] = { subCADER, signerDER, ByteString() };
   ByteString responseString(
                CreateEncodedOCSPSuccessfulResponse(
                          OCSPResponseContext::good, *endEntityCertID,
@@ -809,37 +806,36 @@ TEST_F(pkixocsp_VerifyEncodedResponse_De
   static const char* subCAName = "good_indirect_subca_1_second sub-CA";
   static const char* signerName = "good_indirect_subca_1_second OCSP signer";
 
   // sub-CA of root (root is the direct issuer of endEntity)
   const ByteString subCAExtensions[] = {
     CreateEncodedBasicConstraints(true, 0, ExtensionCriticality::NotCritical),
     ByteString()
   };
-  ScopedTestKeyPair subCAKeyPair;
-  ByteString subCADER(CreateEncodedCertificate(++rootIssuedCount, rootName,
+  ScopedTestKeyPair subCAKeyPair(GenerateKeyPair());
+  ByteString subCADER(CreateEncodedCertificate(++rootIssuedCount,
+                                               sha256WithRSAEncryption,
+                                               rootName,
                                                oneDayBeforeNow, oneDayAfterNow,
-                                               subCAName,
-                                               sha256WithRSAEncryption,
-                                               subCAExtensions,
-                                               rootKeyPair.get(),
-                                               subCAKeyPair));
+                                               subCAName, *subCAKeyPair,
+                                               subCAExtensions, *rootKeyPair));
   ASSERT_FALSE(ENCODING_FAILED(subCADER));
 
   // Delegated responder cert signed by that sub-CA
   const ByteString extensions[] = {
     CreateEncodedEKUExtension(OCSPSigningEKUDER,
                               ExtensionCriticality::NotCritical),
     ByteString()
   };
-  ScopedTestKeyPair signerKeyPair;
+  ScopedTestKeyPair signerKeyPair(GenerateKeyPair());
   ByteString signerDER(CreateEncodedCertificate(
-                         1, subCAName, oneDayBeforeNow, oneDayAfterNow,
-                         signerName, sha256WithRSAEncryption, extensions,
-                         subCAKeyPair.get(), signerKeyPair));
+                         1, sha256WithRSAEncryption, subCAName,
+                         oneDayBeforeNow, oneDayAfterNow, signerName,
+                         *signerKeyPair, extensions, *subCAKeyPair));
   ASSERT_FALSE(ENCODING_FAILED(signerDER));
 
   // OCSP response signed by the delegated responder issued by the sub-CA
   // that is trying to impersonate the root.
   ByteString certs[] = { signerDER, subCADER, ByteString() };
   ByteString responseString(
                  CreateEncodedOCSPSuccessfulResponse(
                          OCSPResponseContext::good, *endEntityCertID,
--- a/security/pkix/test/lib/pkixtestnss.cpp
+++ b/security/pkix/test/lib/pkixtestnss.cpp
@@ -27,16 +27,17 @@
 #include <limits>
 
 #include "cryptohi.h"
 #include "keyhi.h"
 #include "nss.h"
 #include "pk11pub.h"
 #include "pkix/pkixnss.h"
 #include "pkixder.h"
+#include "prinit.h"
 #include "secerr.h"
 #include "secitem.h"
 
 namespace mozilla { namespace pkix { namespace test {
 
 namespace {
 
 typedef ScopedPtr<SECKEYPublicKey, SECKEY_DestroyPublicKey>
@@ -47,23 +48,34 @@ typedef ScopedPtr<SECKEYPrivateKey, SECK
 inline void
 SECITEM_FreeItem_true(SECItem* item)
 {
   SECITEM_FreeItem(item, true);
 }
 
 typedef mozilla::pkix::ScopedPtr<SECItem, SECITEM_FreeItem_true> ScopedSECItem;
 
-Result
+TestKeyPair* GenerateKeyPairInner();
+
+void
 InitNSSIfNeeded()
 {
   if (NSS_NoDB_Init(nullptr) != SECSuccess) {
-    return MapPRErrorCodeToResult(PR_GetError());
+    abort();
   }
-  return Success;
+}
+
+static ScopedTestKeyPair reusedKeyPair;
+
+PRStatus
+InitReusedKeyPair()
+{
+  InitNSSIfNeeded();
+  reusedKeyPair = GenerateKeyPairInner();
+  return reusedKeyPair ? PR_SUCCESS : PR_FAILURE;
 }
 
 class NSSTestKeyPair : public TestKeyPair
 {
 public:
   // NSSTestKeyPair takes ownership of privateKey.
   NSSTestKeyPair(const ByteString& spki,
                  const ByteString& spk,
@@ -137,26 +149,24 @@ private:
 // Ownership of privateKey is transferred.
 TestKeyPair* CreateTestKeyPair(const ByteString& spki,
                                const ByteString& spk,
                                SECKEYPrivateKey* privateKey)
 {
   return new (std::nothrow) NSSTestKeyPair(spki, spk, privateKey);
 }
 
+namespace {
+
 TestKeyPair*
-GenerateKeyPair()
+GenerateKeyPairInner()
 {
-  if (InitNSSIfNeeded() != Success) {
-    return nullptr;
-  }
-
   ScopedPtr<PK11SlotInfo, PK11_FreeSlot> slot(PK11_GetInternalSlot());
   if (!slot) {
-    return nullptr;
+    abort();
   }
 
   // Bug 1012786: PK11_GenerateKeyPair can fail if there is insufficient
   // entropy to generate a random key. Attempting to add some entropy and
   // retrying appears to solve this issue.
   for (uint32_t retries = 0; retries < 10; retries++) {
     PK11RSAGenParams params;
     params.keySizeInBits = 2048;
@@ -166,22 +176,22 @@ GenerateKeyPair()
       privateKey(PK11_GenerateKeyPair(slot.get(), CKM_RSA_PKCS_KEY_PAIR_GEN,
                                       &params, &publicKeyTemp, false, true,
                                       nullptr));
     ScopedSECKEYPublicKey publicKey(publicKeyTemp);
     if (privateKey) {
       ScopedSECItem
         spkiDER(SECKEY_EncodeDERSubjectPublicKeyInfo(publicKey.get()));
       if (!spkiDER) {
-        return nullptr;
+        break;
       }
       ScopedPtr<CERTSubjectPublicKeyInfo, SECKEY_DestroySubjectPublicKeyInfo>
         spki(SECKEY_CreateSubjectPublicKeyInfo(publicKey.get()));
       if (!spki) {
-        return nullptr;
+        break;
       }
       SECItem spkDER = spki->subjectPublicKey;
       DER_ConvertBitString(&spkDER); // bits to bytes
       return CreateTestKeyPair(ByteString(spkiDER->data, spkiDER->len),
                                ByteString(spkDER.data, spkDER.len),
                                privateKey.release());
     }
 
@@ -196,59 +206,75 @@ GenerateKeyPair()
     // https://xkcd.com/221/
     static const uint8_t RANDOM_NUMBER[] = { 4, 4, 4, 4, 4, 4, 4, 4 };
     if (PK11_RandomUpdate((void*) &RANDOM_NUMBER,
                           sizeof(RANDOM_NUMBER)) != SECSuccess) {
       break;
     }
   }
 
+  abort();
+#if defined(_MSC_VER) && (_MSC_VER < 1700)
+  // Older versions of MSVC don't know that abort() never returns, so silence
+  // its warning by adding a redundant and never-reached return. But, only do
+  // it for that ancient compiler, because some other compilers will rightly
+  // warn that the return statement is unreachable.
   return nullptr;
+#endif
+}
+
+} // unnamed namespace
+
+TestKeyPair*
+GenerateKeyPair()
+{
+  InitNSSIfNeeded();
+  return GenerateKeyPairInner();
+}
+
+TestKeyPair*
+CloneReusedKeyPair()
+{
+  static PRCallOnceType initCallOnce;
+  if (PR_CallOnce(&initCallOnce, InitReusedKeyPair) != PR_SUCCESS) {
+    abort();
+  }
+  assert(reusedKeyPair);
+  return reusedKeyPair->Clone();
 }
 
 ByteString
 SHA1(const ByteString& toHash)
 {
-  if (InitNSSIfNeeded() != Success) {
-    return ByteString();
-  }
+  InitNSSIfNeeded();
 
   uint8_t digestBuf[SHA1_LENGTH];
   SECStatus srv = PK11_HashBuf(SEC_OID_SHA1, digestBuf, toHash.data(),
                                static_cast<int32_t>(toHash.length()));
   if (srv != SECSuccess) {
     return ByteString();
   }
   return ByteString(digestBuf, sizeof(digestBuf));
 }
 
 Result
 TestCheckPublicKey(Input subjectPublicKeyInfo)
 {
-  Result rv = InitNSSIfNeeded();
-  if (rv != Success) {
-    return rv;
-  }
+  InitNSSIfNeeded();
   return CheckPublicKey(subjectPublicKeyInfo);
 }
 
 Result
 TestVerifySignedData(const SignedDataWithSignature& signedData,
                      Input subjectPublicKeyInfo)
 {
-  Result rv = InitNSSIfNeeded();
-  if (rv != Success) {
-    return rv;
-  }
+  InitNSSIfNeeded();
   return VerifySignedData(signedData, subjectPublicKeyInfo, nullptr);
 }
 
 Result
 TestDigestBuf(Input item, /*out*/ uint8_t* digestBuf, size_t digestBufLen)
 {
-  Result rv = InitNSSIfNeeded();
-  if (rv != Success) {
-    return rv;
-  }
+  InitNSSIfNeeded();
   return DigestBuf(item, digestBuf, digestBufLen);
 }
 
 } } } // namespace mozilla::pkix::test
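
[Editor's note] The new CloneReusedKeyPair() above generates the expensive RSA key pair only once, guarded by PR_CallOnce, and then hands each caller its own clone. A minimal standalone sketch of the same once-only-init-then-clone idea, using std::call_once from the C++ standard library instead of NSPR, with a hypothetical Resource type standing in for TestKeyPair:

#include <memory>
#include <mutex>

struct Resource {
  int value;
  // Cloning is cheap compared to creating the original.
  Resource* Clone() const { return new Resource(*this); }
};

static std::once_flag initOnce;
static std::unique_ptr<Resource> reusedResource;

// Create the shared resource exactly once, no matter how many callers race in.
static void InitReusedResource() {
  reusedResource.reset(new Resource{42});
}

// Analogous to CloneReusedKeyPair(): callers own the returned clone.
Resource* CloneReusedResource() {
  std::call_once(initOnce, InitReusedResource);
  return reusedResource ? reusedResource->Clone() : nullptr;
}

int main() {
  std::unique_ptr<Resource> copy(CloneReusedResource());
  return (copy && copy->value == 42) ? 0 : 1;
}
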
--- a/security/pkix/test/lib/pkixtestutil.cpp
+++ b/security/pkix/test/lib/pkixtestutil.cpp
@@ -138,17 +138,17 @@ OCSPResponseContext::OCSPResponseContext
   , includeEmptyExtensions(false)
   , signatureAlgorithm(sha256WithRSAEncryption)
   , badSignature(false)
   , certs(nullptr)
 
   , certStatus(good)
   , revocationTime(0)
   , thisUpdate(time)
-  , nextUpdate(time + 10)
+  , nextUpdate(time + Time::ONE_DAY_IN_SECONDS)
   , includeNextUpdate(true)
 {
 }
 
 static ByteString ResponseBytes(OCSPResponseContext& context);
 static ByteString BasicOCSPResponse(OCSPResponseContext& context);
 static ByteString ResponseData(OCSPResponseContext& context);
 static ByteString ResponderID(OCSPResponseContext& context);
@@ -344,26 +344,23 @@ YMDHMS(int16_t year, int16_t month, int1
   totalSeconds += hour * 60 * 60;
   totalSeconds += minutes * 60;
   totalSeconds += seconds;
   return TimeFromElapsedSecondsAD(totalSeconds);
 }
 
 static ByteString
 SignedData(const ByteString& tbsData,
-           /*optional*/ TestKeyPair* keyPair,
+           const TestKeyPair& keyPair,
            const ByteString& signatureAlgorithm,
            bool corrupt, /*optional*/ const ByteString* certs)
 {
   ByteString signature;
-  if (keyPair) {
-    if (keyPair->SignData(tbsData, signatureAlgorithm, signature)
-          != Success) {
-       return ByteString();
-     }
+  if (keyPair.SignData(tbsData, signatureAlgorithm, signature) != Success) {
+    return ByteString();
   }
 
   // TODO: add ability to have signatures of bit length not divisible by 8,
   // resulting in unused bits in the bitstring encoding
   ByteString signatureNested(BitString(signature, corrupt));
   if (ENCODING_FAILED(signatureNested)) {
     return ByteString();
   }
@@ -459,50 +456,38 @@ static ByteString TBSCertificate(long ve
 //         signatureAlgorithm   AlgorithmIdentifier,
 //         signatureValue       BIT STRING  }
 ByteString
 CreateEncodedCertificate(long version, const ByteString& signature,
                          const ByteString& serialNumber,
                          const ByteString& issuerNameDER,
                          time_t notBefore, time_t notAfter,
                          const ByteString& subjectNameDER,
+                         const TestKeyPair& subjectKeyPair,
                          /*optional*/ const ByteString* extensions,
-                         /*optional*/ TestKeyPair* issuerKeyPair,
-                         const ByteString& signatureAlgorithm,
-                         /*out*/ ScopedTestKeyPair& keyPairResult)
+                         const TestKeyPair& issuerKeyPair,
+                         const ByteString& signatureAlgorithm)
 {
-  // It may be the case that privateKeyResult references the same TestKeyPair
-  // as issuerKeyPair. Thus, we can't set keyPairResult until after we're done
-  // with issuerKeyPair.
-  ScopedTestKeyPair subjectKeyPair(GenerateKeyPair());
-  if (!subjectKeyPair) {
-    return ByteString();
-  }
-
   ByteString tbsCertificate(TBSCertificate(version, serialNumber,
                                            signature, issuerNameDER, notBefore,
                                            notAfter, subjectNameDER,
-                                           subjectKeyPair->subjectPublicKeyInfo,
+                                           subjectKeyPair.subjectPublicKeyInfo,
                                            extensions));
   if (ENCODING_FAILED(tbsCertificate)) {
     return ByteString();
   }
 
-  ByteString result(SignedData(tbsCertificate,
-                               issuerKeyPair ? issuerKeyPair
-                                             : subjectKeyPair.get(),
+  ByteString result(SignedData(tbsCertificate, issuerKeyPair,
                                signatureAlgorithm, false, nullptr));
   if (ENCODING_FAILED(result)) {
     return ByteString();
   }
 
   MaybeLogOutput(result, "cert");
 
-  keyPairResult = subjectKeyPair.release();
-
   return result;
 }
 
 // TBSCertificate  ::=  SEQUENCE  {
 //      version         [0]  Version DEFAULT v1,
 //      serialNumber         CertificateSerialNumber,
 //      signature            AlgorithmIdentifier,
 //      issuer               Name,
@@ -758,20 +743,19 @@ ResponseBytes(OCSPResponseContext& conte
 ByteString
 BasicOCSPResponse(OCSPResponseContext& context)
 {
   ByteString tbsResponseData(ResponseData(context));
   if (ENCODING_FAILED(tbsResponseData)) {
     return ByteString();
   }
 
-  // TODO(bug 980538): certs
-  return SignedData(tbsResponseData, context.signerKeyPair.get(),
-                    context.signatureAlgorithm,
-                    context.badSignature, context.certs);
+  return SignedData(tbsResponseData, *context.signerKeyPair,
+                    context.signatureAlgorithm, context.badSignature,
+                    context.certs);
 }
 
 // Extension ::= SEQUENCE {
 //   id               OBJECT IDENTIFIER,
 //   critical         BOOLEAN DEFAULT FALSE
 //   value            OCTET STRING
 // }
 static ByteString
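
[Editor's note] With the optional issuerKeyPair pointer gone, a self-signed test certificate is now expressed by passing the same key pair as both subject and issuer. A hedged usage sketch of the new CreateEncodedCertificate signature follows; it assumes pkixtestutil.h and <ctime> are included, and CNToDERName plus the sha256WithRSAEncryption algorithm-identifier value are assumed helpers from these test utilities that may be named differently in the tree:

// Sketch only: helper names below are assumptions, not guaranteed APIs.
ByteString
CreateSelfSignedTestCert()
{
  ScopedTestKeyPair keyPair(CloneReusedKeyPair());
  if (!keyPair) {
    return ByteString();
  }
  ByteString name(CNToDERName("Self-Signed Test CA")); // assumed helper
  ByteString serialNumber(CreateEncodedSerialNumber(1));
  time_t now = time(nullptr);
  // Self-signed: the same key pair is passed as both subject and issuer,
  // replacing the old "null issuerKeyPair means self-signed" convention.
  return CreateEncodedCertificate(v3, sha256WithRSAEncryption, serialNumber,
                                  name, now - Time::ONE_DAY_IN_SECONDS,
                                  now + Time::ONE_DAY_IN_SECONDS, name,
                                  *keyPair.get(), nullptr, *keyPair.get(),
                                  sha256WithRSAEncryption);
}
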
--- a/security/pkix/test/lib/pkixtestutil.h
+++ b/security/pkix/test/lib/pkixtestutil.h
@@ -129,16 +129,17 @@ protected:
     , subjectPublicKey(spk)
   {
   }
 
   TestKeyPair(const TestKeyPair&) /*= delete*/;
   void operator=(const TestKeyPair&) /*= delete*/;
 };
 
+TestKeyPair* CloneReusedKeyPair();
 TestKeyPair* GenerateKeyPair();
 inline void DeleteTestKeyPair(TestKeyPair* keyPair) { delete keyPair; }
 typedef ScopedPtr<TestKeyPair, DeleteTestKeyPair> ScopedTestKeyPair;
 
 ByteString SHA1(const ByteString& toHash);
 
 Result TestCheckPublicKey(Input subjectPublicKeyInfo);
 Result TestVerifySignedData(const SignedDataWithSignature& signedData,
@@ -167,29 +168,25 @@ enum Version { v1 = 0, v2 = 1, v3 = 2 };
 // be the same as signatureAlgorithm, which is the algorithm actually used
 // to sign the certificate.
 // serialNumber is assumed to be the DER encoding of an INTEGER.
 //
 // If extensions is null, then no extensions will be encoded. Otherwise,
 // extensions must point to an array of ByteStrings, terminated with an empty
 // ByteString. (If the first item of the array is empty then an empty
 // Extensions sequence will be encoded.)
-//
-// If issuerPrivateKey is null, then the certificate will be self-signed.
-// Parameter order is based on the order of the attributes of the certificate
-// in RFC 5280.
 ByteString CreateEncodedCertificate(long version, const ByteString& signature,
                                     const ByteString& serialNumber,
                                     const ByteString& issuerNameDER,
                                     time_t notBefore, time_t notAfter,
                                     const ByteString& subjectNameDER,
+                                    const TestKeyPair& subjectKeyPair,
                                     /*optional*/ const ByteString* extensions,
-                                    /*optional*/ TestKeyPair* issuerKeyPair,
-                                    const ByteString& signatureAlgorithm,
-                                    /*out*/ ScopedTestKeyPair& keyPairResult);
+                                    const TestKeyPair& issuerKeyPair,
+                                    const ByteString& signatureAlgorithm);
 
 ByteString CreateEncodedSerialNumber(long value);
 
 MOZILLA_PKIX_ENUM_CLASS ExtensionCriticality { NotCritical = 0, Critical = 1 };
 
 ByteString CreateEncodedBasicConstraints(bool isCA,
                                          /*optional*/ long* pathLenConstraint,
                                          ExtensionCriticality criticality);
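
[Editor's note] The ScopedTestKeyPair typedef above pairs TestKeyPair with DeleteTestKeyPair so that ownership returned by GenerateKeyPair() or CloneReusedKeyPair() is never leaked. A rough standard-library equivalent of that deleter-in-the-type idiom, using std::unique_ptr with illustrative stand-in names rather than the actual pkix ScopedPtr:

#include <memory>

struct KeyPairLike { };  // stand-in for TestKeyPair

KeyPairLike* MakeKeyPairLike() { return new KeyPairLike(); }
void DeleteKeyPairLike(KeyPairLike* p) { delete p; }

// The deleter is part of the pointer's type, mirroring
// typedef ScopedPtr<TestKeyPair, DeleteTestKeyPair> ScopedTestKeyPair;
typedef std::unique_ptr<KeyPairLike, decltype(&DeleteKeyPairLike)>
  ScopedKeyPairLike;

int main()
{
  ScopedKeyPairLike keyPair(MakeKeyPairLike(), &DeleteKeyPairLike);
  // keyPair is freed automatically when it goes out of scope.
  return keyPair ? 0 : 1;
}
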
--- a/testing/mochitest/Makefile.in
+++ b/testing/mochitest/Makefile.in
@@ -57,16 +57,20 @@ endif
 
 ifeq (windows,$(MOZ_WIDGET_TOOLKIT))
 TEST_HARNESS_BINS += screenshot$(BIN_SUFFIX)
 ifdef MOZ_METRO
 TEST_HARNESS_BINS += metrotestharness$(BIN_SUFFIX)
 endif
 endif
 
+ifdef MOZ_DMD
+TEST_HARNESS_BINS += dmd.py
+endif
+
 # Components / typelibs that don't get packaged with
 # the build, but that we need for the test harness.
 TEST_HARNESS_COMPONENTS := \
   test_necko.xpt \
   $(NULL)
 
 # We need the test plugin as some tests rely on it
 ifeq (Darwin,$(OS_TARGET))
--- a/testing/xpcshell/runxpcshelltests.py
+++ b/testing/xpcshell/runxpcshelltests.py
@@ -601,16 +601,35 @@ class XPCShellTestThread(Thread):
         cmdT = self.buildCmdTestFile(name)
 
         args = self.xpcsRunArgs[:]
         if 'debug' in self.test_object:
             args.insert(0, '-d')
 
         completeCmd = cmdH + cmdT + args
 
+        if self.test_object.get('dmd') == 'true':
+            if sys.platform.startswith('linux'):
+                preloadEnvVar = 'LD_PRELOAD'
+                libdmd = os.path.join(self.xrePath, 'libdmd.so')
+            elif sys.platform == 'osx' or sys.platform == 'darwin':
+                preloadEnvVar = 'DYLD_INSERT_LIBRARIES'
+                # self.xrePath is <prefix>/Contents/Resources.
+                # We need <prefix>/Contents/MacOS/libdmd.dylib.
+                contents_dir = os.path.dirname(self.xrePath)
+                libdmd = os.path.join(contents_dir, 'MacOS', 'libdmd.dylib')
+            elif sys.platform == 'win32':
+                preloadEnvVar = 'MOZ_REPLACE_MALLOC_LIB'
+                libdmd = os.path.join(self.xrePath, 'dmd.dll')
+
+            self.env['DMD'] = '--mode=test'
+            self.env['PYTHON'] = sys.executable
+            self.env['BREAKPAD_SYMBOLS_PATH'] = self.symbolsPath
+            self.env[preloadEnvVar] = libdmd
+
         testTimeoutInterval = HARNESS_TIMEOUT
         # Allow a test to request a multiple of the timeout if it is expected to take a long time
         if 'requesttimeoutfactor' in self.test_object:
             testTimeoutInterval *= int(self.test_object['requesttimeoutfactor'])
 
         testTimer = None
         if not self.interactive and not self.debuggerInfo:
             testTimer = Timer(testTimeoutInterval, lambda: self.testTimeout(name, proc))
--- a/toolkit/devtools/server/actors/memory.js
+++ b/toolkit/devtools/server/actors/memory.js
@@ -253,17 +253,17 @@ let MemoryActor = protocol.ActorClass({
    *          profiling and done only when necessary.
    */
   getAllocations: method(expectState("attached", function() {
     const allocations = this.dbg.memory.drainAllocationsLog()
     const packet = {
       allocations: []
     };
 
-    for (let stack of allocations) {
+    for (let { frame: stack } of allocations) {
       if (stack && Cu.isDeadWrapper(stack)) {
         continue;
       }
 
       // Safe because SavedFrames are frozen/immutable.
       let waived = Cu.waiveXrays(stack);
 
       // Ensure that we have a form, count, and index for new allocations