Merge mozilla-inbound to mozilla-central. a=merge
author Cosmin Sabou <csabou@mozilla.com>
Mon, 23 Jul 2018 12:30:50 +0300
changeset 427753 143984185dcece46031c970179ddea4837a6c01d
parent 427752 061213ccc62a5898157c50ab3c246314e057e45b (current diff)
parent 427742 e18b9018442b8000ee91777a0cf71710247bcfef (diff)
child 427754 1cf8b6aeaa61b106a2159fd449375556a87d1fe7
child 427778 867794e06da1b1c19e20b7e74b5e560559162d9e
push id 105528
push user csabou@mozilla.com
push date Mon, 23 Jul 2018 09:34:02 +0000
treeherder mozilla-inbound@1cf8b6aeaa61
reviewers merge
milestone 63.0a1
first release with 143984185dce / 63.0a1 / 20180723100101 (nightly linux32, linux64, mac, win32, win64)
--- a/devtools/server/actors/moz.build
+++ b/devtools/server/actors/moz.build
@@ -6,16 +6,17 @@
 
 DIRS += [
     'addon',
     'canvas',
     'emulation',
     'highlighters',
     'inspector',
     'object',
+    'replay',
     'targets',
     'utils',
     'webconsole',
     'worker',
 ]
 
 DevToolsModules(
     'accessibility-parent.js',
new file mode 100644
--- /dev/null
+++ b/devtools/server/actors/replay/debugger.js
@@ -0,0 +1,673 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim: set ft=javascript ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+/* eslint-disable spaced-comment, brace-style, indent-legacy */
+
+// When recording/replaying an execution with Web Replay, Devtools server code
+// runs in the middleman process instead of the recording/replaying process the
+// code is interested in.
+//
+// This file defines replay objects analogous to those constructed by the
+// C++ Debugger (Debugger, Debugger.Object, etc.), which implement similar
+// methods and properties to those C++ objects. These replay objects are
+// created in the middleman process, and describe things that exist in the
+// recording/replaying process, inspecting them via the RecordReplayControl
+// interface.
+
+"use strict";
+
+const RecordReplayControl = require("RecordReplayControl");
+
+///////////////////////////////////////////////////////////////////////////////
+// ReplayDebugger
+///////////////////////////////////////////////////////////////////////////////
+
+function ReplayDebugger() {
+  RecordReplayControl.registerReplayDebugger(this);
+
+  // All breakpoints (per BreakpointPosition) installed by this debugger.
+  this._breakpoints = [];
+
+  // All ReplayDebuggerFrames that have been created while paused at the
+  // current position, indexed by their index (zero is the oldest frame, with
+  // the index increasing for newer frames). These are invalidated when
+  // unpausing.
+  this._frames = [];
+
+  // All ReplayDebuggerObjects and ReplayDebuggerEnvironments that have been
+  // created while paused at the current position, indexed by their id. These
+  // are invalidated when unpausing.
+  this._objects = [];
+
+  // All ReplayDebuggerScripts and ReplayDebuggerScriptSources that have been
+  // created, indexed by their id. These stay valid even after unpausing.
+  this._scripts = [];
+  this._scriptSources = [];
+}
+
+// Frame index used to refer to the newest frame in the child process.
+const NewestFrameIndex = -1;
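+
+// A minimal usage sketch: a middleman client can drive a ReplayDebugger much
+// like the C++ Debugger it mirrors (handler body is illustrative):
+//
+//   const dbg = new ReplayDebugger();
+//   dbg.onNewScript = script => dump("new script: " + script.url + "\n");
+//   if (dbg.canRewind()) {
+//     dbg.replayResumeBackward();
+//   }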
+
+ReplayDebugger.prototype = {
+
+  /////////////////////////////////////////////////////////
+  // General methods
+  /////////////////////////////////////////////////////////
+
+  replaying: true,
+
+  canRewind: RecordReplayControl.canRewind,
+  replayResumeBackward() { RecordReplayControl.resume(/* forward = */ false); },
+  replayResumeForward() { RecordReplayControl.resume(/* forward = */ true); },
+  replayPause: RecordReplayControl.pause,
+
+  addDebuggee() {},
+  removeAllDebuggees() {},
+
+  replayingContent(url) {
+    return this._sendRequest({ type: "getContent", url });
+  },
+
+  _sendRequest(request) {
+    const data = RecordReplayControl.sendRequest(request);
+    //dump("SendRequest: " +
+    //     JSON.stringify(request) + " -> " + JSON.stringify(data) + "\n");
+    if (data.exception) {
+      ThrowError(data.exception);
+    }
+    return data;
+  },
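+
+  // Sketch of a _sendRequest round trip: requests and responses are plain
+  // JSON-style data (response shape per getScriptData() in replay.js):
+  //
+  //   this._sendRequest({ type: "getScript", id: 1 })
+  //   // => { id: 1, sourceId, startLine, lineCount, url, ... }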
+
+  _setBreakpoint(handler, position, data) {
+    const id = RecordReplayControl.setBreakpoint(handler, position);
+    this._breakpoints.push({id, position, data});
+  },
+
+  _clearMatchingBreakpoints(callback) {
+    this._breakpoints = this._breakpoints.filter(breakpoint => {
+      if (callback(breakpoint)) {
+        RecordReplayControl.clearBreakpoint(breakpoint.id);
+        return false;
+      }
+      return true;
+    });
+  },
+
+  _searchBreakpoints(callback) {
+    for (const breakpoint of this._breakpoints) {
+      const v = callback(breakpoint);
+      if (v) {
+        return v;
+      }
+    }
+    return undefined;
+  },
+
+  // This is called on all ReplayDebuggers whenever the child process is about
+  // to unpause. Clear out all data that is invalidated as a result.
+  invalidateAfterUnpause() {
+    this._frames.forEach(frame => {
+      if (frame) {
+        frame._invalidate();
+      }
+    });
+    this._frames.length = 0;
+
+    this._objects.forEach(obj => obj._invalidate());
+    this._objects.length = 0;
+  },
+
+  /////////////////////////////////////////////////////////
+  // Script methods
+  /////////////////////////////////////////////////////////
+
+  _getScript(id) {
+    if (!id) {
+      return null;
+    }
+    const rv = this._scripts[id];
+    if (rv) {
+      return rv;
+    }
+    return this._addScript(this._sendRequest({ type: "getScript", id }));
+  },
+
+  _addScript(data) {
+    if (!this._scripts[data.id]) {
+      this._scripts[data.id] = new ReplayDebuggerScript(this, data);
+    }
+    return this._scripts[data.id];
+  },
+
+  findScripts() {
+    // Note: Debugger's findScripts() method takes a query argument, which
+    // we ignore here.
+    const data = this._sendRequest({ type: "findScripts" });
+    return data.map(script => this._addScript(script));
+  },
+
+  /////////////////////////////////////////////////////////
+  // ScriptSource methods
+  /////////////////////////////////////////////////////////
+
+  _getSource(id) {
+    if (!this._scriptSources[id]) {
+      const data = this._sendRequest({ type: "getSource", id });
+      this._scriptSources[id] = new ReplayDebuggerScriptSource(this, data);
+    }
+    return this._scriptSources[id];
+  },
+
+  /////////////////////////////////////////////////////////
+  // Object methods
+  /////////////////////////////////////////////////////////
+
+  _getObject(id) {
+    if (id && !this._objects[id]) {
+      const data = this._sendRequest({ type: "getObject", id });
+      switch (data.kind) {
+      case "Object":
+        this._objects[id] = new ReplayDebuggerObject(this, data);
+        break;
+      case "Environment":
+        this._objects[id] = new ReplayDebuggerEnvironment(this, data);
+        break;
+      default:
+        ThrowError("Unknown object kind");
+      }
+    }
+    return this._objects[id];
+  },
+
+  _convertValue(value) {
+    if (value && typeof value == "object") {
+      if (value.object) {
+        return this._getObject(value.object);
+      } else if (value.special == "undefined") {
+        return undefined;
+      } else if (value.special == "NaN") {
+        return NaN;
+      } else if (value.special == "Infinity") {
+        return Infinity;
+      } else if (value.special == "-Infinity") {
+        return -Infinity;
+      }
+    }
+    return value;
+  },
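+
+  // The encoding handled above, at a glance (the encoder is convertValue()
+  // in replay.js):
+  //
+  //   { object: 3 }            => this._getObject(3)
+  //   { special: "undefined" } => undefined
+  //   { special: "NaN" }       => NaN
+  //   { special: "Infinity" }  => Infinity
+  //   anything else            => passed through unchanged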
+
+  _convertCompletionValue(value) {
+    if ("return" in value) {
+      return { return: this._convertValue(value.return) };
+    }
+    if ("throw" in value) {
+      return { throw: this._convertValue(value.throw) };
+    }
+    ThrowError("Unexpected completion value");
+    return null; // For eslint
+  },
+
+  /////////////////////////////////////////////////////////
+  // Frame methods
+  /////////////////////////////////////////////////////////
+
+  _getFrame(index) {
+    if (index == NewestFrameIndex) {
+      if (this._frames.length) {
+        return this._frames[this._frames.length - 1];
+      }
+    } else {
+      assert(index < this._frames.length);
+      if (this._frames[index]) {
+        return this._frames[index];
+      }
+    }
+
+    const data = this._sendRequest({ type: "getFrame", index });
+
+    if (index == NewestFrameIndex) {
+      if ("index" in data) {
+        index = data.index;
+      } else {
+        // There are no frames on the stack.
+        return null;
+      }
+
+      // Fill in the older frames.
+      while (index >= this._frames.length) {
+        this._frames.push(null);
+      }
+    }
+
+    this._frames[index] = new ReplayDebuggerFrame(this, data);
+    return this._frames[index];
+  },
+
+  getNewestFrame() {
+    return this._getFrame(NewestFrameIndex);
+  },
+
+  get onNewScript() {
+    return this._searchBreakpoints(({position, data}) => {
+      return position.kind == "NewScript" ? data : null;
+    });
+  },
+
+  set onNewScript(handler) {
+    if (handler) {
+      this._setBreakpoint(() => {
+        const script = this._sendRequest({ type: "getNewScript" });
+        const debugScript = this._addScript(script);
+        handler.call(this, debugScript);
+      }, { kind: "NewScript" }, handler);
+    } else {
+      this._clearMatchingBreakpoints(({position}) => position.kind == "NewScript");
+    }
+  },
+
+  get onEnterFrame() {
+    return this._searchBreakpoints(({position, data}) => {
+      return position.kind == "EnterFrame" ? data : null;
+    });
+  },
+
+  set onEnterFrame(handler) {
+    if (handler) {
+      this._setBreakpoint(() => handler.call(this, this.getNewestFrame()),
+                          { kind: "EnterFrame" }, handler);
+    } else {
+      this._clearMatchingBreakpoints(({position}) => position.kind == "EnterFrame");
+    }
+  },
+
+  get replayingOnPopFrame() {
+    return this._searchBreakpoints(({position, data}) => {
+      return (position.kind == "OnPop" && !position.script) ? data : null;
+    });
+  },
+
+  set replayingOnPopFrame(handler) {
+    if (handler) {
+      this._setBreakpoint(() => handler.call(this, this.getNewestFrame()),
+                          { kind: "OnPop" }, handler);
+    } else {
+      this._clearMatchingBreakpoints(({position}) => {
+        return position.kind == "OnPop" && !position.script;
+      });
+    }
+  },
+
+  clearAllBreakpoints: NYI,
+
+}; // ReplayDebugger.prototype
+
+///////////////////////////////////////////////////////////////////////////////
+// ReplayDebuggerScript
+///////////////////////////////////////////////////////////////////////////////
+
+function ReplayDebuggerScript(dbg, data) {
+  this._dbg = dbg;
+  this._data = data;
+}
+
+ReplayDebuggerScript.prototype = {
+  get displayName() { return this._data.displayName; },
+  get url() { return this._data.url; },
+  get startLine() { return this._data.startLine; },
+  get lineCount() { return this._data.lineCount; },
+  get source() { return this._dbg._getSource(this._data.sourceId); },
+  get sourceStart() { return this._data.sourceStart; },
+  get sourceLength() { return this._data.sourceLength; },
+
+  _forward(type, value) {
+    return this._dbg._sendRequest({ type, id: this._data.id, value });
+  },
+
+  getLineOffsets(line) { return this._forward("getLineOffsets", line); },
+  getOffsetLocation(pc) { return this._forward("getOffsetLocation", pc); },
+  getSuccessorOffsets(pc) { return this._forward("getSuccessorOffsets", pc); },
+  getPredecessorOffsets(pc) { return this._forward("getPredecessorOffsets", pc); },
+
+  setBreakpoint(offset, handler) {
+    this._dbg._setBreakpoint(() => { handler.hit(this._dbg.getNewestFrame()); },
+                             { kind: "Break", script: this._data.id, offset },
+                             handler);
+  },
+
+  clearBreakpoint(handler) {
+    this._dbg._clearMatchingBreakpoints(({position, data}) => {
+      return position.script == this._data.id && handler == data;
+    });
+  },
+
+  get isGeneratorFunction() { NYI(); },
+  get isAsyncFunction() { NYI(); },
+  get format() { NYI(); },
+  getChildScripts: NYI,
+  getAllOffsets: NYI,
+  getAllColumnOffsets: NYI,
+  getBreakpoints: NYI,
+  clearAllBreakpoints: NYI,
+  isInCatchScope: NYI,
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// ReplayDebuggerScriptSource
+///////////////////////////////////////////////////////////////////////////////
+
+function ReplayDebuggerScriptSource(dbg, data) {
+  this._dbg = dbg;
+  this._data = data;
+}
+
+ReplayDebuggerScriptSource.prototype = {
+  get text() { return this._data.text; },
+  get url() { return this._data.url; },
+  get displayURL() { return this._data.displayURL; },
+  get elementAttributeName() { return this._data.elementAttributeName; },
+  get introductionOffset() { return this._data.introductionOffset; },
+  get introductionType() { return this._data.introductionType; },
+  get sourceMapURL() { return this._data.sourceMapURL; },
+  get element() { return null; },
+
+  get introductionScript() {
+    return this._dbg._getScript(this._data.introductionScript);
+  },
+
+  get binary() { NYI(); },
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// ReplayDebuggerFrame
+///////////////////////////////////////////////////////////////////////////////
+
+function ReplayDebuggerFrame(dbg, data) {
+  this._dbg = dbg;
+  this._data = data;
+  if (this._data.arguments) {
+    this._data.arguments =
+      this._data.arguments.map(this._dbg._convertValue.bind(this._dbg));
+  }
+}
+
+ReplayDebuggerFrame.prototype = {
+  _invalidate() {
+    this._data = null;
+  },
+
+  get type() { return this._data.type; },
+  get callee() { return this._dbg._getObject(this._data.callee); },
+  get environment() { return this._dbg._getObject(this._data.environment); },
+  get generator() { return this._data.generator; },
+  get constructing() { return this._data.constructing; },
+  get this() { return this._dbg._convertValue(this._data.this); },
+  get script() { return this._dbg._getScript(this._data.script); },
+  get offset() { return this._data.offset; },
+  get arguments() { return this._data.arguments; },
+  get live() { return true; },
+
+  eval(text, options) {
+    const rv = this._dbg._sendRequest({ type: "frameEvaluate",
+                                        index: this._data.index, text, options });
+    return this._dbg._convertCompletionValue(rv);
+  },
+
+  _positionMatches(position, kind) {
+    return position.kind == kind
+        && position.script == this._data.script
+        && position.frameIndex == this._data.index;
+  },
+
+  get onStep() {
+    return this._dbg._searchBreakpoints(({position, data}) => {
+      return this._positionMatches(position, "OnStep") ? data : null;
+    });
+  },
+
+  set onStep(handler) {
+    if (handler) {
+      // Use setReplayingOnStep instead.
+      NotAllowed();
+    }
+    this._clearOnStepBreakpoints();
+  },
+
+  _clearOnStepBreakpoints() {
+    this._dbg._clearMatchingBreakpoints(
+      ({position}) => this._positionMatches(position, "OnStep")
+    );
+  },
+
+  setReplayingOnStep(handler, offsets) {
+    this._clearOnStepBreakpoints();
+    offsets.forEach(offset => {
+      this._dbg._setBreakpoint(
+        () => handler.call(this._dbg.getNewestFrame()),
+        { kind: "OnStep",
+          script: this._data.script,
+          offset,
+          frameIndex: this._data.index },
+        handler);
+    });
+  },
+
+  get onPop() {
+    return this._dbg._searchBreakpoints(({position, data}) => {
+      return this._positionMatches(position, "OnPop") ? data : null;
+    });
+  },
+
+  set onPop(handler) {
+    if (handler) {
+      this._dbg._setBreakpoint(() => {
+          const result = this._dbg._sendRequest({ type: "popFrameResult" });
+          handler.call(this._dbg.getNewestFrame(),
+                       this._dbg._convertCompletionValue(result));
+        },
+        { kind: "OnPop", script: this._data.script, frameIndex: this._data.index },
+        handler);
+    } else {
+      this._dbg._clearMatchingBreakpoints(
+        ({position}) => this._positionMatches(position, "OnPop")
+      );
+    }
+  },
+
+  get older() {
+    if (this._data.index == 0) {
+      // This is the oldest frame.
+      return null;
+    }
+    return this._dbg._getFrame(this._data.index - 1);
+  },
+
+  get implementation() { NYI(); },
+  evalWithBindings: NYI,
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// ReplayDebuggerObject
+///////////////////////////////////////////////////////////////////////////////
+
+function ReplayDebuggerObject(dbg, data) {
+  this._dbg = dbg;
+  this._data = data;
+  this._properties = null;
+}
+
+ReplayDebuggerObject.prototype = {
+  _invalidate() {
+    this._data = null;
+    this._properties = null;
+  },
+
+  get callable() { return this._data.callable; },
+  get isBoundFunction() { return this._data.isBoundFunction; },
+  get isArrowFunction() { return this._data.isArrowFunction; },
+  get isGeneratorFunction() { return this._data.isGeneratorFunction; },
+  get isAsyncFunction() { return this._data.isAsyncFunction; },
+  get proto() { return this._dbg._getObject(this._data.proto); },
+  get class() { return this._data.class; },
+  get name() { return this._data.name; },
+  get displayName() { return this._data.displayName; },
+  get parameterNames() { return this._data.parameterNames; },
+  get script() { return this._dbg._getScript(this._data.script); },
+  get environment() { return this._dbg._getObject(this._data.environment); },
+  get boundTargetFunction() { return this.isBoundFunction ? NYI() : undefined; },
+  get boundThis() { return this.isBoundFunction ? NYI() : undefined; },
+  get boundArguments() { return this.isBoundFunction ? NYI() : undefined; },
+  get global() { return this._dbg._getObject(this._data.global); },
+  get isProxy() { return this._data.isProxy; },
+
+  isExtensible() { return this._data.isExtensible; },
+  isSealed() { return this._data.isSealed; },
+  isFrozen() { return this._data.isFrozen; },
+  unwrap() { return this.isProxy ? NYI() : this; },
+
+  unsafeDereference() {
+    // Direct access to the referent is not currently available.
+    return null;
+  },
+
+  getOwnPropertyNames() {
+    this._ensureProperties();
+    return Object.keys(this._properties);
+  },
+
+  getOwnPropertySymbols() {
+    // Symbol properties are not handled yet.
+    return [];
+  },
+
+  getOwnPropertyDescriptor(name) {
+    this._ensureProperties();
+    return this._properties[name];
+  },
+
+  _ensureProperties() {
+    if (!this._properties) {
+      const properties = this._dbg._sendRequest({
+        type: "getObjectProperties",
+        id: this._data.id
+      });
+      this._properties = {};
+      properties.forEach(({name, desc}) => {
+        if ("value" in desc) {
+          desc.value = this._dbg._convertValue(desc.value);
+        }
+        if ("get" in desc) {
+          desc.get = this._dbg._getObject(desc.get);
+        }
+        if ("set" in desc) {
+          desc.set = this._dbg._getObject(desc.set);
+        }
+        this._properties[name] = desc;
+      });
+    }
+  },
+
+  get allocationSite() { NYI(); },
+  get errorMessageName() { NYI(); },
+  get errorNotes() { NYI(); },
+  get errorLineNumber() { NYI(); },
+  get errorColumnNumber() { NYI(); },
+  get proxyTarget() { NYI(); },
+  get proxyHandler() { NYI(); },
+  get isPromise() { NYI(); },
+  call: NYI,
+  apply: NYI,
+  asEnvironment: NYI,
+  executeInGlobal: NYI,
+  executeInGlobalWithBindings: NYI,
+  makeDebuggeeValue: NYI,
+
+  preventExtensions: NotAllowed,
+  seal: NotAllowed,
+  freeze: NotAllowed,
+  defineProperty: NotAllowed,
+  defineProperties: NotAllowed,
+  deleteProperty: NotAllowed,
+  forceLexicalInitializationByName: NotAllowed,
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// ReplayDebuggerEnvironment
+///////////////////////////////////////////////////////////////////////////////
+
+function ReplayDebuggerEnvironment(dbg, data) {
+  this._dbg = dbg;
+  this._data = data;
+  this._names = null;
+}
+
+ReplayDebuggerEnvironment.prototype = {
+  _invalidate() {
+    this._data = null;
+    this._names = null;
+  },
+
+  get type() { return this._data.type; },
+  get parent() { return this._dbg._getObject(this._data.parent); },
+  get object() { return this._dbg._getObject(this._data.object); },
+  get callee() { return this._dbg._getObject(this._data.callee); },
+  get optimizedOut() { return this._data.optimizedOut; },
+
+  _ensureNames() {
+    if (!this._names) {
+      const names =
+        this._dbg._sendRequest({ type: "getEnvironmentNames", id: this._data.id });
+      this._names = {};
+      names.forEach(({ name, value }) => {
+        this._names[name] = this._dbg._convertValue(value);
+      });
+    }
+  },
+
+  names() {
+    this._ensureNames();
+    return Object.keys(this._names);
+  },
+
+  getVariable(name) {
+    this._ensureNames();
+    return this._names[name];
+  },
+
+  get inspectable() {
+    // All ReplayDebugger environments are inspectable, as all compartments in
+    // the replayed process are considered to be debuggees.
+    return true;
+  },
+
+  find: NYI,
+  setVariable: NotAllowed,
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Utilities
+///////////////////////////////////////////////////////////////////////////////
+
+function NYI() {
+  ThrowError("Not yet implemented");
+}
+
+function NotAllowed() {
+  ThrowError("Not allowed");
+}
+
+function ThrowError(msg)
+{
+  const error = new Error(msg);
+  dump("ReplayDebugger Server Error: " + msg + " Stack: " + error.stack + "\n");
+  throw error;
+}
+
+function assert(v) {
+  if (!v) {
+    throw new Error("Assertion Failed!");
+  }
+}
+
+module.exports = ReplayDebugger;
new file mode 100644
--- /dev/null
+++ b/devtools/server/actors/replay/graphics.js
@@ -0,0 +1,67 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim: set ft=javascript ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This module defines the routines used when updating the graphics shown by a
+// middleman process tab. Middleman processes have their own window/document
+// which are connected to the compositor in the UI process in the usual way.
+// We need to update the contents of the document to draw the raw graphics data
+// provided by the child process.
+
+"use strict";
+
+ChromeUtils.import("resource://gre/modules/Services.jsm");
+
+function updateWindow(window, buffer, width, height) {
+  // Make sure the window has a canvas filling the screen.
+  let canvas = window.middlemanCanvas;
+  if (!canvas) {
+    canvas = window.document.createElement("canvas");
+    window.document.body.style.margin = "0px";
+    window.document.body.insertBefore(canvas, window.document.body.firstChild);
+    window.middlemanCanvas = canvas;
+  }
+
+  canvas.width = width;
+  canvas.height = height;
+
+  // If there is a scale for this window, then the graphics will already have
+  // been scaled in the child process. To avoid scaling the graphics twice,
+  // transform the canvas to undo the scaling.
+  const scale = window.devicePixelRatio;
+  if (scale != 1) {
+    canvas.style.transform =
+      `scale(${ 1 / scale }) translate(-${ width / scale }px, -${ height / scale }px)`;
+  }
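+
+  // Worked example: with devicePixelRatio == 2 and an 800x600 buffer this
+  // yields "scale(0.5) translate(-400px, -300px)", undoing the scaling the
+  // child process already applied.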
+
+  const graphicsData = new Uint8Array(buffer);
+  const imageData = canvas.getContext("2d").getImageData(0, 0, width, height);
+  imageData.data.set(graphicsData);
+  canvas.getContext("2d").putImageData(imageData, 0, 0);
+
+  // Make recording/replaying tabs easier to differentiate from other tabs.
+  window.document.title = "RECORD/REPLAY";
+}
+
+// Entry point for when we have some new graphics data from the child process
+// to draw.
+// eslint-disable-next-line no-unused-vars
+function Update(buffer, width, height) {
+  try {
+    // Paint to all windows we can find. Hopefully there is only one.
+    const windowEnumerator = Services.ww.getWindowEnumerator();
+    while (windowEnumerator.hasMoreElements()) {
+      const window = windowEnumerator.getNext().QueryInterface(Ci.nsIDOMWindow);
+      updateWindow(window, buffer, width, height);
+    }
+  } catch (e) {
+    dump("Middleman Graphics Update Exception: " + e + "\n");
+  }
+}
+
+// eslint-disable-next-line no-unused-vars
+var EXPORTED_SYMBOLS = [
+  "Update",
+];
new file mode 100644
--- /dev/null
+++ b/devtools/server/actors/replay/moz.build
@@ -0,0 +1,11 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+DevToolsModules(
+    'debugger.js',
+    'graphics.js',
+    'replay.js',
+)
new file mode 100644
--- /dev/null
+++ b/devtools/server/actors/replay/replay.js
@@ -0,0 +1,598 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim: set ft=javascript ts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+/* eslint-disable spaced-comment, brace-style, indent-legacy */
+
+// This file defines the logic that runs in the record/replay devtools sandbox.
+// This code is loaded into all recording/replaying processes, and responds to
+// requests and other instructions from the middleman via the exported symbols
+// defined at the end of this file.
+//
+// Like all other JavaScript in the recording/replaying process, this code's
+// state is included in memory snapshots and reset when checkpoints are
+// restored. In the process of handling the middleman's requests, however, its
+// state may vary between recording and replaying, or between different
+// replays. As a result, we have to be very careful about performing operations
+// that might interact with the recording --- any time we enter the debuggee
+// and evaluate code or perform other operations.
+// The RecordReplayControl.maybeDivergeFromRecording function should be used at
+// any point where such interactions might occur.
+
+"use strict";
+
+const CC = Components.Constructor;
+
+// Create a sandbox with the resources we need. require() doesn't work here.
+const sandbox = Cu.Sandbox(CC("@mozilla.org/systemprincipal;1", "nsIPrincipal")());
+Cu.evalInSandbox(
+  "Components.utils.import('resource://gre/modules/jsdebugger.jsm');" +
+  "addDebuggerToGlobal(this);",
+  sandbox
+);
+const Debugger = sandbox.Debugger;
+const RecordReplayControl = sandbox.RecordReplayControl;
+
+const dbg = new Debugger();
+
+// We are interested in debugging all globals in the process.
+dbg.onNewGlobalObject = function(global) {
+  dbg.addDebuggee(global);
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Utilities
+///////////////////////////////////////////////////////////////////////////////
+
+function assert(v) {
+  if (!v) {
+    RecordReplayControl.dump("Assertion Failed: " + (new Error()).stack + "\n");
+    throw new Error("Assertion Failed!");
+  }
+}
+
+// Bidirectional map between objects and IDs.
+function IdMap() {
+  this._idToObject = [ undefined ];
+  this._objectToId = new Map();
+}
+
+IdMap.prototype = {
+  add(object) {
+    assert(object && !this._objectToId.has(object));
+    const id = this._idToObject.length;
+    this._idToObject.push(object);
+    this._objectToId.set(object, id);
+    return id;
+  },
+
+  getId(object) {
+    const id = this._objectToId.get(object);
+    return (id === undefined) ? 0 : id;
+  },
+
+  getObject(id) {
+    return this._idToObject[id];
+  },
+
+  forEach(callback) {
+    for (let i = 1; i < this._idToObject.length; i++) {
+      callback(i, this._idToObject[i]);
+    }
+  },
+
+  lastId() {
+    return this._idToObject.length - 1;
+  },
+};
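+
+// IdMap usage sketch (someObject is a placeholder): ids start at 1, and
+// getId() returns 0 for objects that were never added:
+//
+//   const map = new IdMap();
+//   const id = map.add(someObject);   // 1 for the first object added
+//   map.getObject(id) === someObject; // true
+//   map.getId({}) === 0;              // true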
+
+function countScriptFrames() {
+  let count = 0;
+  let frame = dbg.getNewestFrame();
+  while (frame) {
+    if (considerScript(frame.script)) {
+      count++;
+    }
+    frame = frame.older;
+  }
+  return count;
+}
+
+function scriptFrameForIndex(index) {
+  let indexFromTop = countScriptFrames() - 1 - index;
+  let frame = dbg.getNewestFrame();
+  while (true) {
+    if (considerScript(frame.script)) {
+      if (indexFromTop-- == 0) {
+        break;
+      }
+    }
+    frame = frame.older;
+  }
+  return frame;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Persistent State
+///////////////////////////////////////////////////////////////////////////////
+
+// Association between Debugger.Scripts and their IDs. The indices that this
+// table assigns to scripts are stable across the entire recording, even though
+// this table (like all JS state) is included in snapshots, rolled back when
+// rewinding, and so forth.  In debuggee time, this table only grows (there is
+// no way to remove entries). Scripts created for debugger activity (e.g. eval)
+// are ignored, and off thread compilation is disabled, so this table acquires
+// the same scripts in the same order as we roll back and run forward in the
+// recording.
+const gScripts = new IdMap();
+
+function addScript(script) {
+  gScripts.add(script);
+  script.getChildScripts().forEach(addScript);
+}
+
+// Association between Debugger.ScriptSources and their IDs. As for gScripts,
+// the indices assigned to a script source are consistent across all replays
+// and rewinding.
+const gScriptSources = new IdMap();
+
+function addScriptSource(source) {
+  gScriptSources.add(source);
+}
+
+function considerScript(script) {
+  return script.url
+      && !script.url.startsWith("resource:")
+      && !script.url.startsWith("chrome:");
+}
+
+dbg.onNewScript = function(script) {
+  if (RecordReplayControl.areThreadEventsDisallowed()) {
+    // This script is part of an eval on behalf of the debugger.
+    return;
+  }
+
+  if (!considerScript(script)) {
+    return;
+  }
+
+  addScript(script);
+  addScriptSource(script.source);
+
+  // Each onNewScript call advances the progress counter, to preserve the
+  // ProgressCounter invariant when onNewScript is called multiple times
+  // without executing any scripts.
+  RecordReplayControl.advanceProgressCounter();
+
+  if (gHasNewScriptHandler) {
+    RecordReplayControl.positionHit({ kind: "NewScript" });
+  }
+
+  // Check in case any handlers we need to install are on the scripts just
+  // created.
+  installPendingHandlers();
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// Position Handler State
+///////////////////////////////////////////////////////////////////////////////
+
+// Whether there is a position handler for NewScript.
+let gHasNewScriptHandler = false;
+
+// Whether there is a position handler for EnterFrame.
+let gHasEnterFrameHandler = false;
+
+// Handlers we tried to install but couldn't due to a script not existing.
+// Breakpoints requested by the middleman --- which are preserved when
+// restoring earlier checkpoints --- identify target scripts by their stable ID
+// in gScripts. This array holds the breakpoints for scripts whose IDs we know
+// but which have not been created yet.
+const gPendingPcHandlers = [];
+
+// Script/offset pairs where we have installed a breakpoint handler. We have to
+// avoid installing duplicate handlers here because they will both be called.
+const gInstalledPcHandlers = [];
+
+// Callbacks to test whether a frame should have an OnPop handler.
+const gOnPopFilters = [];
+
+// eslint-disable-next-line no-unused-vars
+function ClearPositionHandlers() {
+  dbg.clearAllBreakpoints();
+  dbg.onEnterFrame = undefined;
+
+  gHasNewScriptHandler = false;
+  gHasEnterFrameHandler = false;
+  gPendingPcHandlers.length = 0;
+  gInstalledPcHandlers.length = 0;
+  gOnPopFilters.length = 0;
+}
+
+function installPendingHandlers() {
+  const pending = gPendingPcHandlers.map(position => position);
+  gPendingPcHandlers.length = 0;
+
+  pending.forEach(EnsurePositionHandler);
+}
+
+// The completion state of any frame that is being popped.
+let gPopFrameResult = null;
+
+function onPopFrame(completion) {
+  gPopFrameResult = completion;
+  RecordReplayControl.positionHit({
+    kind: "OnPop",
+    script: gScripts.getId(this.script),
+    frameIndex: countScriptFrames() - 1,
+  });
+  gPopFrameResult = null;
+}
+
+function onEnterFrame(frame) {
+  if (gHasEnterFrameHandler) {
+    RecordReplayControl.positionHit({ kind: "EnterFrame" });
+  }
+
+  if (considerScript(frame.script)) {
+    gOnPopFilters.forEach(filter => {
+      if (filter(frame)) {
+        frame.onPop = onPopFrame;
+      }
+    });
+  }
+}
+
+function addOnPopFilter(filter) {
+  let frame = dbg.getNewestFrame();
+  while (frame) {
+    if (considerScript(frame.script) && filter(frame)) {
+      frame.onPop = onPopFrame;
+    }
+    frame = frame.older;
+  }
+
+  gOnPopFilters.push(filter);
+  dbg.onEnterFrame = onEnterFrame;
+}
+
+function EnsurePositionHandler(position) {
+  switch (position.kind) {
+  case "Break":
+  case "OnStep":
+    let debugScript;
+    if (position.script) {
+      debugScript = gScripts.getObject(position.script);
+      if (!debugScript) {
+        // The script referred to in this position does not exist yet, so we
+        // can't install a handler for it. Add a pending handler so that we
+        // can install the handler once the script is created.
+        gPendingPcHandlers.push(position);
+        return;
+      }
+    }
+
+    const match = function({script, offset}) {
+      return script == position.script && offset == position.offset;
+    };
+    if (gInstalledPcHandlers.some(match)) {
+      return;
+    }
+    gInstalledPcHandlers.push({ script: position.script, offset: position.offset });
+
+    debugScript.setBreakpoint(position.offset, {
+      hit() {
+        RecordReplayControl.positionHit({
+          kind: "OnStep",
+          script: position.script,
+          offset: position.offset,
+          frameIndex: countScriptFrames() - 1,
+        });
+      }
+    });
+    break;
+  case "OnPop":
+    if (position.script) {
+      addOnPopFilter(frame => gScripts.getId(frame.script) == position.script);
+    } else {
+      addOnPopFilter(frame => true);
+    }
+    break;
+  case "EnterFrame":
+    gHasEnterFrameHandler = true;
+    dbg.onEnterFrame = onEnterFrame;
+    break;
+  case "NewScript":
+    gHasNewScriptHandler = true;
+    break;
+  }
+}
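+
+// Sketch of the pending-handler flow: a Break/OnStep position naming a script
+// id that gScripts does not know yet is parked in gPendingPcHandlers; once
+// dbg.onNewScript assigns that id, installPendingHandlers() re-runs
+// EnsurePositionHandler and the breakpoint is installed for real.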
+
+// eslint-disable-next-line no-unused-vars
+function GetEntryPosition(position) {
+  if (position.kind == "Break" || position.kind == "OnStep") {
+    const script = gScripts.getObject(position.script);
+    if (script) {
+      return {
+        kind: "Break",
+        script: position.script,
+        offset: script.mainOffset,
+      };
+    }
+  }
+  return null;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Paused State
+///////////////////////////////////////////////////////////////////////////////
+
+let gPausedObjects = new IdMap();
+
+function getObjectId(obj) {
+  const id = gPausedObjects.getId(obj);
+  if (!id && obj) {
+    assert((obj instanceof Debugger.Object) ||
+           (obj instanceof Debugger.Environment));
+    return gPausedObjects.add(obj);
+  }
+  return id;
+}
+
+function convertValue(value) {
+  if (value instanceof Debugger.Object) {
+    return { object: getObjectId(value) };
+  }
+  if (value === undefined) {
+    return { special: "undefined" };
+  }
+  if (value !== value) { // eslint-disable-line no-self-compare
+    return { special: "NaN" };
+  }
+  if (value == Infinity) {
+    return { special: "Infinity" };
+  }
+  if (value == -Infinity) {
+    return { special: "-Infinity" };
+  }
+  return value;
+}
+
+function convertCompletionValue(value) {
+  if ("return" in value) {
+    return { return: convertValue(value.return) };
+  }
+  if ("throw" in value) {
+    return { throw: convertValue(value.throw) };
+  }
+  throw new Error("Unexpected completion value");
+}
+
+// eslint-disable-next-line no-unused-vars
+function ClearPausedState() {
+  gPausedObjects = new IdMap();
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Handler Helpers
+///////////////////////////////////////////////////////////////////////////////
+
+function getScriptData(id) {
+  const script = gScripts.getObject(id);
+  return {
+    id,
+    sourceId: gScriptSources.getId(script.source),
+    startLine: script.startLine,
+    lineCount: script.lineCount,
+    sourceStart: script.sourceStart,
+    sourceLength: script.sourceLength,
+    displayName: script.displayName,
+    url: script.url,
+  };
+}
+
+function forwardToScript(name) {
+  return request => gScripts.getObject(request.id)[name](request.value);
+}
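+
+// For example, forwardToScript("getLineOffsets") produces a handler that
+// turns { type: "getLineOffsets", id, value } into
+// gScripts.getObject(id).getLineOffsets(value), the counterpart of
+// ReplayDebuggerScript._forward() in debugger.js.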
+
+///////////////////////////////////////////////////////////////////////////////
+// Handlers
+///////////////////////////////////////////////////////////////////////////////
+
+const gRequestHandlers = {
+
+  findScripts(request) {
+    const rv = [];
+    gScripts.forEach((id) => {
+      rv.push(getScriptData(id));
+    });
+    return rv;
+  },
+
+  getScript(request) {
+    return getScriptData(request.id);
+  },
+
+  getNewScript(request) {
+    return getScriptData(gScripts.lastId());
+  },
+
+  getContent(request) {
+    return RecordReplayControl.getContent(request.url);
+  },
+
+  getSource(request) {
+    const source = gScriptSources.getObject(request.id);
+    const introductionScript = gScripts.getId(source.introductionScript);
+    return {
+      id: request.id,
+      text: source.text,
+      url: source.url,
+      displayURL: source.displayURL,
+      elementAttributeName: source.elementAttributeName,
+      introductionScript,
+      introductionOffset: introductionScript ? source.introductionOffset : undefined,
+      introductionType: source.introductionType,
+      sourceMapURL: source.sourceMapURL,
+    };
+  },
+
+  getObject(request) {
+    const object = gPausedObjects.getObject(request.id);
+    if (object instanceof Debugger.Object) {
+      return {
+        id: request.id,
+        kind: "Object",
+        callable: object.callable,
+        isBoundFunction: object.isBoundFunction,
+        isArrowFunction: object.isArrowFunction,
+        isGeneratorFunction: object.isGeneratorFunction,
+        isAsyncFunction: object.isAsyncFunction,
+        proto: getObjectId(object.proto),
+        class: object.class,
+        name: object.name,
+        displayName: object.displayName,
+        parameterNames: object.parameterNames,
+        script: gScripts.getId(object.script),
+        environment: getObjectId(object.environment),
+        global: getObjectId(object.global),
+        isProxy: object.isProxy,
+        isExtensible: object.isExtensible(),
+        isSealed: object.isSealed(),
+        isFrozen: object.isFrozen(),
+      };
+    }
+    if (object instanceof Debugger.Environment) {
+      return {
+        id: request.id,
+        kind: "Environment",
+        type: object.type,
+        parent: getObjectId(object.parent),
+        object: object.type == "declarative" ? 0 : getObjectId(object.object),
+        callee: getObjectId(object.callee),
+        optimizedOut: object.optimizedOut,
+      };
+    }
+    throw new Error("Unknown object kind");
+  },
+
+  getObjectProperties(request) {
+    if (!RecordReplayControl.maybeDivergeFromRecording()) {
+      return [{
+        name: "Unknown properties",
+        desc: {
+          value: "Recording divergence in getObjectProperties",
+          enumerable: true
+        },
+      }];
+    }
+
+    const object = gPausedObjects.getObject(request.id);
+    const names = object.getOwnPropertyNames();
+
+    return names.map(name => {
+      const desc = object.getOwnPropertyDescriptor(name);
+      if ("value" in desc) {
+        desc.value = convertValue(desc.value);
+      }
+      if ("get" in desc) {
+        desc.get = getObjectId(desc.get);
+      }
+      if ("set" in desc) {
+        desc.set = getObjectId(desc.set);
+      }
+      return { name, desc };
+    });
+  },
+
+  getEnvironmentNames(request) {
+    if (!RecordReplayControl.maybeDivergeFromRecording()) {
+      return [{name: "Unknown names",
+               value: "Recording divergence in getEnvironmentNames" }];
+    }
+
+    const env = gPausedObjects.getObject(request.id);
+    const names = env.names();
+
+    return names.map(name => {
+      return { name, value: convertValue(env.getVariable(name)) };
+    });
+  },
+
+  getFrame(request) {
+    if (request.index == -1 /* NewestFrameIndex */) {
+      const numFrames = countScriptFrames();
+      if (!numFrames) {
+        // Return an empty object when there are no frames.
+        return {};
+      }
+      request.index = numFrames - 1;
+    }
+
+    const frame = scriptFrameForIndex(request.index);
+
+    let _arguments = null;
+    if (frame.arguments) {
+      _arguments = [];
+      for (let i = 0; i < frame.arguments.length; i++) {
+        _arguments.push(convertValue(frame.arguments[i]));
+      }
+    }
+
+    return {
+      index: request.index,
+      type: frame.type,
+      callee: getObjectId(frame.callee),
+      environment: getObjectId(frame.environment),
+      generator: frame.generator,
+      constructing: frame.constructing,
+      this: convertValue(frame.this),
+      script: gScripts.getId(frame.script),
+      offset: frame.offset,
+      arguments: _arguments,
+    };
+  },
+
+  getLineOffsets: forwardToScript("getLineOffsets"),
+  getOffsetLocation: forwardToScript("getOffsetLocation"),
+  getSuccessorOffsets: forwardToScript("getSuccessorOffsets"),
+  getPredecessorOffsets: forwardToScript("getPredecessorOffsets"),
+
+  frameEvaluate(request) {
+    if (!RecordReplayControl.maybeDivergeFromRecording()) {
+      return { throw: "Recording divergence in frameEvaluate" };
+    }
+
+    const frame = scriptFrameForIndex(request.index);
+    const rv = frame.eval(request.text, request.options);
+    return convertCompletionValue(rv);
+  },
+
+  popFrameResult(request) {
+    return gPopFrameResult ? convertCompletionValue(gPopFrameResult) : {};
+  },
+};
+
+// eslint-disable-next-line no-unused-vars
+function ProcessRequest(request) {
+  try {
+    if (gRequestHandlers[request.type]) {
+      return gRequestHandlers[request.type](request);
+    }
+    return { exception: "No handler for " + request.type };
+  } catch (e) {
+    RecordReplayControl.dump("ReplayDebugger Record/Replay Error: " + e + "\n");
+    return { exception: "" + e };
+  }
+}
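+
+// Sketch: ReplayDebugger._sendRequest() in debugger.js lands here, so
+//
+//   ProcessRequest({ type: "findScripts" })
+//
+// returns an array of script data objects, while an unrecognized type comes
+// back as { exception: "No handler for ..." } instead of throwing across the
+// process boundary.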
+
+// eslint-disable-next-line no-unused-vars
+var EXPORTED_SYMBOLS = [
+  "EnsurePositionHandler",
+  "ClearPositionHandlers",
+  "ClearPausedState",
+  "ProcessRequest",
+  "GetEntryPosition",
+];
--- a/dom/clients/manager/ClientManagerService.cpp
+++ b/dom/clients/manager/ClientManagerService.cpp
@@ -600,17 +600,18 @@ public:
       targetProcess = mSourceProcess;
     }
 
     // Otherwise, use our normal remote process selection mechanism for
     // opening the window.  This will start a process if one is not
     // present.
     if (!targetProcess) {
       targetProcess =
-        ContentParent::GetNewOrUsedBrowserProcess(NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
+        ContentParent::GetNewOrUsedBrowserProcess(nullptr,
+                                                  NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
                                                   ContentParent::GetInitialProcessPriority(nullptr),
                                                   nullptr);
     }
 
     // But starting a process can fail for any number of reasons. Reject the
     // promise if we could not.
     if (!targetProcess) {
       mPromise->Reject(NS_ERROR_ABORT, __func__);
--- a/dom/interfaces/base/nsITabParent.idl
+++ b/dom/interfaces/base/nsITabParent.idl
@@ -108,9 +108,15 @@ interface nsITabParent : nsISupports
                              in nsViewID aScrollId, in uint32_t aPresShellId);
 
   /**
    * Notify APZ to stop autoscrolling.
    * aScrollId and aPresShellId identify the scroll frame that is being
    * autoscrolled.
    */
   void stopApzAutoscroll(in nsViewID aScrollId, in uint32_t aPresShellId);
+
+  /**
+   * Save a recording of the associated content process' behavior to the
+   * specified filename. Returns whether the process is being recorded.
+   */
+  bool saveRecording(in AString aFileName);
 };
--- a/dom/ipc/ContentChild.cpp
+++ b/dom/ipc/ContentChild.cpp
@@ -72,16 +72,17 @@
 #include "mozilla/loader/ScriptCacheActors.h"
 #include "mozilla/net/NeckoChild.h"
 #include "mozilla/net/CookieServiceChild.h"
 #include "mozilla/net/CaptivePortalService.h"
 #include "mozilla/PerformanceMetricsCollector.h"
 #include "mozilla/PerformanceUtils.h"
 #include "mozilla/plugins/PluginInstanceParent.h"
 #include "mozilla/plugins/PluginModuleParent.h"
+#include "mozilla/recordreplay/ParentIPC.h"
 #include "mozilla/widget/ScreenManager.h"
 #include "mozilla/widget/WidgetMessageUtils.h"
 #include "nsBaseDragService.h"
 #include "mozilla/media/MediaChild.h"
 #include "mozilla/BasePrincipal.h"
 #include "mozilla/WebBrowserPersistDocumentChild.h"
 #include "mozilla/HangDetails.h"
 #include "imgLoader.h"
@@ -668,16 +669,22 @@ ContentChild::Init(MessageLoop* aIOLoop,
   // Once we start sending IPC messages, we need the thread manager to be
   // initialized so we can deal with the responses. Do that here before we
   // try to construct the crash reporter.
   nsresult rv = nsThreadManager::get().Init();
   if (NS_WARN_IF(NS_FAILED(rv))) {
     return false;
   }
 
+  // Middleman processes use a special channel for forwarding messages to
+  // their own children.
+  if (recordreplay::IsMiddleman()) {
+    SetMiddlemanIPCChannel(recordreplay::parent::ChannelToUIProcess());
+  }
+
   if (!Open(aChannel, aParentPid, aIOLoop)) {
     return false;
   }
   sSingleton = this;
 
   // If communications with the parent have broken down, take the process
   // down so it's not hanging around.
   GetIPCChannel()->SetAbortOnError(true);
@@ -3816,16 +3823,23 @@ ContentChild::RecvResumeInputEventQueue(
 
 mozilla::ipc::IPCResult
 ContentChild::RecvAddDynamicScalars(nsTArray<DynamicScalarDefinition>&& aDefs)
 {
   TelemetryIPC::AddDynamicScalarDefinitions(aDefs);
   return IPC_OK();
 }
 
+mozilla::ipc::IPCResult
+ContentChild::RecvSaveRecording(const FileDescriptor& aFile)
+{
+  recordreplay::parent::SaveRecording(aFile);
+  return IPC_OK();
+}
+
 already_AddRefed<nsIEventTarget>
 ContentChild::GetSpecificMessageEventTarget(const Message& aMsg)
 {
   switch(aMsg.type()) {
     // Javascript
     case PJavaScript::Msg_DropTemporaryStrongReferences__ID:
     case PJavaScript::Msg_DropObject__ID:
 
--- a/dom/ipc/ContentChild.h
+++ b/dom/ipc/ContentChild.h
@@ -731,16 +731,19 @@ public:
 
   virtual mozilla::ipc::IPCResult
   RecvPClientOpenWindowOpConstructor(PClientOpenWindowOpChild* aActor,
                                      const ClientOpenWindowArgs& aArgs) override;
 
   virtual bool
   DeallocPClientOpenWindowOpChild(PClientOpenWindowOpChild* aActor) override;
 
+  mozilla::ipc::IPCResult
+  RecvSaveRecording(const FileDescriptor& aFile) override;
+
 #ifdef NIGHTLY_BUILD
   // Fetch the current number of pending input events.
   //
   // NOTE: This method performs an atomic read, and is safe to call from all threads.
   uint32_t
   GetPendingInputEvents()
   {
     return mPendingInputEvents;
--- a/dom/ipc/ContentParent.cpp
+++ b/dom/ipc/ContentParent.cpp
@@ -89,16 +89,17 @@
 #include "mozilla/Move.h"
 #include "mozilla/net/NeckoParent.h"
 #include "mozilla/net/CookieServiceParent.h"
 #include "mozilla/net/PCookieServiceParent.h"
 #include "mozilla/plugins/PluginBridge.h"
 #include "mozilla/Preferences.h"
 #include "mozilla/ProcessHangMonitor.h"
 #include "mozilla/ProcessHangMonitorIPC.h"
+#include "mozilla/recordreplay/ParentIPC.h"
 #include "mozilla/Scheduler.h"
 #include "mozilla/ScopeExit.h"
 #include "mozilla/ScriptPreloader.h"
 #include "mozilla/Services.h"
 #include "mozilla/StaticPtr.h"
 #include "mozilla/StaticPrefs.h"
 #include "mozilla/Telemetry.h"
 #include "mozilla/TelemetryIPC.h"
@@ -111,16 +112,18 @@
 #include "nsCDefaultURIFixup.h"
 #include "nsCExternalHandlerService.h"
 #include "nsCOMPtr.h"
 #include "nsChromeRegistryChrome.h"
 #include "nsConsoleMessage.h"
 #include "nsConsoleService.h"
 #include "nsContentUtils.h"
 #include "nsDebugImpl.h"
+#include "nsDirectoryServiceDefs.h"
+#include "nsEmbedCID.h"
 #include "nsFrameLoader.h"
 #include "nsFrameMessageManager.h"
 #include "nsHashPropertyBag.h"
 #include "nsIAlertsService.h"
 #include "nsIClipboard.h"
 #include "nsICookie.h"
 #include "nsContentPermissionHelper.h"
 #include "nsIContentProcess.h"
@@ -138,16 +141,17 @@
 #include "nsIInterfaceRequestorUtils.h"
 #include "nsIMemoryInfoDumper.h"
 #include "nsIMemoryReporter.h"
 #include "nsIMozBrowserFrame.h"
 #include "nsIMutable.h"
 #include "nsIObserverService.h"
 #include "nsIParentChannel.h"
 #include "nsIPresShell.h"
+#include "nsIPromptService.h"
 #include "nsIRemoteWindowContext.h"
 #include "nsIScriptError.h"
 #include "nsIScriptSecurityManager.h"
 #include "nsISiteSecurityService.h"
 #include "nsISound.h"
 #include "nsISpellChecker.h"
 #include "nsIStringBundle.h"
 #include "nsISupportsPrimitives.h"
@@ -603,17 +607,19 @@ static const char* sObserverTopics[] = {
 
 // PreallocateProcess is called by the PreallocatedProcessManager.
 // ContentParent then takes this process back within GetNewOrUsedBrowserProcess.
 /*static*/ already_AddRefed<ContentParent>
 ContentParent::PreallocateProcess()
 {
   RefPtr<ContentParent> process =
     new ContentParent(/* aOpener = */ nullptr,
-                      NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE));
+                      NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
+                      eNotRecordingOrReplaying,
+                      /* aRecordingFile = */ EmptyString());
 
   PreallocatedProcessManager::AddBlocker(process);
 
   if (!process->LaunchSubprocess(PROCESS_PRIORITY_PREALLOC)) {
     return nullptr;
   }
 
   return process.forget();
@@ -759,28 +765,65 @@ ContentParent::MinTabSelect(const nsTArr
         min = tabCount;
       }
     }
   }
 
   return candidate.forget();
 }
 
+static bool
+CreateTemporaryRecordingFile(nsAString& aResult)
+{
+  unsigned long elapsed = (TimeStamp::Now() - TimeStamp::ProcessCreation()).ToMilliseconds();
+
+  nsCOMPtr<nsIFile> file;
+  return !NS_FAILED(NS_GetSpecialDirectory(NS_OS_TEMP_DIR, getter_AddRefs(file)))
+      && !NS_FAILED(file->AppendNative(nsPrintfCString("Recording%lu", elapsed)))
+      && !NS_FAILED(file->GetPath(aResult));
+}
+
 /*static*/ already_AddRefed<ContentParent>
-ContentParent::GetNewOrUsedBrowserProcess(const nsAString& aRemoteType,
+ContentParent::GetNewOrUsedBrowserProcess(Element* aFrameElement,
+                                          const nsAString& aRemoteType,
                                           ProcessPriority aPriority,
                                           ContentParent* aOpener,
                                           bool aPreferUsed)
 {
+  // Figure out if this process will be recording or replaying, and which file
+  // to use for the recording.
+  RecordReplayState recordReplayState = eNotRecordingOrReplaying;
+  nsAutoString recordingFile;
+  if (aFrameElement) {
+    aFrameElement->GetAttr(kNameSpaceID_None, nsGkAtoms::ReplayExecution, recordingFile);
+    if (!recordingFile.IsEmpty()) {
+      recordReplayState = eReplaying;
+    } else {
+      aFrameElement->GetAttr(kNameSpaceID_None, nsGkAtoms::RecordExecution, recordingFile);
+      if (recordingFile.IsEmpty() && recordreplay::parent::SaveAllRecordingsDirectory()) {
+        recordingFile.AssignLiteral("*");
+      }
+      if (!recordingFile.IsEmpty()) {
+        if (recordingFile.EqualsLiteral("*") && !CreateTemporaryRecordingFile(recordingFile)) {
+          return nullptr;
+        }
+        recordReplayState = eRecording;
+      }
+    }
+  }
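+
+  // Sketch (frame element markup is illustrative): a browser element with
+  // recordExecution="*" records into a temporary file created by
+  // CreateTemporaryRecordingFile(), while replayExecution="/path/Recording123"
+  // replays that file in a new content process.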
+
   nsTArray<ContentParent*>& contentParents = GetOrCreatePool(aRemoteType);
   uint32_t maxContentParents = GetMaxProcessCount(aRemoteType);
-  if (aRemoteType.EqualsLiteral(LARGE_ALLOCATION_REMOTE_TYPE)) {
+  if (recordReplayState != eNotRecordingOrReplaying) {
+    // Fall through and always create a new process when recording or replaying.
+  } else if (aRemoteType.EqualsLiteral(LARGE_ALLOCATION_REMOTE_TYPE)) {
     // We never want to re-use Large-Allocation processes.
     if (contentParents.Length() >= maxContentParents) {
-      return GetNewOrUsedBrowserProcess(NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
+      return GetNewOrUsedBrowserProcess(aFrameElement,
+                                        NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
                                         aPriority,
                                         aOpener);
     }
   } else {
     uint32_t numberOfParents = contentParents.Length();
     nsTArray<nsIContentProcessInfo*> infos(numberOfParents);
     for (auto* cp : contentParents) {
       infos.AppendElement(cp->mScriptableHelper);
@@ -828,26 +871,29 @@ ContentParent::GetNewOrUsedBrowserProces
       p->mOpener = aOpener;
       contentParents.AppendElement(p);
       p->mActivateTS = TimeStamp::Now();
       return p.forget();
     }
   }
 
   // Create a new process from scratch.
-  RefPtr<ContentParent> p = new ContentParent(aOpener, aRemoteType);
+  RefPtr<ContentParent> p = new ContentParent(aOpener, aRemoteType, recordReplayState, recordingFile);
 
   // Until the new process is ready let's not allow to start up any preallocated processes.
   PreallocatedProcessManager::AddBlocker(p);
 
   if (!p->LaunchSubprocess(aPriority)) {
     return nullptr;
   }
 
-  contentParents.AppendElement(p);
+  if (recordReplayState == eNotRecordingOrReplaying) {
+    contentParents.AppendElement(p);
+  }
+
   p->mActivateTS = TimeStamp::Now();
   return p.forget();
 }
 
 /*static*/ already_AddRefed<ContentParent>
 ContentParent::GetNewOrUsedJSPluginProcess(uint32_t aPluginID,
                                            const hal::ProcessPriority& aPriority)
 {
@@ -934,17 +980,17 @@ ContentParent::RecvCreateChildProcess(co
     return IPC_FAIL_NO_REASON(this);
   }
 
   if (tc.GetTabContext().IsJSPlugin()) {
     cp = GetNewOrUsedJSPluginProcess(tc.GetTabContext().JSPluginId(),
                                      aPriority);
   }
   else {
-    cp = GetNewOrUsedBrowserProcess(NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
+    cp = GetNewOrUsedBrowserProcess(nullptr, NS_LITERAL_STRING(DEFAULT_REMOTE_TYPE),
                                     aPriority, this);
   }
 
   if (!cp) {
     *aCpId = 0;
     *aIsForBrowser = false;
     return IPC_OK();
   }
@@ -1155,17 +1201,17 @@ ContentParent::CreateBrowser(const TabCo
       constructorSender = aOpenerContentParent;
     } else {
       if (aContext.IsJSPlugin()) {
         constructorSender =
           GetNewOrUsedJSPluginProcess(aContext.JSPluginId(),
                                       initialPriority);
       } else {
         constructorSender =
-          GetNewOrUsedBrowserProcess(remoteType, initialPriority,
+          GetNewOrUsedBrowserProcess(aFrameElement, remoteType, initialPriority,
                                      nullptr, isPreloadBrowser);
       }
       if (!constructorSender) {
         return nullptr;
       }
     }
     ContentProcessManager* cpm = ContentProcessManager::GetSingleton();
     cpm->RegisterRemoteFrame(tabId,
@@ -1397,16 +1443,28 @@ ContentParent::ShutDownProcess(ShutDownM
     static_cast<ScriptableCPInfo*>(mScriptableHelper.get())->ProcessDied();
     mScriptableHelper = nullptr;
   }
 
   // Shutting down by sending a shutdown message works differently than the
   // other methods. We first call Shutdown() in the child. After the child is
   // ready, it calls FinishShutdown() on us. Then we close the channel.
   if (aMethod == SEND_SHUTDOWN_MESSAGE) {
+    if (const char* directory = recordreplay::parent::SaveAllRecordingsDirectory()) {
+      // Save a recording for the child process before it shuts down. The file
+      // name includes the time elapsed since process creation, so successive
+      // saves get distinct names.
+      unsigned long elapsed = (TimeStamp::Now() - TimeStamp::ProcessCreation()).ToMilliseconds();
+      nsCOMPtr<nsIFile> file;
+      if (NS_SUCCEEDED(NS_NewNativeLocalFile(nsDependentCString(directory), false,
+                                             getter_AddRefs(file))) &&
+          NS_SUCCEEDED(file->AppendNative(nsPrintfCString("Recording%lu", elapsed)))) {
+        bool unused;
+        SaveRecording(file, &unused);
+      }
+    }
+
     if (mIPCOpen && !mShutdownPending) {
       // Stop sending input events with input priority when shutting down.
       SetInputPriorityEventEnabled(false);
       if (SendShutdown()) {
         mShutdownPending = true;
         // Start the force-kill timer if we haven't already.
         StartForceKillTimer();
       }
@@ -1786,16 +1844,24 @@ ContentParent::ActorDestroy(ActorDestroy
   }
   mIdleListeners.Clear();
 
   MessageLoop::current()->
     PostTask(NewRunnableFunction("DelayedDeleteSubprocessRunnable",
                                  DelayedDeleteSubprocess, mSubprocess));
   mSubprocess = nullptr;
 
+  // Delete any remaining replaying children.
+  for (auto& replayingProcess : mReplayingChildren) {
+    if (replayingProcess) {
+      DelayedDeleteSubprocess(replayingProcess);
+      replayingProcess = nullptr;
+    }
+  }
+
   // IPDL rules require actors to live on past ActorDestroy, but it
   // may be that the kungFuDeathGrip above is the last reference to
   // |this|.  If so, when we go out of scope here, we're deleted and
   // all hell breaks loose.
   //
   // This runnable ensures that a reference to |this| lives on at
   // least until after the current task finishes running.
   NS_DispatchToCurrentThread(new DelayedDeleteContentParentTask(this));
@@ -1973,16 +2039,75 @@ ContentParent::NotifyTabDestroyed(const 
     MessageLoop::current()->PostTask(
       NewRunnableMethod<ShutDownMethod>("dom::ContentParent::ShutDownProcess",
                                         this,
                                         &ContentParent::ShutDownProcess,
                                         SEND_SHUTDOWN_MESSAGE));
   }
 }
 
+mozilla::ipc::IPCResult
+ContentParent::RecvOpenRecordReplayChannel(const uint32_t& aChannelId,
+                                           FileDescriptor* aConnection)
+{
+  // We should only get this message from the child if it is recording or replaying.
+  if (!recordreplay::IsRecordingOrReplaying()) {
+    return IPC_FAIL_NO_REASON(this);
+  }
+
+  recordreplay::parent::OpenChannel(Pid(), aChannelId, aConnection);
+  return IPC_OK();
+}
+
+mozilla::ipc::IPCResult
+ContentParent::RecvCreateReplayingProcess(const uint32_t& aChannelId)
+{
+  // We should only get this message from the child if it is recording or replaying.
+  if (!recordreplay::IsRecordingOrReplaying()) {
+    return IPC_FAIL_NO_REASON(this);
+  }
+
+  while (aChannelId >= mReplayingChildren.length()) {
+    if (!mReplayingChildren.append(nullptr)) {
+      return IPC_FAIL_NO_REASON(this);
+    }
+  }
+  if (mReplayingChildren[aChannelId]) {
+    return IPC_FAIL_NO_REASON(this);
+  }
+
+  std::vector<std::string> extraArgs;
+  recordreplay::parent::GetArgumentsForChildProcess(Pid(), aChannelId,
+                                                    NS_ConvertUTF16toUTF8(mRecordingFile).get(),
+                                                    /* aRecording = */ false,
+                                                    extraArgs);
+
+  mReplayingChildren[aChannelId] = new GeckoChildProcessHost(GeckoProcessType_Content);
+  if (!mReplayingChildren[aChannelId]->LaunchAndWaitForProcessHandle(extraArgs)) {
+    return IPC_FAIL_NO_REASON(this);
+  }
+
+  return IPC_OK();
+}
+
+mozilla::ipc::IPCResult
+ContentParent::RecvTerminateReplayingProcess(const uint32_t& aChannelId)
+{
+  // We should only get this message from the child if it is recording or replaying.
+  if (!recordreplay::IsRecordingOrReplaying()) {
+    return IPC_FAIL_NO_REASON(this);
+  }
+
+  if (aChannelId < mReplayingChildren.length() && mReplayingChildren[aChannelId]) {
+    DelayedDeleteSubprocess(mReplayingChildren[aChannelId]);
+    mReplayingChildren[aChannelId] = nullptr;
+  }
+  return IPC_OK();
+}
+
 jsipc::CPOWManager*
 ContentParent::GetCPOWManager()
 {
   if (PJavaScriptParent* p = LoneManagedOrNullAsserts(ManagedPJavaScriptParent())) {
     return CPOWManagerFor(p);
   }
   return nullptr;
 }
@@ -2093,16 +2218,28 @@ ContentParent::LaunchSubprocess(ProcessP
   if (gSafeMode) {
     extraArgs.push_back("-safeMode");
   }
 
   nsCString parentBuildID(mozilla::PlatformBuildID());
   extraArgs.push_back("-parentBuildID");
   extraArgs.push_back(parentBuildID.get());
 
+  // Specify whether the process is recording or replaying an execution.
+  if (mRecordReplayState != eNotRecordingOrReplaying) {
+    nsPrintfCString buf("%d", mRecordReplayState == eRecording
+                              ? (int) recordreplay::ProcessKind::MiddlemanRecording
+                              : (int) recordreplay::ProcessKind::MiddlemanReplaying);
+    extraArgs.push_back(recordreplay::gProcessKindOption);
+    extraArgs.push_back(buf.get());
+
+    extraArgs.push_back(recordreplay::gRecordingFileOption);
+    extraArgs.push_back(NS_ConvertUTF16toUTF8(mRecordingFile).get());
+  }
+
   SetOtherProcessId(kInvalidProcessId, ProcessIdState::ePending);
 #ifdef ASYNC_CONTENTPROC_LAUNCH
   if (!mSubprocess->Launch(extraArgs)) {
 #else
   if (!mSubprocess->LaunchAndWaitForProcessHandle(extraArgs)) {
 #endif
     NS_ERROR("failed to launch child in the parent");
     MarkAsDead();
@@ -2146,30 +2283,34 @@ ContentParent::LaunchSubprocess(ProcessP
 
   Init();
 
   return true;
 }
 
 ContentParent::ContentParent(ContentParent* aOpener,
                              const nsAString& aRemoteType,
+                             RecordReplayState aRecordReplayState,
+                             const nsAString& aRecordingFile,
                              int32_t aJSPluginID)
   : nsIContentParent()
   , mSubprocess(nullptr)
   , mLaunchTS(TimeStamp::Now())
   , mActivateTS(TimeStamp::Now())
   , mOpener(aOpener)
   , mRemoteType(aRemoteType)
   , mChildID(gContentChildID++)
   , mGeolocationWatchID(-1)
   , mJSPluginID(aJSPluginID)
   , mNumDestroyingTabs(0)
   , mIsAvailable(true)
   , mIsAlive(true)
   , mIsForBrowser(!mRemoteType.IsEmpty())
+  , mRecordReplayState(aRecordReplayState)
+  , mRecordingFile(aRecordingFile)
   , mCalledClose(false)
   , mCalledKillHard(false)
   , mCreatedPairedMinidumps(false)
   , mShutdownPending(false)
   , mIPCOpen(true)
   , mIsRemoteInputEventQueueEnabled(false)
   , mIsInputPriorityEventEnabled(false)
   , mHangMonitorActor(nullptr)
@@ -5084,16 +5225,28 @@ ContentParent::RecvGraphicsError(const n
     std::stringstream message;
     message << "CP+" << aError.get();
     lf->UpdateStringsVector(message.str());
   }
   return IPC_OK();
 }
 
 mozilla::ipc::IPCResult
+ContentParent::RecvRecordReplayFatalError(const nsCString& aError)
+{
+  nsCOMPtr<nsIPromptService> promptService(do_GetService(NS_PROMPTSERVICE_CONTRACTID));
+  MOZ_RELEASE_ASSERT(promptService);
+
+  promptService->Alert(nullptr, u"Fatal Record/Replay Error",
+                       NS_ConvertUTF8toUTF16(aError).get());
+
+  return IPC_OK();
+}
+
+mozilla::ipc::IPCResult
 ContentParent::RecvBeginDriverCrashGuard(const uint32_t& aGuardType, bool* aOutCrashed)
 {
   // Only one driver crash guard should be active at a time, per-process.
   MOZ_ASSERT(!mDriverCrashGuard);
 
   UniquePtr<gfx::DriverCrashGuard> guard;
   switch (gfx::CrashGuardType(aGuardType)) {
   case gfx::CrashGuardType::D3D11Layers:
@@ -5747,16 +5900,41 @@ ContentParent::CanCommunicateWith(Conten
     return false;
   }
   if (IsForJSPlugin()) {
     return parentId == ContentParentId(0);
   }
   return parentId == aOtherProcess;
 }
 
+nsresult
+ContentParent::SaveRecording(nsIFile* aFile, bool* aRetval)
+{
+  if (mRecordReplayState != eRecording) {
+    *aRetval = false;
+    return NS_OK;
+  }
+
+  PRFileDesc* prfd;
+  nsresult rv = aFile->OpenNSPRFileDesc(PR_WRONLY | PR_TRUNCATE | PR_CREATE_FILE, 0644, &prfd);
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+
+  FileDescriptor::PlatformHandleType handle =
+    FileDescriptor::PlatformHandleType(PR_FileDesc2NativeHandle(prfd));
+
+  Unused << SendSaveRecording(FileDescriptor(handle));
+
+  PR_Close(prfd);
+
+  *aRetval = true;
+  return NS_OK;
+}
+
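(For illustration: a hedged sketch of a parent-process caller of the new
SaveRecording method above. The destination path and the `contentParent`
pointer are hypothetical; only the method itself is part of this patch.)

    nsCOMPtr<nsIFile> file;
    nsresult rv = NS_NewLocalFile(NS_LITERAL_STRING("/tmp/session.recording"),
                                  false, getter_AddRefs(file));
    if (NS_SUCCEEDED(rv)) {
      bool saved = false;
      Unused << contentParent->SaveRecording(file, &saved);
      // `saved` comes back false when the process is not currently recording.
    }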
 mozilla::ipc::IPCResult
 ContentParent::RecvMaybeReloadPlugins()
 {
   RefPtr<nsPluginHost> pluginHost = nsPluginHost::GetInst();
   pluginHost->ReloadPlugins();
   return IPC_OK();
 }
 
--- a/dom/ipc/ContentParent.h
+++ b/dom/ipc/ContentParent.h
@@ -169,17 +169,18 @@ public:
 
   /**
    * Get or create a content process for:
    * 1. browser iframe
    * 2. remote xul <browser>
    * 3. normal iframe
    */
   static already_AddRefed<ContentParent>
-  GetNewOrUsedBrowserProcess(const nsAString& aRemoteType,
+  GetNewOrUsedBrowserProcess(Element* aFrameElement,
+                             const nsAString& aRemoteType,
                              hal::ProcessPriority aPriority =
                              hal::ProcessPriority::PROCESS_PRIORITY_FOREGROUND,
                              ContentParent* aOpener = nullptr,
                              bool aPreferUsed = false);
 
   /**
    * Get or create a content process for a JS plugin. aPluginID is the id of the JS plugin
    * (@see nsFakePlugin::mId). There is a maximum of one process per JS plugin.
@@ -298,16 +299,21 @@ public:
                                                          const TabId& aOpenerTabId,
                                                          const TabId& aTabId,
                                                          ContentParentId* aCpId,
                                                          bool* aIsForBrowser) override;
 
   virtual mozilla::ipc::IPCResult RecvBridgeToChildProcess(const ContentParentId& aCpId,
                                                            Endpoint<PContentBridgeParent>* aEndpoint) override;
 
+  virtual mozilla::ipc::IPCResult RecvOpenRecordReplayChannel(const uint32_t& channelId,
+                                                              FileDescriptor* connection) override;
+  virtual mozilla::ipc::IPCResult RecvCreateReplayingProcess(const uint32_t& aChannelId) override;
+  virtual mozilla::ipc::IPCResult RecvTerminateReplayingProcess(const uint32_t& aChannelId) override;
+
   virtual mozilla::ipc::IPCResult RecvCreateGMPService() override;
 
   virtual mozilla::ipc::IPCResult RecvLoadPlugin(const uint32_t& aPluginId, nsresult* aRv,
                                                  uint32_t* aRunID,
                                                  Endpoint<PPluginModuleParent>* aEndpoint) override;
 
   virtual mozilla::ipc::IPCResult RecvMaybeReloadPlugins() override;
 
@@ -738,26 +744,38 @@ private:
                      bool* aWindowIsNew,
                      int32_t& aOpenLocation,
                      nsIPrincipal* aTriggeringPrincipal,
                      uint32_t aReferrerPolicy,
                      bool aLoadUri);
 
   FORWARD_SHMEM_ALLOCATOR_TO(PContentParent)
 
+  enum RecordReplayState
+  {
+    eNotRecordingOrReplaying,
+    eRecording,
+    eReplaying
+  };
+
   explicit ContentParent(int32_t aPluginID)
-    : ContentParent(nullptr, EmptyString(), aPluginID)
+    : ContentParent(nullptr, EmptyString(), eNotRecordingOrReplaying, EmptyString(), aPluginID)
   {}
   ContentParent(ContentParent* aOpener,
-                const nsAString& aRemoteType)
-    : ContentParent(aOpener, aRemoteType, nsFakePluginTag::NOT_JSPLUGIN)
+                const nsAString& aRemoteType,
+                RecordReplayState aRecordReplayState = eNotRecordingOrReplaying,
+                const nsAString& aRecordingFile = EmptyString())
+    : ContentParent(aOpener, aRemoteType, aRecordReplayState, aRecordingFile,
+                    nsFakePluginTag::NOT_JSPLUGIN)
   {}
 
   ContentParent(ContentParent* aOpener,
                 const nsAString& aRemoteType,
+                RecordReplayState aRecordReplayState,
+                const nsAString& aRecordingFile,
                 int32_t aPluginID);
 
   // Launch the subprocess and associated initialization.
   // Returns false if the process fails to start.
   bool LaunchSubprocess(hal::ProcessPriority aInitialPriority = hal::PROCESS_PRIORITY_FOREGROUND);
 
   // Common initialization after sub process launch.
   void InitInternal(ProcessPriority aPriority);
@@ -1121,16 +1139,18 @@ public:
                                                           const TabId& aTabId,
                                                           layers::LayersId* aId) override;
 
   virtual mozilla::ipc::IPCResult RecvDeallocateLayerTreeId(const ContentParentId& aCpId,
                                                             const layers::LayersId& aId) override;
 
   virtual mozilla::ipc::IPCResult RecvGraphicsError(const nsCString& aError) override;
 
+  virtual mozilla::ipc::IPCResult RecvRecordReplayFatalError(const nsCString& aError) override;
+
   virtual mozilla::ipc::IPCResult
   RecvBeginDriverCrashGuard(const uint32_t& aGuardType,
                             bool* aOutCrashed) override;
 
   virtual mozilla::ipc::IPCResult RecvEndDriverCrashGuard(const uint32_t& aGuardType) override;
 
   virtual mozilla::ipc::IPCResult RecvAddIdleObserver(const uint64_t& observerId,
                                                       const uint32_t& aIdleTimeInS) override;
@@ -1241,16 +1261,18 @@ public:
 
   bool SendRequestMemoryReport(const uint32_t& aGeneration,
                                const bool& aAnonymize,
                                const bool& aMinimizeMemoryUsage,
                                const MaybeFileDesc& aDMDFile) override;
 
   bool CanCommunicateWith(ContentParentId aOtherProcess);
 
+  nsresult SaveRecording(nsIFile* aFile, bool* aRetval);
+
 private:
 
   // If you add strong pointers to cycle collected objects here, be sure to
   // release these objects in ShutDownProcess.  See the comment there for more
   // details.
 
   ContentProcessHost* mSubprocess;
   const TimeStamp mLaunchTS; // used to calculate time to start content process
@@ -1284,16 +1306,26 @@ private:
   bool mIsAvailable;
   // True only while remote content is being actively used from this process.
   // After mIsAlive goes to false, some previously scheduled IPC traffic may
   // still pass through.
   bool mIsAlive;
 
   bool mIsForBrowser;
 
+  // Whether this process is recording or replaying its execution, and any
+  // associated recording file.
+  RecordReplayState mRecordReplayState;
+  nsString mRecordingFile;
+
+  // When recording or replaying, the child process is a middleman. This vector
+  // stores any replaying children we have spawned on behalf of that middleman,
+  // indexed by their record/replay channel ID.
+  Vector<mozilla::ipc::GeckoChildProcessHost*> mReplayingChildren;
+
   // These variables track whether we've called Close() and KillHard() on our
   // channel.
   bool mCalledClose;
   bool mCalledKillHard;
   bool mCreatedPairedMinidumps;
   bool mShutdownPending;
   bool mIPCOpen;
 
--- a/dom/ipc/ContentProcess.cpp
+++ b/dom/ipc/ContentProcess.cpp
@@ -5,16 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "mozilla/ipc/IOThreadChild.h"
 
 #include "ContentProcess.h"
 #include "base/shared_memory.h"
 #include "mozilla/Preferences.h"
 #include "mozilla/Scheduler.h"
+#include "mozilla/recordreplay/ParentIPC.h"
 
 #if defined(XP_MACOSX) && defined(MOZ_CONTENT_SANDBOX)
 #include <stdlib.h>
 #endif
 
 #if (defined(XP_WIN) || defined(XP_MACOSX)) && defined(MOZ_CONTENT_SANDBOX)
 #include "mozilla/SandboxSettings.h"
 #include "nsAppDirectoryServiceDefs.h"
@@ -256,40 +257,56 @@ ContentProcess::Init(int aArgc, char* aA
       prefsLen.isNothing() ||
       prefMapHandle.isNothing() ||
       prefMapSize.isNothing() ||
       schedulerPrefs.isNothing() ||
       parentBuildID.isNothing()) {
     return false;
   }
 
-  // Init the shared-memory base preference mapping first, so that only changed
-  // preferences wind up in heap memory.
-  Preferences::InitSnapshot(prefMapHandle.ref(), *prefMapSize);
+  if (recordreplay::IsRecordingOrReplaying()) {
+    // Set up early prefs from shmem contents passed to us by the middleman.
+    Preferences::DeserializePreferences(recordreplay::child::PrefsShmemContents(*prefsLen),
+                                        *prefsLen);
+  } else {
+    // Init the shared-memory base preference mapping first, so that only changed
+    // preferences wind up in heap memory.
+    Preferences::InitSnapshot(prefMapHandle.ref(), *prefMapSize);
 
-  // Set up early prefs from the shared memory.
-  base::SharedMemory shm;
-  if (!shm.SetHandle(*prefsHandle, /* read_only */ true)) {
-    NS_ERROR("failed to open shared memory in the child");
-    return false;
+    // Set up early prefs from the shared memory.
+    base::SharedMemory shm;
+    if (!shm.SetHandle(*prefsHandle, /* read_only */ true)) {
+      NS_ERROR("failed to open shared memory in the child");
+      return false;
+    }
+    if (!shm.Map(*prefsLen)) {
+      NS_ERROR("failed to map shared memory in the child");
+      return false;
+    }
+    Preferences::DeserializePreferences(static_cast<char*>(shm.memory()),
+                                        *prefsLen);
+    if (recordreplay::IsMiddleman()) {
+      recordreplay::parent::NotePrefsShmemContents(static_cast<char*>(shm.memory()),
+                                                   *prefsLen);
+    }
   }
-  if (!shm.Map(*prefsLen)) {
-    NS_ERROR("failed to map shared memory in the child");
-    return false;
-  }
-  Preferences::DeserializePreferences(static_cast<char*>(shm.memory()),
-                                      *prefsLen);
 
   Scheduler::SetPrefs(*schedulerPrefs);
+
+  if (recordreplay::IsMiddleman()) {
+    recordreplay::parent::InitializeMiddleman(aArgc, aArgv, ParentPid());
+  }
+
   mContent.Init(IOThreadChild::message_loop(),
                 ParentPid(),
                 *parentBuildID,
                 IOThreadChild::channel(),
                 *childID,
                 *isForBrowser);
+
   mXREEmbed.Start();
 #if (defined(XP_MACOSX)) && defined(MOZ_CONTENT_SANDBOX)
   mContent.SetProfileDir(profileDir);
 #endif
 
 #if defined(XP_WIN) && defined(MOZ_CONTENT_SANDBOX)
   SetUpSandboxEnvironment();
 #endif
--- a/dom/ipc/PContent.ipdl
+++ b/dom/ipc/PContent.ipdl
@@ -721,27 +721,35 @@ child:
 
     /*
      * Message to construct a PClientOpenWindowOp actor.  This is used to
      * open windows cross-process and receive notification when the operation
      * has completed.
      */
     async PClientOpenWindowOp(ClientOpenWindowArgs aArgs);
 
+    /* In a recording process, save the execution up to the current point. */
+    async SaveRecording(FileDescriptor file);
+
 parent:
     async InitBackground(Endpoint<PBackgroundParent> aEndpoint);
 
     sync CreateChildProcess(IPCTabContext context,
                             ProcessPriority priority,
                             TabId openerTabId,
                             TabId tabId)
         returns (ContentParentId cpId, bool isForBrowser);
     sync BridgeToChildProcess(ContentParentId cpId)
         returns (Endpoint<PContentBridgeParent> endpoint);
 
+    sync OpenRecordReplayChannel(uint32_t channelId)
+        returns (FileDescriptor connection);
+    async CreateReplayingProcess(uint32_t channelId);
+    async TerminateReplayingProcess(uint32_t channelId);
+
     async CreateGMPService();
 
     async InitStreamFilter(uint64_t channelId, nsString addonId)
         returns (Endpoint<PStreamFilterChild> aEndpoint);
 
     /**
      * This call connects the content process to a plugin process. This call
      * returns an endpoint for a new PluginModuleParent. The corresponding
@@ -926,16 +934,19 @@ parent:
     async RecordingDeviceEvents(nsString recordingStatus,
                                 nsString pageURL,
                                 bool isAudio,
                                 bool isVideo);
 
     // Graphics errors
     async GraphicsError(nsCString aError);
 
+    // Record/replay errors.
+    async RecordReplayFatalError(nsCString aError);
+
     // Driver crash guards. aGuardType must be a member of CrashGuardType.
     sync BeginDriverCrashGuard(uint32_t aGuardType) returns (bool crashDetected);
     sync EndDriverCrashGuard(uint32_t aGuardType);
 
     async AddIdleObserver(uint64_t observerId, uint32_t idleTimeInS);
     async RemoveIdleObserver(uint64_t observerId, uint32_t idleTimeInS);
 
     /**
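(IPDL generates child-side senders for the record/replay messages added above.
A hedged sketch of the middleman's side of the exchange; `contentChild` and
`channelId` are placeholders, and the sync message returns the connection via
an outparam:)

    FileDescriptor connection;
    if (contentChild->SendOpenRecordReplayChannel(channelId, &connection)) {
      // `connection` is the endpoint the parent opened for this channel.
      Unused << contentChild->SendCreateReplayingProcess(channelId);
    }
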
--- a/dom/ipc/TabChild.h
+++ b/dom/ipc/TabChild.h
@@ -654,16 +654,18 @@ public:
                   const FrameMetrics::ViewID& aViewId,
                   const CSSRect& aRect,
                   const uint32_t& aFlags);
 
   // Request that the docshell be marked as active.
   void PaintWhileInterruptingJS(uint64_t aLayerObserverEpoch,
                                 bool aForceRepaint);
 
+  uint64_t LayerObserverEpoch() const { return mLayerObserverEpoch; }
+
 #if defined(XP_WIN) && defined(ACCESSIBILITY)
   uintptr_t GetNativeWindowHandle() const { return mNativeWindowHandle; }
 #endif
 
   // These methods return `true` if this TabChild is currently awaiting a
   // Large-Allocation header.
   bool StopAwaitingLargeAlloc();
   bool IsAwaitingLargeAlloc();
--- a/dom/ipc/TabParent.cpp
+++ b/dom/ipc/TabParent.cpp
@@ -2960,16 +2960,27 @@ TabParent::SetRenderLayersInternal(bool 
 NS_IMETHODIMP
 TabParent::PreserveLayers(bool aPreserveLayers)
 {
   mPreserveLayers = aPreserveLayers;
   return NS_OK;
 }
 
 NS_IMETHODIMP
+TabParent::SaveRecording(const nsAString& aFilename, bool* aRetval)
+{
+  nsCOMPtr<nsIFile> file;
+  nsresult rv = NS_NewLocalFile(aFilename, false, getter_AddRefs(file));
+  if (NS_FAILED(rv)) {
+    return rv;
+  }
+  return Manager()->AsContentParent()->SaveRecording(file, aRetval);
+}
+
+NS_IMETHODIMP
 TabParent::SuppressDisplayport(bool aEnabled)
 {
   if (IsDestroyed()) {
     return NS_OK;
   }
 
 #ifdef DEBUG
   if (aEnabled) {
--- a/gfx/layers/ipc/CompositorBridgeParent.h
+++ b/gfx/layers/ipc/CompositorBridgeParent.h
@@ -166,16 +166,20 @@ public:
                            uint32_t aApzcId) override;
   bool StopSharingMetrics(FrameMetrics::ViewID aScrollId,
                           uint32_t aApzcId) override;
 
   virtual bool IsRemote() const {
     return false;
   }
 
+  // Composite to the given target. Only subclasses which actually composite
+  // (see CompositorBridgeParent below) override this; the base implementation
+  // should never be reached.
+  virtual void ForceComposeToTarget(gfx::DrawTarget* aTarget, const gfx::IntRect* aRect = nullptr) {
+    MOZ_CRASH();
+  }
+
 protected:
   ~CompositorBridgeParentBase() override;
 
   bool mCanSend;
 
 private:
   RefPtr<CompositorManagerParent> mCompositorManager;
 };
@@ -441,17 +445,17 @@ public:
 
   /**
    * Used by the profiler to denote when a vsync occurred
    */
   static void PostInsertVsyncProfilerMarker(mozilla::TimeStamp aVsyncTimestamp);
 
   widget::CompositorWidget* GetWidget() { return mWidget; }
 
-  void ForceComposeToTarget(gfx::DrawTarget* aTarget, const gfx::IntRect* aRect = nullptr);
+  virtual void ForceComposeToTarget(gfx::DrawTarget* aTarget, const gfx::IntRect* aRect = nullptr) override;
 
   PAPZCTreeManagerParent* AllocPAPZCTreeManagerParent(const LayersId& aLayersId) override;
   bool DeallocPAPZCTreeManagerParent(PAPZCTreeManagerParent* aActor) override;
 
   // Helper method so that we don't have to expose mApzcTreeManager to
   // CrossProcessCompositorBridgeParent.
   void AllocateAPZCTreeManagerParent(const MonitorAutoLock& aProofOfLayerTreeStateLock,
                                      const LayersId& aLayersId,
--- a/gfx/layers/ipc/LayerTransactionParent.cpp
+++ b/gfx/layers/ipc/LayerTransactionParent.cpp
@@ -157,16 +157,22 @@ LayerTransactionParent::RecvPaintTime(co
 {
   mCompositorBridge->UpdatePaintTime(this, aPaintTime);
   return IPC_OK();
 }
 
 mozilla::ipc::IPCResult
 LayerTransactionParent::RecvUpdate(const TransactionInfo& aInfo)
 {
+  auto guard = MakeScopeExit([&] {
+      if (recordreplay::IsRecordingOrReplaying()) {
+        recordreplay::child::NotifyPaintComplete();
+      }
+    });
+
   AUTO_PROFILER_TRACING("Paint", "LayerTransaction");
   AUTO_PROFILER_LABEL("LayerTransactionParent::RecvUpdate", GRAPHICS);
 
   TimeStamp updateStart = TimeStamp::Now();
 
   MOZ_LAYERS_LOG(("[ParentSide] received txn with %zu edits", aInfo.cset().Length()));
 
   UpdateFwdTransactionId(aInfo.fwdTransactionId());
@@ -484,16 +490,21 @@ LayerTransactionParent::RecvUpdate(const
       printf_stderr("LayerTransactionParent::RecvUpdate transaction from process %d took %f ms",
                     OtherPid(),
                     latency.ToMilliseconds());
     }
 
     mLayerManager->RecordUpdateTime((TimeStamp::Now() - updateStart).ToMilliseconds());
   }
 
+  // Compose after every update when recording/replaying.
+  if (recordreplay::IsRecordingOrReplaying()) {
+    mCompositorBridge->ForceComposeToTarget(nullptr);
+  }
+
   return IPC_OK();
 }
 
 bool
 LayerTransactionParent::SetLayerAttributes(const OpSetLayerAttributes& aOp)
 {
   Layer* layer = AsLayer(aOp.layer());
   if (!layer) {
--- a/gfx/layers/ipc/ShadowLayers.cpp
+++ b/gfx/layers/ipc/ShadowLayers.cpp
@@ -762,23 +762,31 @@ ShadowLayerForwarder::EndTransaction(con
     mPaintTiming.serializeMs() = (TimeStamp::Now() - startTime.value()).ToMilliseconds();
     startTime = Some(TimeStamp::Now());
   }
 
   // We delay until the last possible moment, to give the paint thread a chance
   // to finish. If it does, we don't have to delay messages at all.
   GetCompositorBridgeChild()->PostponeMessagesIfAsyncPainting();
 
+  if (recordreplay::IsRecordingOrReplaying()) {
+    recordreplay::child::NotifyPaintStart();
+  }
+
   MOZ_LAYERS_LOG(("[LayersForwarder] sending transaction..."));
   RenderTraceScope rendertrace3("Forward Transaction", "000093");
   if (!mShadowManager->SendUpdate(info)) {
     MOZ_LAYERS_LOG(("[LayersForwarder] WARNING: sending transaction failed!"));
     return false;
   }
 
+  if (recordreplay::IsRecordingOrReplaying()) {
+    recordreplay::child::WaitForPaintToComplete();
+  }
+
   if (startTime) {
     mPaintTiming.sendMs() = (TimeStamp::Now() - startTime.value()).ToMilliseconds();
     mShadowManager->SendRecordPaintTimes(mPaintTiming);
   }
 
   *aSent = true;
   mIsFirstPaint = false;
   mFocusTarget = FocusTarget();
--- a/ipc/chromium/src/base/pickle.cc
+++ b/ipc/chromium/src/base/pickle.cc
@@ -170,16 +170,24 @@ Pickle& Pickle::operator=(Pickle&& other
   buffers_ = std::move(tmp);
 
   //std::swap(buffers_, other.buffers_);
   std::swap(header_, other.header_);
   std::swap(header_size_, other.header_size_);
   return *this;
 }
 
+void Pickle::CopyFrom(const Pickle& other) {
+  MOZ_ALWAYS_TRUE(buffers_.CopyFrom(other.buffers_));
+  MOZ_ASSERT(other.header_ == reinterpret_cast<const Header*>(other.buffers_.Start()));
+
+  header_ = reinterpret_cast<Header*>(buffers_.Start());
+  header_size_ = other.header_size_;
+}
+
 bool Pickle::ReadBool(PickleIterator* iter, bool* result) const {
   DCHECK(iter);
 
   int tmp;
   if (!ReadInt(iter, &tmp))
     return false;
   DCHECK(0 == tmp || 1 == tmp);
   *result = tmp ? true : false;
--- a/ipc/chromium/src/base/pickle.h
+++ b/ipc/chromium/src/base/pickle.h
@@ -73,16 +73,18 @@ class Pickle {
 
   Pickle(Pickle&& other);
 
   // Performs a deep copy.
   Pickle& operator=(const Pickle& other) = delete;
 
   Pickle& operator=(Pickle&& other);
 
+  void CopyFrom(const Pickle& other);
+
   // Returns the size of the Pickle's data.
   uint32_t size() const { return header_size_ + header_->payload_size; }
 
   typedef mozilla::BufferList<InfallibleAllocPolicy> BufferList;
 
   const BufferList& Buffers() const { return buffers_; }
 
   uint32_t CurrentSize() const { return buffers_.Size(); }
--- a/ipc/chromium/src/chrome/common/ipc_message.cc
+++ b/ipc/chromium/src/chrome/common/ipc_message.cc
@@ -140,16 +140,22 @@ Message::ForInterruptDispatchError()
 Message& Message::operator=(Message&& other) {
   *static_cast<Pickle*>(this) = std::move(other);
 #if defined(OS_POSIX)
   file_descriptor_set_.swap(other.file_descriptor_set_);
 #endif
   return *this;
 }
 
+void Message::CopyFrom(const Message& other) {
+  Pickle::CopyFrom(other);
+#if defined(OS_POSIX)
+  file_descriptor_set_ = other.file_descriptor_set_;
+#endif
+}
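(A hedged sketch of the intended use: duplicating an IPC message so one copy
can be forwarded while another is retained. `incoming` is hypothetical, and
this assumes Message is default-constructible, as in chromium's ipc_message.)

    IPC::Message retained;
    retained.CopyFrom(incoming);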
 
 #if defined(OS_POSIX)
 bool Message::WriteFileDescriptor(const base::FileDescriptor& descriptor) {
   // We write the index of the descriptor so that we don't have to
   // keep the current descriptor as extra decoding state when deserialising.
   // Also, we rely on each file descriptor being accompanied by sizeof(int)
   // bytes of data in the message. See the comment for input_cmsg_buf_.
   WriteInt(file_descriptor_set()->size());
--- a/ipc/chromium/src/chrome/common/ipc_message.h
+++ b/ipc/chromium/src/chrome/common/ipc_message.h
@@ -208,16 +208,18 @@ class Message : public Pickle {
 
   Message(const char* data, int data_len);
 
   Message(const Message& other) = delete;
   Message(Message&& other);
   Message& operator=(const Message& other) = delete;
   Message& operator=(Message&& other);
 
+  void CopyFrom(const Message& other);
+
   // Helper method for the common case (default segmentCapacity, recording
   // the write latency of messages) of IPDL message creation.  This helps
   // move the malloc and some of the parameter setting out of autogenerated
   // code.
   static Message* IPDLMessage(int32_t routing_id,
                               msgid_t type,
                               HeaderFlags flags);
 
--- a/ipc/glue/ProtocolUtils.cpp
+++ b/ipc/glue/ProtocolUtils.cpp
@@ -13,16 +13,18 @@
 
 #include "mozilla/IntegerPrintfMacros.h"
 
 #include "mozilla/ipc/ProtocolUtils.h"
 
 #include "mozilla/dom/ContentParent.h"
 #include "mozilla/ipc/MessageChannel.h"
 #include "mozilla/ipc/Transport.h"
+#include "mozilla/recordreplay/ChildIPC.h"
+#include "mozilla/recordreplay/ParentIPC.h"
 #include "mozilla/StaticMutex.h"
 #include "mozilla/SystemGroup.h"
 #include "mozilla/Unused.h"
 #include "nsPrintfCString.h"
 
 #if defined(MOZ_SANDBOX) && defined(XP_WIN)
 #include "mozilla/sandboxTarget.h"
 #endif
@@ -718,17 +720,25 @@ IToplevelProtocol::OtherPidMaybeInvalid(
   return mOtherPid;
 }
 
 void
 IToplevelProtocol::SetOtherProcessId(base::ProcessId aOtherPid,
                                      ProcessIdState aState)
 {
   MonitorAutoLock lock(mMonitor);
-  mOtherPid = aOtherPid;
+  // When recording an execution, all communication we do is forwarded from
+  // the middleman to the parent process, so use the parent's pid instead of
+  // the middleman's.
+  if (recordreplay::IsRecordingOrReplaying() &&
+      aOtherPid == recordreplay::child::MiddlemanProcessId()) {
+    mOtherPid = recordreplay::child::ParentProcessId();
+  } else {
+    mOtherPid = aOtherPid;
+  }
   mOtherPidState = aState;
   lock.NotifyAll();
 }
 
 bool
 IToplevelProtocol::TakeMinidump(nsIFile** aDump, uint32_t* aSequence)
 {
   MOZ_RELEASE_ASSERT(GetSide() == ParentSide);
@@ -1061,19 +1071,19 @@ IToplevelProtocol::ToplevelState::Replac
 
   MutexAutoLock lock(mEventTargetMutex);
   mEventTargetMap.ReplaceWithID(aEventTarget, id);
 }
 
 const MessageChannel*
 IToplevelProtocol::ToplevelState::GetIPCChannel() const
 {
-  return &mChannel;
+  return ProtocolState::mChannel ? ProtocolState::mChannel : &mChannel;
 }
 
 MessageChannel*
 IToplevelProtocol::ToplevelState::GetIPCChannel()
 {
-  return &mChannel;
+  return ProtocolState::mChannel ? ProtocolState::mChannel : &mChannel;
 }
 
 } // namespace ipc
 } // namespace mozilla
--- a/ipc/glue/ProtocolUtils.h
+++ b/ipc/glue/ProtocolUtils.h
@@ -19,16 +19,17 @@
 #include "mozilla/AlreadyAddRefed.h"
 #include "mozilla/Attributes.h"
 #include "mozilla/ipc/ByteBuf.h"
 #include "mozilla/ipc/FileDescriptor.h"
 #include "mozilla/ipc/MessageChannel.h"
 #include "mozilla/ipc/Shmem.h"
 #include "mozilla/ipc/Transport.h"
 #include "mozilla/ipc/MessageLink.h"
+#include "mozilla/recordreplay/ChildIPC.h"
 #include "mozilla/LinkedList.h"
 #include "mozilla/Maybe.h"
 #include "mozilla/MozPromise.h"
 #include "mozilla/Mutex.h"
 #include "mozilla/NotNull.h"
 #include "mozilla/Scoped.h"
 #include "mozilla/UniquePtr.h"
 #include "MainThreadUtils.h"
@@ -283,16 +284,23 @@ public:
     MessageChannel* GetIPCChannel()
     {
         return mState->GetIPCChannel();
     }
     const MessageChannel* GetIPCChannel() const
     {
         return mState->GetIPCChannel();
     }
+    void SetMiddlemanIPCChannel(MessageChannel* aChannel)
+    {
+        // Middleman processes sometimes need to change the channel used by a
+        // protocol.
+        MOZ_RELEASE_ASSERT(recordreplay::IsMiddleman());
+        mState->SetIPCChannel(aChannel);
+    }
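(A hedged usage sketch; the helper and its arguments are hypothetical. The
middleman layer would reroute an actor onto its own record/replay channel:)

    void UseMiddlemanChannel(mozilla::ipc::IProtocol* aActor,
                             mozilla::ipc::MessageChannel* aChannel)
    {
      MOZ_RELEASE_ASSERT(mozilla::recordreplay::IsMiddleman());
      aActor->SetMiddlemanIPCChannel(aChannel);
    }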
 
     // XXX odd ducks, acknowledged
     virtual ProcessId OtherPid() const;
     Side GetSide() const { return mSide; }
 
     void FatalError(const char* const aErrorMsg) const;
     virtual void HandleFatalError(const char* aErrorMsg) const;
 
@@ -860,17 +868,26 @@ public:
         return mOtherPid;
     }
 
     // This method binds aActor to this endpoint. After this call, the actor can
     // be used to send and receive messages. The endpoint becomes invalid.
     bool Bind(PFooSide* aActor)
     {
         MOZ_RELEASE_ASSERT(mValid);
-        MOZ_RELEASE_ASSERT(mMyPid == base::GetCurrentProcId());
+        if (mMyPid != base::GetCurrentProcId()) {
+            // These pids must match, unless we are recording or replaying, in
+            // which case the parent process will have supplied the pid for the
+            // middleman process instead. Fix this here. If we're replaying,
+            // we'll see the pid of the middleman used while recording.
+            MOZ_RELEASE_ASSERT(recordreplay::IsRecordingOrReplaying());
+            MOZ_RELEASE_ASSERT(recordreplay::IsReplaying() ||
+                               mMyPid == recordreplay::child::MiddlemanProcessId());
+            mMyPid = base::GetCurrentProcId();
+        }
 
         UniquePtr<Transport> t = mozilla::ipc::OpenDescriptor(mTransport, mMode);
         if (!t) {
             return false;
         }
         if (!aActor->Open(t.get(), mOtherPid, XRE_GetIOMessageLoop(),
                           mMode == Transport::MODE_SERVER ? ParentSide : ChildSide)) {
             return false;
--- a/ipc/ipdl/sync-messages.ini
+++ b/ipc/ipdl/sync-messages.ini
@@ -844,16 +844,18 @@ description =
 [PBrowser::EnsureLayersConnected]
 description =
 [PContent::SyncMessage]
 description =
 [PContent::CreateChildProcess]
 description =
 [PContent::BridgeToChildProcess]
 description =
+[PContent::OpenRecordReplayChannel]
+description = bug 1475898 this could be async
 [PContent::LoadPlugin]
 description =
 [PContent::ConnectPluginBridge]
 description =
 [PContent::IsSecureURI]
 description =
 [PContent::PURLClassifier]
 description =
--- a/js/xpconnect/idl/xpccomponents.idl
+++ b/js/xpconnect/idl/xpccomponents.idl
@@ -692,16 +692,19 @@ interface nsIXPCComponents_Utils : nsISu
      * thread. Calling unblockThreadedExecution will re-enable thread
      * scheduling of the main thread. Multiple calls to
      * blockThreadedExecution will require the same number of calls to
      * unblockThreadedExecution in order to resume cooperative
      * scheduling.
      */
     void blockThreadedExecution(in nsIBlockThreadedExecutionCallback aBlockedCallback);
     void unblockThreadedExecution();
+
+    /* Give a directive to the record/replay system. */
+    void recordReplayDirective(in long directive);
 };
 
 /**
 * Interface for the 'Components' object.
 *
 * The first interface contains things that are available to non-chrome XBL code
 * that runs in a scope with an ExpandedPrincipal. The second interface
 * includes members that are only exposed to chrome.
--- a/js/xpconnect/src/XPCComponents.cpp
+++ b/js/xpconnect/src/XPCComponents.cpp
@@ -3064,16 +3064,23 @@ nsXPCComponents_Utils::BlockThreadedExec
 
 NS_IMETHODIMP
 nsXPCComponents_Utils::UnblockThreadedExecution()
 {
     Scheduler::UnblockThreadedExecution();
     return NS_OK;
 }
 
+NS_IMETHODIMP
+nsXPCComponents_Utils::RecordReplayDirective(int aDirective)
+{
+    recordreplay::RecordReplayDirective(aDirective);
+    return NS_OK;
+}
+
 /***************************************************************************/
 /***************************************************************************/
 /***************************************************************************/
 
 
 nsXPCComponentsBase::nsXPCComponentsBase(XPCWrappedNativeScope* aScope)
     :   mScope(aScope)
 {
--- a/mfbt/BufferList.h
+++ b/mfbt/BufferList.h
@@ -130,16 +130,38 @@ class BufferList : private AllocPolicy
   {
     MOZ_ASSERT(mSegments.empty());
     MOZ_ASSERT(aInitialCapacity != 0);
     MOZ_ASSERT(aInitialCapacity % kSegmentAlignment == 0);
 
     return AllocateSegment(aInitialSize, aInitialCapacity);
   }
 
+  bool CopyFrom(const BufferList& aOther)
+  {
+    MOZ_ASSERT(mOwning);
+
+    Clear();
+
+    // We don't make an exact copy of aOther. Instead, create a single segment
+    // with enough space to hold all data in aOther.
+    if (!Init(aOther.mSize, (aOther.mSize + kSegmentAlignment - 1) & ~(kSegmentAlignment - 1))) {
+      return false;
+    }
+
+    size_t offset = 0;
+    for (const Segment& segment : aOther.mSegments) {
+      memcpy(Start() + offset, segment.mData, segment.mSize);
+      offset += segment.mSize;
+    }
+    MOZ_ASSERT(offset == mSize);
+
+    return true;
+  }
+
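(The capacity passed to Init() above uses the round-up-to-a-multiple idiom,
assuming kSegmentAlignment is a power of two, which the mask arithmetic
requires. A standalone illustration:)

    #include <cstddef>

    // Round aSize up to the next multiple of aAlignment (a power of two).
    constexpr size_t RoundUpTo(size_t aSize, size_t aAlignment)
    {
      return (aSize + aAlignment - 1) & ~(aAlignment - 1);
    }

    static_assert(RoundUpTo(9, 8) == 16, "rounds up to the next multiple");
    static_assert(RoundUpTo(16, 8) == 16, "exact multiples are unchanged");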
   // Returns the sum of the sizes of all the buffers.
   size_t Size() const { return mSize; }
 
   size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf)
   {
     size_t size = mSegments.sizeOfExcludingThis(aMallocSizeOf);
     for (Segment& segment : mSegments) {
       size += aMallocSizeOf(segment.Start());
--- a/mfbt/RecordReplay.cpp
+++ b/mfbt/RecordReplay.cpp
@@ -28,67 +28,64 @@ namespace recordreplay {
   Macro(InternalAreThreadEventsPassedThrough, bool, (), ())     \
   Macro(InternalAreThreadEventsDisallowed, bool, (), ())        \
   Macro(InternalRecordReplayValue, size_t, (size_t aValue), (aValue)) \
   Macro(InternalHasDivergedFromRecording, bool, (), ())         \
   Macro(InternalGeneratePLDHashTableCallbacks, const PLDHashTableOps*, \
         (const PLDHashTableOps* aOps), (aOps))                  \
   Macro(InternalUnwrapPLDHashTableCallbacks, const PLDHashTableOps*, \
         (const PLDHashTableOps* aOps), (aOps))                  \
-  Macro(AllocateMemory, void*, (size_t aSize, AllocatedMemoryKind aKind), (aSize, aKind)) \
   Macro(InternalThingIndex, size_t, (void* aThing), (aThing))   \
   Macro(InternalVirtualThingName, const char*, (void* aThing), (aThing)) \
-  Macro(NewCheckpoint, bool, (bool aTemporary), (aTemporary))   \
-  Macro(SpewEnabled, bool, (), ())
+  Macro(ExecutionProgressCounter, ProgressCounter*, (), ())     \
+  Macro(IsInternalScript, bool, (const char* aURL), (aURL))     \
+  Macro(DefineRecordReplayControlObject, bool, (JSContext* aCx, JSObject* aObj), (aCx, aObj))
 
 #define FOR_EACH_INTERFACE_VOID(Macro)                          \
   Macro(InternalBeginOrderedAtomicAccess, (), ())               \
   Macro(InternalEndOrderedAtomicAccess, (), ())                 \
   Macro(InternalBeginPassThroughThreadEvents, (), ())           \
   Macro(InternalEndPassThroughThreadEvents, (), ())             \
   Macro(InternalBeginDisallowThreadEvents, (), ())              \
   Macro(InternalEndDisallowThreadEvents, (), ())                \
   Macro(InternalBeginCaptureEventStacks, (), ())                \
   Macro(InternalEndCaptureEventStacks, (), ())                  \
   Macro(InternalRecordReplayBytes,                              \
         (void* aData, size_t aSize), (aData, aSize))            \
-  Macro(DisallowUnhandledDivergeFromRecording, (), ())          \
   Macro(NotifyUnrecordedWait,                                   \
         (const std::function<void()>& aCallback), (aCallback))  \
   Macro(MaybeWaitForCheckpointSave, (), ())                     \
   Macro(InternalInvalidateRecording, (const char* aWhy), (aWhy)) \
   Macro(InternalDestroyPLDHashTableCallbacks,                   \
         (const PLDHashTableOps* aOps), (aOps))                  \
   Macro(InternalMovePLDHashTableContents,                       \
         (const PLDHashTableOps* aFirstOps, const PLDHashTableOps* aSecondOps), \
         (aFirstOps, aSecondOps))                                \
-  Macro(SetCheckpointHooks,                                     \
-        (BeforeCheckpointHook aBefore, AfterCheckpointHook aAfter), \
-        (aBefore, aAfter))                                      \
-  Macro(ResumeExecution, (), ())                                \
-  Macro(RestoreCheckpointAndResume, (const CheckpointId& aId), (aId)) \
-  Macro(DivergeFromRecording, (), ())                           \
-  Macro(DeallocateMemory,                                       \
-        (void* aAddress, size_t aSize, AllocatedMemoryKind aKind), (aAddress, aSize, aKind)) \
   Macro(SetWeakPointerJSRoot,                                   \
-        (const void* aPtr, void* aJSObj), (aPtr, aJSObj))       \
+        (const void* aPtr, JSObject* aJSObj), (aPtr, aJSObj))   \
   Macro(RegisterTrigger,                                        \
         (void* aObj, const std::function<void()>& aCallback),   \
         (aObj, aCallback))                                      \
   Macro(UnregisterTrigger,                                      \
         (void* aObj), (aObj))                                   \
   Macro(ActivateTrigger, (void* aObj), (aObj))                  \
   Macro(ExecuteTriggers, (), ())                                \
   Macro(InternalRecordReplayAssert, (const char* aFormat, va_list aArgs), (aFormat, aArgs)) \
   Macro(InternalRecordReplayAssertBytes,                        \
         (const void* aData, size_t aSize), (aData, aSize))      \
   Macro(InternalRegisterThing, (void* aThing), (aThing))        \
   Macro(InternalUnregisterThing, (void* aThing), (aThing))      \
   Macro(InternalRecordReplayDirective, (long aDirective), (aDirective)) \
-  Macro(InternalPrint, (const char* aFormat, va_list aArgs), (aFormat, aArgs))
+  Macro(BeginContentParse,                                      \
+        (const void* aToken, const char* aURL, const char* aContentType), \
+        (aToken, aURL, aContentType))                           \
+  Macro(AddContentParseData,                                    \
+        (const void* aToken, const char16_t* aBuffer, size_t aLength), \
+        (aToken, aBuffer, aLength))                             \
+  Macro(EndContentParse, (const void* aToken), (aToken))
 
 #define DECLARE_SYMBOL(aName, aReturnType, aFormals, _) \
   static aReturnType (*gPtr ##aName) aFormals;
 #define DECLARE_SYMBOL_VOID(aName, aFormals, _)  DECLARE_SYMBOL(aName, void, aFormals, _)
 
 FOR_EACH_INTERFACE(DECLARE_SYMBOL)
 FOR_EACH_INTERFACE_VOID(DECLARE_SYMBOL_VOID)
 
--- a/mfbt/RecordReplay.h
+++ b/mfbt/RecordReplay.h
@@ -13,16 +13,18 @@
 #include "mozilla/GuardObjects.h"
 #include "mozilla/TemplateLib.h"
 #include "mozilla/Types.h"
 
 #include <functional>
 #include <stdarg.h>
 
 struct PLDHashTableOps;
+struct JSContext;
+class JSObject;
 
 namespace mozilla {
 namespace recordreplay {
 
 // Record/Replay Overview.
 //
 // Firefox content processes can be specified to record or replay their
 // behavior. Whether a process is recording or replaying is initialized at the
@@ -196,17 +198,17 @@ static inline void InvalidateRecording(c
 static inline const PLDHashTableOps* GeneratePLDHashTableCallbacks(const PLDHashTableOps* aOps);
 static inline const PLDHashTableOps* UnwrapPLDHashTableCallbacks(const PLDHashTableOps* aOps);
 static inline void DestroyPLDHashTableCallbacks(const PLDHashTableOps* aOps);
 static inline void MovePLDHashTableContents(const PLDHashTableOps* aFirstOps,
                                             const PLDHashTableOps* aSecondOps);
 
 // Associate an arbitrary pointer with a JS object root while replaying. This
 // is useful for replaying the behavior of weak pointers.
-MFBT_API void SetWeakPointerJSRoot(const void* aPtr, /*JSObject*/void* aJSObj);
+MFBT_API void SetWeakPointerJSRoot(const void* aPtr, JSObject* aJSObj);
 
 // API for ensuring that a function executes at a consistent point when
 // recording or replaying. This is primarily needed for finalizers and other
 // activity during a GC that can perform recorded events (because GCs can
 // occur at different times and behave differently between recording and
 // replay, thread events are disallowed during a GC). Triggers can be
 // registered at a point where thread events are allowed, then activated at
 // a point where thread events are not allowed. When recording, the trigger's
@@ -318,170 +320,69 @@ enum class ProcessKind {
 
 // Command line option for specifying the record/replay kind of a process.
 static const char gProcessKindOption[] = "-recordReplayKind";
 
 // Command line option for specifying the recording file to use.
 static const char gRecordingFileOption[] = "-recordReplayFile";
 
 ///////////////////////////////////////////////////////////////////////////////
-// Devtools API
+// JS interface
 ///////////////////////////////////////////////////////////////////////////////
 
-// This interface is used by devtools C++ code (e.g. the JS Debugger) running
-// in a child or middleman process.
-
-// The ID of a checkpoint in a child process. Checkpoints are either normal or
-// temporary. Normal checkpoints occur at the same point in the recording and
-// all replays, while temporary checkpoints are not used while recording and
-// may be at different points in different replays.
-struct CheckpointId
-{
-  // ID of the most recent normal checkpoint, which are numbered in sequence
-  // starting at FirstCheckpointId.
-  size_t mNormal;
-
-  // Special IDs for normal checkpoints.
-  static const size_t Invalid = 0;
-  static const size_t First = 1;
-
-  // How many temporary checkpoints have been generated since the most recent
-  // normal checkpoint, zero if this represents the normal checkpoint itself.
-  size_t mTemporary;
-
-  explicit CheckpointId(size_t aNormal = Invalid, size_t aTemporary = 0)
-    : mNormal(aNormal), mTemporary(aTemporary)
-  {}
-
-  inline bool operator==(const CheckpointId& o) const {
-    return mNormal == o.mNormal && mTemporary == o.mTemporary;
-  }
-
-  inline bool operator!=(const CheckpointId& o) const {
-    return mNormal != o.mNormal || mTemporary != o.mTemporary;
-  }
-};
+// Get the counter used to keep track of how much progress JS execution has
+// made while running on the main thread. Progress must advance whenever a JS
+// function is entered or loop entry point is reached, so that no script
+// location may be hit twice while the progress counter is the same. See
+// JSControl.h for more.
+typedef uint64_t ProgressCounter;
+MFBT_API ProgressCounter* ExecutionProgressCounter();
 
-// Signature for the hook called when running forward, immediately before
-// hitting a normal or temporary checkpoint.
-typedef void (*BeforeCheckpointHook)();
-
-// Signature for the hook called immediately after hitting a normal or
-// temporary checkpoint, either when running forward or after rewinding.
-typedef void (*AfterCheckpointHook)(const CheckpointId& aCheckpoint);
-
-// Set hooks to call when encountering checkpoints.
-MFBT_API void SetCheckpointHooks(BeforeCheckpointHook aBeforeCheckpoint,
-                                 AfterCheckpointHook aAfterCheckpoint);
-
-// When paused at a breakpoint or at a checkpoint, unpause and proceed with
-// execution.
-MFBT_API void ResumeExecution();
-
-// When paused at a breakpoint or at a checkpoint, restore a checkpoint that
-// was saved earlier and resume execution.
-MFBT_API void RestoreCheckpointAndResume(const CheckpointId& aCheckpoint);
+static inline void
+AdvanceExecutionProgressCounter()
+{
+  ++*ExecutionProgressCounter();
+}
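(A hedged sketch of the intended call pattern; the hook name is hypothetical,
and the real instrumentation lives in the JS engine:)

    #include "mozilla/RecordReplay.h"

    // Called at every function entry and loop head, so that no script
    // location repeats while the progress counter holds the same value.
    static void NoteScriptLocationReached()
    {
      if (mozilla::recordreplay::IsRecordingOrReplaying()) {
        mozilla::recordreplay::AdvanceExecutionProgressCounter();
      }
    }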
 
-// Allow execution after this point to diverge from the recording. Execution
-// will remain diverged until an earlier checkpoint is restored.
-//
-// If an unhandled divergence occurs (see the 'Recording Divergence' comment
-// in ProcessRewind.h) then the process rewinds to the most recent saved
-// checkpoint.
-MFBT_API void DivergeFromRecording();
+// Return whether a script is internal to the record/replay infrastructure.
+// Such scripts may run non-deterministically between recording and replaying,
+// and their execution must not update the progress counter.
+MFBT_API bool IsInternalScript(const char* aURL);
 
-// After a call to DivergeFromRecording(), this may be called to prevent future
-// unhandled divergence from causing earlier checkpoints to be restored
-// (the process will immediately crash instead). This state lasts until a new
-// call to DivergeFromRecording, or to an explicit restore of an earlier
-// checkpoint.
-MFBT_API void DisallowUnhandledDivergeFromRecording();
-
-// Note a checkpoint at the current execution position. This checkpoint will be
-// saved if either (a) it is temporary, or (b) the middleman has instructed
-// this process to save this normal checkpoint. This method returns true if the
-// checkpoint was just saved, and false if it was just restored.
-MFBT_API bool NewCheckpoint(bool aTemporary);
+// Define a RecordReplayControl object on the specified global object, with
+// methods specialized to the current recording/replaying or middleman process
+// kind.
+MFBT_API bool DefineRecordReplayControlObject(JSContext* aCx, JSObject* aObj);
 
-// Print information about record/replay state. Printing is independent from
-// the recording and will be printed by any recording, replaying, or middleman
-// process. Spew is only printed when enabled via the RECORD_REPLAY_SPEW
-// environment variable.
-static inline void Print(const char* aFormat, ...);
-static inline void PrintSpew(const char* aFormat, ...);
-MFBT_API bool SpewEnabled();
-
-///////////////////////////////////////////////////////////////////////////////
-// Allocation policies
-///////////////////////////////////////////////////////////////////////////////
+// Notify the infrastructure that some URL which contains JavaScript is
+// being parsed. This is used to provide the complete contents of the URL to
+// devtools code when it is inspecting the state of this process; that devtools
+// code can't simply fetch the URL itself since it may have been changed since
+// the recording was made or may no longer exist. The token for a parse may not
+// be used in other parses until after EndContentParse() is called.
+MFBT_API void BeginContentParse(const void* aToken,
+                                const char* aURL, const char* aContentType);
 
-// Type describing what kind of memory to allocate/deallocate by APIs below.
-// TrackedMemoryKind is reserved for memory that is saved and restored when
-// saving or restoring checkpoints. All other values refer to memory that is
-// untracked, and whose contents are preserved when restoring checkpoints.
-// Different values are used to distinguish different classes of memory for
-// diagnosing leaks and reporting memory usage.
-typedef size_t AllocatedMemoryKind;
-static const AllocatedMemoryKind TrackedMemoryKind = 0;
+// Add some parse data to an existing content parse.
+MFBT_API void AddContentParseData(const void* aToken,
+                                  const char16_t* aBuffer, size_t aLength);
 
-// Memory kind to use for untracked debugger memory.
-static const AllocatedMemoryKind DebuggerAllocatedMemoryKind = 1;
+// Mark a content parse as having completed.
+MFBT_API void EndContentParse(const void* aToken);
 
-// Allocate or deallocate a block of memory of a particular kind. Allocated
-// memory is initially zeroed.
-MFBT_API void* AllocateMemory(size_t aSize, AllocatedMemoryKind aKind);
-MFBT_API void DeallocateMemory(void* aAddress, size_t aSize, AllocatedMemoryKind aKind);
-
-// Allocation policy for managing memory of a particular kind.
-template <AllocatedMemoryKind Kind>
-class AllocPolicy
+// Perform an entire content parse, when the entire URL is available at once.
+static inline void
+NoteContentParse(const void* aToken,
+                 const char* aURL, const char* aContentType,
+                 const char16_t* aBuffer, size_t aLength)
 {
-public:
-  template <typename T>
-  T* maybe_pod_calloc(size_t aNumElems) {
-    if (aNumElems & tl::MulOverflowMask<sizeof(T)>::value) {
-      MOZ_CRASH();
-    }
-    // Note: AllocateMemory always returns zeroed memory.
-    return static_cast<T*>(AllocateMemory(aNumElems * sizeof(T), Kind));
-  }
-
-  template <typename T>
-  void free_(T* aPtr, size_t aSize) {
-    DeallocateMemory(aPtr, aSize * sizeof(T), Kind);
-  }
-
-  template <typename T>
-  T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
-    T* res = maybe_pod_calloc<T>(aNewSize);
-    memcpy(res, aPtr, aOldSize * sizeof(T));
-    free_<T>(aPtr, aOldSize);
-    return res;
-  }
-
-  template <typename T>
-  T* maybe_pod_malloc(size_t aNumElems) { return maybe_pod_calloc<T>(aNumElems); }
-
-  template <typename T>
-  T* pod_malloc(size_t aNumElems) { return maybe_pod_malloc<T>(aNumElems); }
-
-  template <typename T>
-  T* pod_calloc(size_t aNumElems) { return maybe_pod_calloc<T>(aNumElems); }
-
-  template <typename T>
-  T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
-    return maybe_pod_realloc<T>(aPtr, aOldSize, aNewSize);
-  }
-
-  void reportAllocOverflow() const {}
-
-  MOZ_MUST_USE bool checkSimulatedOOM() const {
-    return true;
-  }
-};
+  BeginContentParse(aToken, aURL, aContentType);
+  AddContentParseData(aToken, aBuffer, aLength);
+  EndContentParse(aToken);
+}
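(A hedged usage sketch; the URL, content type, and token below are
illustrative only:)

    #include "mozilla/ArrayUtils.h"
    #include "mozilla/RecordReplay.h"

    static void RecordExampleParse()
    {
      static const char16_t kSource[] = u"function f() { return 1; }";
      static int gParseToken; // any address unique to this parse
      mozilla::recordreplay::NoteContentParse(&gParseToken,
                                              "https://example.com/app.js",
                                              "text/javascript",
                                              kSource,
                                              mozilla::ArrayLength(kSource) - 1);
    }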
 
 ///////////////////////////////////////////////////////////////////////////////
 // API inline function implementation
 ///////////////////////////////////////////////////////////////////////////////
 
 // Define inline wrappers on builds where recording/replaying is enabled.
 #if defined(XP_MACOSX) && defined(NIGHTLY_BUILD)
 
@@ -566,31 +467,12 @@ RecordReplayAssert(const char* aFormat, 
   if (IsRecordingOrReplaying()) {
     va_list ap;
     va_start(ap, aFormat);
     InternalRecordReplayAssert(aFormat, ap);
     va_end(ap);
   }
 }
 
-MFBT_API void InternalPrint(const char* aFormat, va_list aArgs);
-
-#define MOZ_MakeRecordReplayPrinter(aName, aSpewing)            \
-  static inline void                                            \
-  aName(const char* aFormat, ...)                               \
-  {                                                             \
-    if ((IsRecordingOrReplaying() || IsMiddleman()) && (!aSpewing || SpewEnabled())) { \
-      va_list ap;                                               \
-      va_start(ap, aFormat);                                    \
-      InternalPrint(aFormat, ap);                               \
-      va_end(ap);                                               \
-    }                                                           \
-  }
-
-MOZ_MakeRecordReplayPrinter(Print, false)
-MOZ_MakeRecordReplayPrinter(PrintSpew, true)
-
-#undef MOZ_MakeRecordReplayPrinter
-
 } // recordreplay
 } // mozilla
 
 #endif /* mozilla_RecordReplay_h */
--- a/mobile/android/base/java/org/mozilla/gecko/GeckoJavaSampler.java
+++ b/mobile/android/base/java/org/mozilla/gecko/GeckoJavaSampler.java
@@ -182,24 +182,32 @@ public class GeckoJavaSampler {
     public static void unpause() {
         synchronized (GeckoJavaSampler.class) {
             sSamplingRunnable.mPauseSampler = false;
         }
     }
 
     @WrapForJNI
     public static void stop() {
+        Thread samplingThread;
+
         synchronized (GeckoJavaSampler.class) {
             if (sSamplingThread == null) {
                 return;
             }
 
             sSamplingRunnable.mStopSampler = true;
+            samplingThread = sSamplingThread;
+            sSamplingThread = null;
+            sSamplingRunnable = null;
+        }
+
+        // Join outside the synchronized block so the sampler thread can make
+        // progress; retry if the join is interrupted.
+        boolean retry = true;
+        while (retry) {
             try {
-                sSamplingThread.join();
+                samplingThread.join();
+                retry = false;
             } catch (InterruptedException e) {
                 e.printStackTrace();
             }
-            sSamplingThread = null;
-            sSamplingRunnable = null;
         }
     }
 }
--- a/testing/web-platform/meta/MANIFEST.json
+++ b/testing/web-platform/meta/MANIFEST.json
@@ -318647,19 +318647,19 @@
     ]
    ],
    "css/css-logical/animation-002.html": [
     [
      "/css/css-logical/animation-002.html",
      {}
     ]
    ],
-   "css/css-logical/animation-003.tenative.html": [
-    [
-     "/css/css-logical/animation-003.tenative.html",
+   "css/css-logical/animation-003.tentative.html": [
+    [
+     "/css/css-logical/animation-003.tentative.html",
      {}
     ]
    ],
    "css/css-logical/logical-box-border-color.html": [
     [
      "/css/css-logical/logical-box-border-color.html",
      {}
     ]
@@ -521842,17 +521842,17 @@
   "css/css-logical/animation-001.html": [
    "361b8af532357e065f01504b9553d5f70cee38ae",
    "testharness"
   ],
   "css/css-logical/animation-002.html": [
    "205a6330ecf0bf69dc3fca0b4f4afa9850e3a782",
    "testharness"
   ],
-  "css/css-logical/animation-003.tenative.html": [
+  "css/css-logical/animation-003.tentative.html": [
    "bdb7e952eb7fecf402f64129a00b511d89470195",
    "testharness"
   ],
   "css/css-logical/cascading-001-ref.html": [
    "b95cd62ce3592f653aaa54de0dbc27e16618064b",
    "support"
   ],
   "css/css-logical/cascading-001.html": [
--- a/testing/web-platform/meta/css/css-logical/__dir__.ini
+++ b/testing/web-platform/meta/css/css-logical/__dir__.ini
@@ -1,1 +1,3 @@
-prefs: [dom.animations-api.core.enabled:true]
+prefs: [dom.animations-api.core.enabled:true,
+        dom.animations-api.getAnimations.enabled:true,
+        dom.animations-api.implicit-keyframes.enabled:true]
--- a/testing/web-platform/meta/css/css-logical/animation-001.html.ini
+++ b/testing/web-platform/meta/css/css-logical/animation-001.html.ini
@@ -1,10 +1,9 @@
 [animation-001.html]
-  prefs: [dom.animations-api.implicit-keyframes.enabled:true]
   [Logical shorthands follow the usual prioritization based on number of component longhands]
     expected: FAIL
     bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1289155, https://bugzilla.mozilla.org/show_bug.cgi?id=1370404
   [Physical longhands win over logical shorthands]
     expected: FAIL
     bug: https://bugzilla.mozilla.org/show_bug.cgi?id=1370404
   [Physical shorthands and logical shorthands can be mixed]
     expected: FAIL
rename from testing/web-platform/tests/css/css-logical/animation-003.tenative.html
rename to testing/web-platform/tests/css/css-logical/animation-003.tentative.html
--- a/toolkit/content/license.html
+++ b/toolkit/content/license.html
@@ -165,16 +165,17 @@
       <li><a href="about:license#hunspell-ru">Russian Spellchecking Dictionary License</a></li>
       <li><a href="about:license#sctp">SCTP Licenses</a></li>
       <li><a href="about:license#skia">Skia License</a></li>
       <li><a href="about:license#snappy">Snappy License</a></li>
       <li><a href="about:license#sprintf.js">sprintf.js License</a></li>
       <li><a href="about:license#sunsoft">SunSoft License</a></li>
       <li><a href="about:license#superfasthash">SuperFastHash License</a></li>
       <li><a href="about:license#synstructure">synstructure License</a></li>
+      <li><a href="about:license#udis86">udis86 License</a></li>
       <li><a href="about:license#unicase">unicase License</a></li>
       <li><a href="about:license#unicode">Unicode License</a></li>
       <li><a href="about:license#ucal">University of California License</a></li>
       <li><a href="about:license#unreachable">unreachable License</a></li>
       <li><a href="about:license#hunspell-en">English Spellchecking Dictionary Licenses</a></li>
       <li><a href="about:license#utf8-ranges">utf8-ranges License</a></li>
       <li><a href="about:license#v8">V8 License</a></li>
       <li><a href="about:license#validator">Validator License</a></li>
@@ -6109,16 +6110,48 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE F
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 THE SOFTWARE.
 </pre>
 
 
     <hr>
 
+    <h1><a id="udis86"></a>udis86 License</h1>
+
+    <p>This license applies to files in the directory
+    <code>toolkit/recordreplay/udis86</code>.</p>
+
+<pre>
+Copyright (c) 2002-2012, Vivek Thampi <vivek.mt@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+   this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+</pre>
+
+    <hr>
+
     <h1><a id="unicase"></a>unicase License</h1>
 
     <p>This license applies to files in the directory
     <code>third_party/rust/unicase</code>.</p>
 
 <pre>
 Copyright (c) 2014-2015 Sean McArthur
 
--- a/toolkit/moz.build
+++ b/toolkit/moz.build
@@ -12,16 +12,17 @@ DIRS += [
     'locales',
     'modules',
     'mozapps/downloads',
     'mozapps/extensions',
     'mozapps/handling',
     'mozapps/preferences',
     'pluginproblem',
     'profile',
+    'recordreplay',
     'themes',
 ]
 
 if CONFIG['MOZ_UPDATER'] and CONFIG['MOZ_WIDGET_TOOLKIT'] != 'android':
     DIRS += ['mozapps/update']
 
 if CONFIG['MOZ_MAINTENANCE_SERVICE'] or CONFIG['MOZ_UPDATER']:
     DIRS += [
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/Assembler.cpp
@@ -0,0 +1,345 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Assembler.h"
+
+#include "ProcessRecordReplay.h"
+#include "udis86/types.h"
+
+#include <sys/mman.h>
+
+namespace mozilla {
+namespace recordreplay {
+
+Assembler::Assembler()
+  : mCursor(nullptr)
+  , mCursorEnd(nullptr)
+  , mCanAllocateStorage(true)
+{}
+
+Assembler::Assembler(uint8_t* aStorage, size_t aSize)
+  : mCursor(aStorage)
+  , mCursorEnd(aStorage + aSize)
+  , mCanAllocateStorage(false)
+{}
+
+Assembler::~Assembler()
+{
+  // Patch each jump to the point where the jump's target was copied, if there
+  // is one.
+  for (auto pair : mJumps) {
+    uint8_t* source = pair.first;
+    uint8_t* target = pair.second;
+
+    for (auto copyPair : mCopiedInstructions) {
+      if (copyPair.first == target) {
+        PatchJump(source, copyPair.second);
+        break;
+      }
+    }
+  }
+}
+
+void
+Assembler::NoteOriginalInstruction(uint8_t* aIp)
+{
+  mCopiedInstructions.emplaceBack(aIp, Current());
+}
+
+void
+Assembler::Advance(size_t aSize)
+{
+  MOZ_RELEASE_ASSERT(aSize <= MaximumAdvance);
+  mCursor += aSize;
+}
+
+static const size_t JumpBytes = 17;
+
+uint8_t*
+Assembler::Current()
+{
+  // Reallocate the buffer if there is not enough space. We need enough for the
+  // maximum space used by any of the assembling functions, as well as for a
+  // following jump for fallthrough to the next allocated space.
+  if (size_t(mCursorEnd - mCursor) <= MaximumAdvance + JumpBytes) {
+    MOZ_RELEASE_ASSERT(mCanAllocateStorage);
+
+    // Allocate some writable, executable memory.
+    static const size_t BufferSize = PageSize;
+    uint8_t* buffer = new uint8_t[BufferSize];
+    UnprotectExecutableMemory(buffer, BufferSize);
+
+    if (mCursor) {
+      // Patch a jump for fallthrough from the last allocation.
+      MOZ_RELEASE_ASSERT(size_t(mCursorEnd - mCursor) >= JumpBytes);
+      PatchJump(mCursor, buffer);
+    }
+
+    mCursor = buffer;
+    mCursorEnd = &buffer[BufferSize];
+  }
+
+  return mCursor;
+}
+
+static void
+Push16(uint8_t** aIp, uint16_t aValue)
+{
+  (*aIp)[0] = 0x66;
+  (*aIp)[1] = 0x68;
+  *reinterpret_cast<uint16_t*>(*aIp + 2) = aValue;
+  (*aIp) += 4;
+}
+
+/* static */ void
+Assembler::PatchJump(uint8_t* aIp, void* aTarget)
+{
+  // Push the target literal onto the stack, 2 bytes at a time. This is
+  // apparently the best way of getting an arbitrary 8 byte literal onto the
+  // stack, as 4 byte literals we push will be sign extended to 8 bytes.
+  size_t ntarget = reinterpret_cast<size_t>(aTarget);
+  Push16(&aIp, ntarget >> 48);
+  Push16(&aIp, ntarget >> 32);
+  Push16(&aIp, ntarget >> 16);
+  Push16(&aIp, ntarget);
+  *aIp = 0xC3; // ret
+}
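+
+// For example, patching a jump to the (hypothetical) target 0x1122334455667788
+// emits the following JumpBytes (17) bytes:
+//
+//   66 68 22 11   pushw $0x1122
+//   66 68 44 33   pushw $0x3344
+//   66 68 66 55   pushw $0x5566
+//   66 68 88 77   pushw $0x7788
+//   C3            ret
+//
+// After the four pushes, the eight bytes at %rsp read as 0x1122334455667788,
+// which the ret then pops into %rip.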
+
+void
+Assembler::Jump(void* aTarget)
+{
+  PatchJump(Current(), aTarget);
+  mJumps.emplaceBack(Current(), (uint8_t*) aTarget);
+  Advance(JumpBytes);
+}
+
+static uint8_t
+OppositeJump(uint8_t aOpcode)
+{
+  // Get the opposite single byte jump opcode for a one or two byte conditional
+  // jump. Opposite opcodes are adjacent, e.g. 0x7C -> jl and 0x7D -> jge.
+  if (aOpcode >= 0x80 && aOpcode <= 0x8F) {
+    aOpcode -= 0x10;
+  } else {
+    MOZ_RELEASE_ASSERT(aOpcode >= 0x70 && aOpcode <= 0x7F);
+  }
+  return (aOpcode & 1) ? aOpcode - 1 : aOpcode + 1;
+}
+
+void
+Assembler::ConditionalJump(uint8_t aCode, void* aTarget)
+{
+  uint8_t* ip = Current();
+  ip[0] = OppositeJump(aCode);
+  ip[1] = (uint8_t) JumpBytes;
+  Advance(2);
+  Jump(aTarget);
+}
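+
+// For example, ConditionalJump(0x84 /* je rel32 */, aTarget) assembles the
+// equivalent of:
+//
+//   jne +17   ; 75 11, the opposite short jump over the long jump
+//   <17 byte PatchJump sequence to aTarget>
+//
+// so control reaches the long jump exactly when the original condition holds.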
+
+void
+Assembler::CopyInstruction(uint8_t* aIp, size_t aSize)
+{
+  MOZ_RELEASE_ASSERT(aSize <= MaximumInstructionLength);
+  memcpy(Current(), aIp, aSize);
+  Advance(aSize);
+}
+
+void
+Assembler::PushRax()
+{
+  NewInstruction(0x50);
+}
+
+void
+Assembler::PopRax()
+{
+  NewInstruction(0x58);
+}
+
+void
+Assembler::JumpToRax()
+{
+  NewInstruction(0xFF, 0xE0);
+}
+
+void
+Assembler::CallRax()
+{
+  NewInstruction(0xFF, 0xD0);
+}
+
+void
+Assembler::LoadRax(size_t aWidth)
+{
+  switch (aWidth) {
+  case 1: NewInstruction(0x8A, 0x00); break;
+  case 2: NewInstruction(0x66, 0x8B, 0x00); break;
+  case 4: NewInstruction(0x8B, 0x00); break;
+  case 8: NewInstruction(0x48, 0x8B, 0x00); break;
+  default: MOZ_CRASH();
+  }
+}
+
+void
+Assembler::CompareRaxWithTopOfStack()
+{
+  NewInstruction(0x48, 0x39, 0x04, 0x24);
+}
+
+void
+Assembler::PushRbx()
+{
+  NewInstruction(0x53);
+}
+
+void
+Assembler::PopRbx()
+{
+  NewInstruction(0x5B);
+}
+
+void
+Assembler::StoreRbxToRax(size_t aWidth)
+{
+  switch (aWidth) {
+  case 1: NewInstruction(0x88, 0x18); break;
+  case 2: NewInstruction(0x66, 0x89, 0x18); break;
+  case 4: NewInstruction(0x89, 0x18); break;
+  case 8: NewInstruction(0x48, 0x89, 0x18); break;
+  default: MOZ_CRASH();
+  }
+}
+
+void
+Assembler::CompareValueWithRax(uint8_t aValue, size_t aWidth)
+{
+  switch (aWidth) {
+  case 1: NewInstruction(0x3C, aValue); break;
+  case 2: NewInstruction(0x66, 0x83, 0xF8, aValue); break;
+  case 4: NewInstruction(0x83, 0xF8, aValue); break;
+  case 8: NewInstruction(0x48, 0x83, 0xF8, aValue); break;
+  default: MOZ_CRASH();
+  }
+}
+
+static const size_t MoveImmediateBytes = 10;
+
+/* static */ void
+Assembler::PatchMoveImmediateToRax(uint8_t* aIp, void* aValue)
+{
+  aIp[0] = 0x40 | (1 << 3);
+  aIp[1] = 0xB8;
+  *reinterpret_cast<void**>(aIp + 2) = aValue;
+}
+
+void
+Assembler::MoveImmediateToRax(void* aValue)
+{
+  PatchMoveImmediateToRax(Current(), aValue);
+  Advance(MoveImmediateBytes);
+}
+
+void
+Assembler::MoveRaxToRegister(/*ud_type*/ int aRegister)
+{
+  MOZ_RELEASE_ASSERT(aRegister == NormalizeRegister(aRegister));
+
+  uint8_t* ip = Current();
+  if (aRegister <= UD_R_RDI) {
+    ip[0] = 0x48;
+    ip[1] = 0x89;
+    ip[2] = 0xC0 + aRegister - UD_R_RAX;
+  } else {
+    ip[0] = 0x49;
+    ip[1] = 0x89;
+    ip[2] = 0xC0 + aRegister - UD_R_R8;
+  }
+  Advance(3);
+}
+
+void
+Assembler::MoveRegisterToRax(/*ud_type*/ int aRegister)
+{
+  MOZ_RELEASE_ASSERT(aRegister == NormalizeRegister(aRegister));
+
+  uint8_t* ip = Current();
+  if (aRegister <= UD_R_RDI) {
+    ip[0] = 0x48;
+    ip[1] = 0x89;
+    ip[2] = 0xC0 + (aRegister - UD_R_RAX) * 8;
+  } else {
+    ip[0] = 0x4C;
+    ip[1] = 0x89;
+    ip[2] = 0xC0 + (aRegister - UD_R_R8) * 8;
+  }
+  Advance(3);
+}
+
+/* static */ /*ud_type*/ int
+Assembler::NormalizeRegister(/*ud_type*/ int aRegister)
+{
+  if (aRegister >= UD_R_AL && aRegister <= UD_R_R15B) {
+    return aRegister - UD_R_AL + UD_R_RAX;
+  }
+  if (aRegister >= UD_R_AX && aRegister <= UD_R_R15W) {
+    return aRegister - UD_R_AX + UD_R_RAX;
+  }
+  if (aRegister >= UD_R_EAX && aRegister <= UD_R_R15D) {
+    return aRegister - UD_R_EAX + UD_R_RAX;
+  }
+  if (aRegister >= UD_R_RAX && aRegister <= UD_R_R15) {
+    return aRegister;
+  }
+  return UD_NONE;
+}
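+
+// For example, NormalizeRegister maps UD_R_AL, UD_R_AX, and UD_R_EAX to
+// UD_R_RAX, and returns UD_NONE for any non general purpose register.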
+
+/* static */ bool
+Assembler::CanPatchShortJump(uint8_t* aIp, void* aTarget)
+{
+  return (aIp + 2 - 128 <= aTarget) && (aIp + 2 + 127 >= aTarget);
+}
+
+/* static */ void
+Assembler::PatchShortJump(uint8_t* aIp, void* aTarget)
+{
+  MOZ_RELEASE_ASSERT(CanPatchShortJump(aIp, aTarget));
+  aIp[0] = 0xEB;
+  aIp[1] = uint8_t(static_cast<uint8_t*>(aTarget) - aIp - 2);
+}
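+
+// For example, PatchShortJump(aIp, aIp + 5) writes EB 03: the displacement is
+// relative to the end of the two byte instruction.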
+
+/* static */ void
+Assembler::PatchJumpClobberRax(uint8_t* aIp, void* aTarget)
+{
+  PatchMoveImmediateToRax(aIp, aTarget);
+  aIp[10] = 0x50; // push %rax
+  aIp[11] = 0xC3; // ret
+}
+
+/* static */ void
+Assembler::PatchClobber(uint8_t* aIp)
+{
+  aIp[0] = 0xCC; // int3
+}
+
+static uint8_t*
+PageStart(uint8_t* aPtr)
+{
+  static_assert(sizeof(size_t) == sizeof(uintptr_t), "Unsupported Platform");
+  return reinterpret_cast<uint8_t*>(reinterpret_cast<size_t>(aPtr) & ~(PageSize - 1));
+}
+
+void
+UnprotectExecutableMemory(uint8_t* aAddress, size_t aSize)
+{
+  MOZ_ASSERT(aSize);
+  uint8_t* pageStart = PageStart(aAddress);
+  uint8_t* pageEnd = PageStart(aAddress + aSize - 1) + PageSize;
+  int ret = mprotect(pageStart, pageEnd - pageStart, PROT_READ | PROT_EXEC | PROT_WRITE);
+  MOZ_RELEASE_ASSERT(ret >= 0);
+}
+
+} // namespace recordreplay
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/Assembler.h
@@ -0,0 +1,181 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_Assembler_h
+#define mozilla_recordreplay_Assembler_h
+
+#include "InfallibleVector.h"
+
+#include <utility>
+
+namespace mozilla {
+namespace recordreplay {
+
+// Assembler for x64 instructions. This is a simple assembler that is primarily
+// designed for use in copying instructions from a function that is being
+// redirected.
+class Assembler
+{
+public:
+  // Create an assembler that allocates its own instruction storage. Assembled
+  // code will never be reclaimed by the system.
+  Assembler();
+
+  // Create an assembler that uses the specified memory range for instruction
+  // storage.
+  Assembler(uint8_t* aStorage, size_t aSize);
+
+  ~Assembler();
+
+  // Mark the point at which we start copying an instruction in the original
+  // range.
+  void NoteOriginalInstruction(uint8_t* aIp);
+
+  // Get the address where the next assembled instruction will be placed.
+  uint8_t* Current();
+
+///////////////////////////////////////////////////////////////////////////////
+// Routines for assembling instructions in new instruction storage
+///////////////////////////////////////////////////////////////////////////////
+
+  // Jump to aTarget. If aTarget is in the range of instructions being copied,
+  // the target will be the copy of aTarget instead.
+  void Jump(void* aTarget);
+
+  // Conditionally jump to aTarget, depending on the short jump opcode aCode.
+  // If aTarget is in the range of instructions being copied, the target will
+  // be the copy of aTarget instead.
+  void ConditionalJump(uint8_t aCode, void* aTarget);
+
+  // Copy an instruction verbatim from aIp.
+  void CopyInstruction(uint8_t* aIp, size_t aSize);
+
+  // push/pop %rax
+  void PushRax();
+  void PopRax();
+
+  // jump *%rax
+  void JumpToRax();
+
+  // call *%rax
+  void CallRax();
+
+  // movq/movl/movb 0(%rax), %rax
+  void LoadRax(size_t aWidth);
+
+  // cmpq %rax, 0(%rsp)
+  void CompareRaxWithTopOfStack();
+
+  // push/pop %rbx
+  void PushRbx();
+  void PopRbx();
+
+  // movq/movl/movb %rbx, 0(%rax)
+  void StoreRbxToRax(size_t aWidth);
+
+  // cmpq/cmpb $literal8, %rax
+  void CompareValueWithRax(uint8_t aValue, size_t aWidth);
+
+  // movq $value, %rax
+  void MoveImmediateToRax(void* aValue);
+
+  // movq %rax, register
+  void MoveRaxToRegister(/*ud_type*/ int aRegister);
+
+  // movq register, %rax
+  void MoveRegisterToRax(/*ud_type*/ int aRegister);
+
+  // Normalize a Udis86 register to its 8 byte version, returning UD_NONE/zero
+  // for unexpected registers.
+  static /*ud_type*/ int NormalizeRegister(/*ud_type*/ int aRegister);
+
+///////////////////////////////////////////////////////////////////////////////
+// Routines for assembling instructions at arbitrary locations
+///////////////////////////////////////////////////////////////////////////////
+
+  // Return whether it is possible to patch a short jump to aTarget from aIp.
+  static bool CanPatchShortJump(uint8_t* aIp, void* aTarget);
+
+  // Patch a short jump to aTarget at aIp.
+  static void PatchShortJump(uint8_t* aIp, void* aTarget);
+
+  // Patch a long jump to aTarget at aIp. Rax may be clobbered.
+  static void PatchJumpClobberRax(uint8_t* aIp, void* aTarget);
+
+  // Patch the value used in an earlier MoveImmediateToRax call.
+  static void PatchMoveImmediateToRax(uint8_t* aIp, void* aValue);
+
+  // Patch an int3 breakpoint instruction at aIp.
+  static void PatchClobber(uint8_t* aIp);
+
+private:
+  // Patch a jump that doesn't clobber any registers.
+  static void PatchJump(uint8_t* aIp, void* aTarget);
+
+  // Consume some instruction storage.
+  void Advance(size_t aSize);
+
+  // The maximum amount we can write at a time without a jump potentially
+  // being introduced into the instruction stream.
+  static const size_t MaximumAdvance = 20;
+
+  inline size_t CountBytes() { return 0; }
+
+  template <typename... Tail>
+  inline size_t CountBytes(uint8_t aByte, Tail... aMoreBytes) {
+    return 1 + CountBytes(aMoreBytes...);
+  }
+
+  inline void CopyBytes(uint8_t* aIp) {}
+
+  template <typename... Tail>
+  inline void CopyBytes(uint8_t* aIp, uint8_t aByte, Tail... aMoreBytes) {
+    *aIp = aByte;
+    CopyBytes(aIp + 1, aMoreBytes...);
+  }
+
+  // Write a complete instruction with bytes specified as parameters.
+  template <typename... ByteList>
+  inline void NewInstruction(ByteList... aBytes) {
+    size_t numBytes = CountBytes(aBytes...);
+    MOZ_ASSERT(numBytes <= MaximumAdvance);
+    uint8_t* ip = Current();
+    CopyBytes(ip, aBytes...);
+    Advance(numBytes);
+  }
+
+  // Storage for assembling new instructions.
+  uint8_t* mCursor;
+  uint8_t* mCursorEnd;
+  bool mCanAllocateStorage;
+
+  // Association between the instruction original and copy pointers, for all
+  // instructions that have been copied.
+  InfallibleVector<std::pair<uint8_t*,uint8_t*>> mCopiedInstructions;
+
+  // For jumps we have copied, association between the source (in generated
+  // code) and target (in the original code) of the jump. In the assembler's
+  // destructor, each source is patched to point at the generated-code copy
+  // of its target, if there is one.
+  InfallibleVector<std::pair<uint8_t*,uint8_t*>> mJumps;
+};
+
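+// Example of assembling a small stub (a sketch; the redirection machinery
+// that actually drives the assembler lives in the ProcessRedirect*.cpp
+// files):
+//
+//   Assembler masm;
+//   masm.PushRax();
+//   masm.MoveImmediateToRax(reinterpret_cast<void*>(Hook)); // hypothetical
+//   masm.CallRax();
+//   masm.PopRax();
+//   masm.Jump(resumeAddress); // hypothetical address after the patched bytes
+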
+// The number of instruction bytes required for a short jump.
+static const size_t ShortJumpBytes = 2;
+
+// The number of instruction bytes required for a jump that may clobber rax.
+static const size_t JumpBytesClobberRax = 12;
+
+// The maximum byte length of an x86/x64 instruction.
+static const size_t MaximumInstructionLength = 15;
+
+// Make a region of memory RWX.
+void UnprotectExecutableMemory(uint8_t* aAddress, size_t aSize);
+
+} // recordreplay
+} // mozilla
+
+#endif // mozilla_recordreplay_Assembler_h
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/CallFunction.h
@@ -0,0 +1,116 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_CallFunction_h
+#define mozilla_recordreplay_CallFunction_h
+
+namespace mozilla {
+namespace recordreplay {
+
+// These macros define functions for calling a void* function pointer with
+// a particular ABI and arbitrary arguments. In principle we could do this
+// with varargs (i.e. cast to 'int (ABI *)(...)' before calling), but MSVC
+// treats 'int (__stdcall *)(...)' as 'int (__cdecl *)(...)', unfortunately.
+//
+// After instantiating DefineAllCallFunctions, the resulting functions will
+// be overloaded and have the form, for a given ABI:
+//
+// template <typename ReturnType>
+// ReturnType CallFunctionABI(void* fn);
+//
+// template <typename ReturnType, typename T0>
+// ReturnType CallFunctionABI(void* fn, T0 a0);
+//
+// template <typename ReturnType, typename T0, typename T1>
+// ReturnType CallFunctionABI(void* fn, T0 a0, T1 a1);
+//
+// And so forth.
+#define DefineCallFunction(aABI, aReturnType, aFormals, aFormalTypes, aActuals) \
+  static inline aReturnType CallFunction ##aABI aFormals {              \
+    MOZ_ASSERT(aFn);                                                    \
+    return BitwiseCast<aReturnType (aABI *) aFormalTypes>(aFn) aActuals; \
+  }
+#define DefineAllCallFunctions(aABI)                                  \
+  template <typename ReturnType>                                      \
+  DefineCallFunction(aABI, ReturnType, (void* aFn), (), ())           \
+  template <typename ReturnType, typename T0>                         \
+  DefineCallFunction(aABI, ReturnType,                                \
+                     (void* aFn, T0 a0), (T0), (a0))                  \
+  template <typename ReturnType, typename T0, typename T1>            \
+  DefineCallFunction(aABI, ReturnType,                                \
+                     (void* aFn, T0 a0, T1 a1), (T0, T1), (a0, a1))   \
+  template <typename ReturnType, typename T0, typename T1, typename T2> \
+  DefineCallFunction(aABI, ReturnType,                                \
+                     (void* aFn, T0 a0, T1 a1, T2 a2),                \
+                     (T0, T1, T2), (a0, a1, a2))                      \
+  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3> \
+  DefineCallFunction(aABI, ReturnType,                                \
+                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3),         \
+                     (T0, T1, T2, T3),                                \
+                     (a0, a1, a2, a3))                                \
+  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
+            typename T4>                                              \
+  DefineCallFunction(aABI, ReturnType,                                \
+                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4),  \
+                     (T0, T1, T2, T3, T4),                            \
+                     (a0, a1, a2, a3, a4))                            \
+  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
+            typename T4, typename T5>                                 \
+  DefineCallFunction(aABI, ReturnType,                                \
+                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5), \
+                     (T0, T1, T2, T3, T4, T5),                        \
+                     (a0, a1, a2, a3, a4, a5))                        \
+  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
+            typename T4, typename T5, typename T6>                    \
+  DefineCallFunction(aABI, ReturnType,                                \
+                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
+                      T6 a6),                                         \
+                     (T0, T1, T2, T3, T4, T5, T6),                    \
+                     (a0, a1, a2, a3, a4, a5, a6))                    \
+  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
+            typename T4, typename T5, typename T6, typename T7>       \
+  DefineCallFunction(aABI, ReturnType,                                \
+                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
+                      T6 a6, T7 a7),                                  \
+                     (T0, T1, T2, T3, T4, T5, T6, T7),                \
+                     (a0, a1, a2, a3, a4, a5, a6, a7))                \
+  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
+            typename T4, typename T5, typename T6, typename T7,       \
+            typename T8>                                              \
+  DefineCallFunction(aABI, ReturnType,                                \
+                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
+                      T6 a6, T7 a7, T8 a8),                           \
+                     (T0, T1, T2, T3, T4, T5, T6, T7, T8),            \
+                     (a0, a1, a2, a3, a4, a5, a6, a7, a8))            \
+  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
+            typename T4, typename T5, typename T6, typename T7,       \
+            typename T8, typename T9>                                 \
+  DefineCallFunction(aABI, ReturnType,                                \
+                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
+                      T6 a6, T7 a7, T8 a8, T9 a9),                    \
+                     (T0, T1, T2, T3, T4, T5, T6, T7, T8, T9),        \
+                     (a0, a1, a2, a3, a4, a5, a6, a7, a8, a9))        \
+  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
+            typename T4, typename T5, typename T6, typename T7,       \
+            typename T8, typename T9, typename T10>                   \
+  DefineCallFunction(aABI, ReturnType,                                \
+                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
+                      T6 a6, T7 a7, T8 a8, T9 a9, T10 a10),           \
+                     (T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10),   \
+                     (a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10))   \
+  template <typename ReturnType, typename T0, typename T1, typename T2, typename T3, \
+            typename T4, typename T5, typename T6, typename T7,       \
+            typename T8, typename T9, typename T10, typename T11>     \
+  DefineCallFunction(aABI, ReturnType,                                \
+                     (void* aFn, T0 a0, T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, \
+                      T6 a6, T7 a7, T8 a8, T9 a9, T10 a10, T11 a11),  \
+                     (T0, T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11), \
+                     (a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11))
+
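+// For instance, a platform might instantiate the overloads as follows (a
+// sketch; the ABI token used by the real redirection code may differ):
+//
+//   DefineAllCallFunctions(__stdcall)
+//
+//   // Call a void* known to point to 'int (__stdcall *)(int, int)':
+//   int rv = CallFunction__stdcall<int>(aFn, 1, 2);
+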
+} // recordreplay
+} // mozilla
+
+#endif // mozilla_recordreplay_CallFunction_h
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/Callback.cpp
@@ -0,0 +1,142 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Callback.h"
+
+#include "ipc/ChildIPC.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/RecordReplay.h"
+#include "mozilla/StaticMutex.h"
+#include "ProcessRewind.h"
+#include "Thread.h"
+#include "ValueIndex.h"
+
+namespace mozilla {
+namespace recordreplay {
+
+static ValueIndex* gCallbackData;
+static StaticMutexNotRecorded gCallbackMutex;
+
+void
+RegisterCallbackData(void* aData)
+{
+  MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
+  MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
+  if (!aData) {
+    return;
+  }
+
+  RecordReplayAssert("RegisterCallbackData");
+
+  AutoOrderedAtomicAccess at;
+  StaticMutexAutoLock lock(gCallbackMutex);
+  if (!gCallbackData) {
+    gCallbackData = new ValueIndex();
+  }
+  gCallbackData->Insert(aData);
+}
+
+void
+BeginCallback(size_t aCallbackId)
+{
+  MOZ_RELEASE_ASSERT(IsRecording());
+  MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
+
+  Thread* thread = Thread::Current();
+  if (thread->IsMainThread()) {
+    child::EndIdleTime();
+  }
+  thread->SetPassThrough(false);
+
+  thread->Events().RecordOrReplayThreadEvent(ThreadEvent::ExecuteCallback);
+  thread->Events().WriteScalar(aCallbackId);
+}
+
+void
+EndCallback()
+{
+  MOZ_RELEASE_ASSERT(IsRecording());
+  MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
+  MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
+
+  Thread* thread = Thread::Current();
+  if (thread->IsMainThread()) {
+    child::BeginIdleTime();
+  }
+  thread->SetPassThrough(true);
+}
+
+void
+SaveOrRestoreCallbackData(void** aData)
+{
+  MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
+  MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
+  MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
+  MOZ_RELEASE_ASSERT(gCallbackData);
+
+  Thread* thread = Thread::Current();
+
+  RecordReplayAssert("RestoreCallbackData");
+
+  thread->Events().RecordOrReplayThreadEvent(ThreadEvent::RestoreCallbackData);
+
+  size_t index = 0;
+  if (IsRecording() && *aData) {
+    StaticMutexAutoLock lock(gCallbackMutex);
+    index = gCallbackData->GetIndex(*aData);
+  }
+  thread->Events().RecordOrReplayScalar(&index);
+
+  if (IsReplaying()) {
+    *aData = const_cast<void*>(gCallbackData->GetValue(index));
+  }
+}
+
+void
+RemoveCallbackData(void* aData)
+{
+  MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
+
+  StaticMutexAutoLock lock(gCallbackMutex);
+  gCallbackData->Remove(aData);
+}
+
+void
+PassThroughThreadEventsAllowCallbacks(const std::function<void()>& aFn)
+{
+  MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
+  MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
+
+  Thread* thread = Thread::Current();
+
+  if (IsRecording()) {
+    if (thread->IsMainThread()) {
+      child::BeginIdleTime();
+    }
+    thread->SetPassThrough(true);
+    aFn();
+    if (thread->IsMainThread()) {
+      child::EndIdleTime();
+    }
+    thread->SetPassThrough(false);
+    thread->Events().RecordOrReplayThreadEvent(ThreadEvent::CallbacksFinished);
+  } else {
+    while (true) {
+      ThreadEvent ev = (ThreadEvent) thread->Events().ReadScalar();
+      if (ev != ThreadEvent::ExecuteCallback) {
+        if (ev != ThreadEvent::CallbacksFinished) {
+          child::ReportFatalError("Unexpected event while replaying callback events");
+        }
+        break;
+      }
+      size_t id = thread->Events().ReadScalar();
+      ReplayInvokeCallback(id);
+    }
+  }
+}
+
+} // namespace recordreplay
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/Callback.h
@@ -0,0 +1,96 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_Callback_h
+#define mozilla_recordreplay_Callback_h
+
+#include "mozilla/GuardObjects.h"
+
+#include <functional>
+
+namespace mozilla {
+namespace recordreplay {
+
+// Callbacks Overview.
+//
+// Record/replay callbacks are used to record and replay the use of callbacks
+// within system libraries to reenter Gecko code. There are three challenges
+// to replaying callbacks:
+//
+// 1. Invocations of the callbacks must be replayed so that they occur inside
+//    the same system call and in the same order as during recording.
+//
+// 2. Data passed to the callback which originates in Gecko itself (e.g.
+//    opaque data pointers) need to match up with the Gecko data which was
+//    passed to the callback while recording.
+//
+// 3. Data passed to the callback which originates in the system library also
+//    needs to match up with the data passed while recording.
+//
+// Each platform defines a CallbackEvent enum with the different callback
+// signatures that the platform is able to redirect. Callback wrapper functions
+// are then defined for each callback event.
+//
+// The following additional steps are taken to handle #1 above:
+//
+// A. System libraries which Gecko callbacks are passed to are redirected so
+//    that they replace the Gecko callback with the callback wrapper for that
+//    signature.
+//
+// B. When recording, system libraries which can invoke Gecko callbacks are
+//    redirected to call the library API inside a call to
+//    PassThroughThreadEventsAllowCallbacks.
+//
+// C. When a callback wrapper is invoked within the library, it calls
+//    {Begin,End}Callback to stop passing through thread events while the
+//    callback executes.
+//
+// D. {Begin,End}Callback additionally adds ExecuteCallback events for the
+//    thread, and PassThroughThreadEventsAllowCallbacks adds a
+//    CallbacksFinished event at the end. While replaying, calling
+//    PassThroughThreadEventsAllowCallbacks will read these callback events
+//    from the thread's events file and play back calls to the wrappers which
+//    executed while recording.
+//
+// #2 above is handled with the callback data API below. When a Gecko callback
+// or opaque data pointer is passed to a system library API, that API is
+// redirected so that it will call RegisterCallbackData on the Gecko pointer.
+// Later, when the callback wrapper actually executes, it can use
+// SaveOrRestoreCallbackData to record which Gecko pointer was used and later,
+// during replay, restore the corresponding value in that execution.
+//
+// #3 above can be recorded and replayed using the standard
+// RecordReplay{Value,Bytes} functions, in a similar manner to the handling of
+// outputs of redirected functions.
+
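+// For illustration, a wrapper for a callback of type 'void (*)(void*)' could
+// be shaped as follows (a sketch only; the callback event id and the dispatch
+// helper are hypothetical, and real wrappers must also handle the replay
+// case, where they are invoked via ReplayInvokeCallback):
+//
+//   static void
+//   VoidPtrCallbackWrapper(void* aData)
+//   {
+//     BeginCallback(CallbackEvent_VoidPtr); // hypothetical event id
+//     SaveOrRestoreCallbackData(&aData);
+//     InvokeGeckoCallback(aData);           // hypothetical dispatch helper
+//     EndCallback();
+//   }
+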
+// Note or remove a pointer passed to a system library API which might be a
+// Gecko callback or a data pointer used by a Gecko callback.
+void RegisterCallbackData(void* aData);
+void RemoveCallbackData(void* aData);
+
+// Record/replay a pointer that was passed to RegisterCallbackData earlier.
+void SaveOrRestoreCallbackData(void** aData);
+
+// If recording, call aFn with events passed through, allowing Gecko callbacks
+// to execute within aFn. If replaying, execute only the Gecko callbacks which
+// executed while recording.
+void PassThroughThreadEventsAllowCallbacks(const std::function<void()>& aFn);
+
+// Within a callback wrapper, bracket the execution of the code for the Gecko
+// callback and record the callback as having executed. This stops passing
+// through thread events so that behaviors in the Gecko callback are
+// recorded/replayed.
+void BeginCallback(size_t aCallbackId);
+void EndCallback();
+
+// During replay, invoke a callback with the specified id. This is platform
+// specific and is defined in the various ProcessRedirect*.cpp files.
+void ReplayInvokeCallback(size_t aCallbackId);
+
+} // namespace recordreplay
+} // namespace mozilla
+
+#endif // mozilla_recordreplay_Callback_h
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/ChunkAllocator.h
@@ -0,0 +1,108 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_ChunkAllocator_h
+#define mozilla_recordreplay_ChunkAllocator_h
+
+#include "SpinLock.h"
+
+namespace mozilla {
+namespace recordreplay {
+
+// ChunkAllocator is a simple allocator class for creating objects which can be
+// fetched by their integer id. Objects are stored as a linked list of arrays;
+// like a linked list, existing entries can be accessed without taking or
+// holding a lock, and using an array in each element mitigates the runtime
+// cost of O(n) lookup.
+//
+// ChunkAllocator contents are never destroyed.
+template <typename T>
+class ChunkAllocator
+{
+  struct Chunk;
+  typedef Atomic<Chunk*, SequentiallyConsistent, Behavior::DontPreserve> ChunkPointer;
+
+  // A page-sized block holding a next pointer and an array of as many things
+  // as possible.
+  struct Chunk
+  {
+    uint8_t mStorage[PageSize - sizeof(Chunk*)];
+    ChunkPointer mNext;
+    Chunk() : mStorage{}, mNext(nullptr) {}
+
+    static size_t MaxThings() {
+      return sizeof(mStorage) / sizeof(T);
+    }
+
+    T* GetThing(size_t i) {
+      MOZ_RELEASE_ASSERT(i < MaxThings());
+      return reinterpret_cast<T*>(&mStorage[i * sizeof(T)]);
+    }
+  };
+
+  ChunkPointer mFirstChunk;
+  Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> mCapacity;
+  SpinLock mLock;
+
+  void EnsureChunk(ChunkPointer* aChunk) {
+    if (!*aChunk) {
+      *aChunk = new Chunk();
+      mCapacity += Chunk::MaxThings();
+    }
+  }
+
+  ChunkAllocator(const ChunkAllocator&) = delete;
+  ChunkAllocator& operator=(const ChunkAllocator&) = delete;
+
+public:
+  // ChunkAllocators are allocated in static storage and should not have
+  // constructors. Their memory will be initially zero.
+  ChunkAllocator() = default;
+  ~ChunkAllocator() = default;
+
+  // Get an existing entry from the allocator.
+  inline T* Get(size_t aId) {
+    Chunk* chunk = mFirstChunk;
+    while (aId >= Chunk::MaxThings()) {
+      aId -= Chunk::MaxThings();
+      chunk = chunk->mNext;
+    }
+    return chunk->GetThing(aId);
+  }
+
+  // Get an entry from the allocator, or null if aId is outside the allocated
+  // capacity. This may return an entry that has not been created yet.
+  inline T* MaybeGet(size_t aId) {
+    return (aId < mCapacity) ? Get(aId) : nullptr;
+  }
+
+  // Create a new entry with the specified ID. This must not be called on IDs
+  // that have already been used with this allocator.
+  inline T* Create(size_t aId) {
+    if (aId < mCapacity) {
+      T* res = Get(aId);
+      return new(res) T();
+    }
+
+    AutoSpinLock lock(mLock);
+    ChunkPointer* pchunk = &mFirstChunk;
+    while (aId >= Chunk::MaxThings()) {
+      aId -= Chunk::MaxThings();
+      EnsureChunk(pchunk);
+      Chunk* chunk = *pchunk;
+      pchunk = &chunk->mNext;
+    }
+    EnsureChunk(pchunk);
+    Chunk* chunk = *pchunk;
+    T* res = chunk->GetThing(aId);
+    return new(res) T();
+  }
+};
+
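+// Example usage (a sketch; the element type is hypothetical, and its zeroed
+// memory must be a valid pre-construction state):
+//
+//   static ChunkAllocator<LockInfo> gLockInfo;
+//
+//   LockInfo* info = gLockInfo.Create(aLockId); // first use of this id
+//   LockInfo* same = gLockInfo.Get(aLockId);    // later, lock-free lookup
+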
+} // namespace recordreplay
+} // namespace mozilla
+
+#endif // mozilla_recordreplay_ChunkAllocator_h
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/DirtyMemoryHandler.cpp
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "DirtyMemoryHandler.h"
+
+#include "ipc/ChildIPC.h"
+#include "mozilla/Sprintf.h"
+#include "MemorySnapshot.h"
+#include "Thread.h"
+
+#include <mach/exc.h>
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <sys/time.h>
+
+namespace mozilla {
+namespace recordreplay {
+
+static mach_port_t gDirtyMemoryExceptionPort;
+
+// See AsmJSSignalHandlers.cpp.
+static const mach_msg_id_t sExceptionId = 2405;
+
+// This definition was generated by mig (the Mach Interface Generator) for the
+// routine 'exception_raise' (exc.defs). See js/src/wasm/WasmSignalHandlers.cpp.
+#pragma pack(4)
+typedef struct {
+  mach_msg_header_t Head;
+  /* start of the kernel processed data */
+  mach_msg_body_t msgh_body;
+  mach_msg_port_descriptor_t thread;
+  mach_msg_port_descriptor_t task;
+  /* end of the kernel processed data */
+  NDR_record_t NDR;
+  exception_type_t exception;
+  mach_msg_type_number_t codeCnt;
+  int64_t code[2];
+} Request__mach_exception_raise_t;
+#pragma pack()
+
+typedef struct {
+  Request__mach_exception_raise_t body;
+  mach_msg_trailer_t trailer;
+} ExceptionRequest;
+
+static void
+DirtyMemoryExceptionHandlerThread(void*)
+{
+  kern_return_t kret;
+
+  while (true) {
+    ExceptionRequest request;
+    kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
+                    gDirtyMemoryExceptionPort, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+    kern_return_t replyCode = KERN_FAILURE;
+    if (kret == KERN_SUCCESS &&
+        request.body.Head.msgh_id == sExceptionId &&
+        request.body.exception == EXC_BAD_ACCESS &&
+        request.body.codeCnt == 2)
+    {
+      uint8_t* faultingAddress = (uint8_t*) request.body.code[1];
+      if (HandleDirtyMemoryFault(faultingAddress)) {
+        replyCode = KERN_SUCCESS;
+      } else {
+        child::ReportFatalError("HandleDirtyMemoryFault failed %p %s", faultingAddress,
+                                gMozCrashReason ? gMozCrashReason : "");
+      }
+    } else {
+      child::ReportFatalError("DirtyMemoryExceptionHandlerThread mach_msg returned unexpected data");
+    }
+
+    __Reply__exception_raise_t reply;
+    reply.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
+    reply.Head.msgh_size = sizeof(reply);
+    reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
+    reply.Head.msgh_local_port = MACH_PORT_NULL;
+    reply.Head.msgh_id = request.body.Head.msgh_id + 100;
+    reply.NDR = NDR_record;
+    reply.RetCode = replyCode;
+    mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
+             MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+  }
+}
+
+void
+SetupDirtyMemoryHandler()
+{
+  // Allow repeated calls.
+  static bool hasDirtyMemoryHandler = false;
+  if (hasDirtyMemoryHandler) {
+    return;
+  }
+  hasDirtyMemoryHandler = true;
+
+  MOZ_RELEASE_ASSERT(AreThreadEventsPassedThrough());
+  kern_return_t kret;
+
+  // Get a port which can send and receive data.
+  kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &gDirtyMemoryExceptionPort);
+  MOZ_RELEASE_ASSERT(kret == KERN_SUCCESS);
+
+  kret = mach_port_insert_right(mach_task_self(),
+                                gDirtyMemoryExceptionPort, gDirtyMemoryExceptionPort,
+                                MACH_MSG_TYPE_MAKE_SEND);
+  MOZ_RELEASE_ASSERT(kret == KERN_SUCCESS);
+
+  // Create a thread to block on reading the port.
+  Thread::SpawnNonRecordedThread(DirtyMemoryExceptionHandlerThread, nullptr);
+
+  // Set exception ports on the entire task. Unfortunately, this clobbers any
+  // other exception ports for the task, and forwarding to those other ports
+  // is not easy to get right.
+  kret = task_set_exception_ports(mach_task_self(),
+                                  EXC_MASK_BAD_ACCESS,
+                                  gDirtyMemoryExceptionPort,
+                                  EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
+                                  THREAD_STATE_NONE);
+  MOZ_RELEASE_ASSERT(kret == KERN_SUCCESS);
+}
+
+} // namespace recordreplay
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/DirtyMemoryHandler.h
@@ -0,0 +1,20 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_DirtyMemoryHandler_h
+#define mozilla_recordreplay_DirtyMemoryHandler_h
+
+namespace mozilla {
+namespace recordreplay {
+
+// Set up a handler to catch SEGV hardware exceptions and pass them on to
+// HandleDirtyMemoryFault in MemorySnapshot.h for handling.
+void SetupDirtyMemoryHandler();
+
+} // namespace recordreplay
+} // namespace mozilla
+
+#endif // mozilla_recordreplay_DirtyMemoryHandler_h
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/File.cpp
@@ -0,0 +1,446 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "File.h"
+
+#include "ipc/ChildIPC.h"
+#include "mozilla/Compression.h"
+#include "mozilla/Sprintf.h"
+#include "ProcessRewind.h"
+#include "SpinLock.h"
+
+#include <algorithm>
+
+namespace mozilla {
+namespace recordreplay {
+
+///////////////////////////////////////////////////////////////////////////////
+// Stream
+///////////////////////////////////////////////////////////////////////////////
+
+void
+Stream::ReadBytes(void* aData, size_t aSize)
+{
+  MOZ_RELEASE_ASSERT(mFile->OpenForReading());
+
+  size_t totalRead = 0;
+
+  while (true) {
+    // Read what we can from the data buffer.
+    MOZ_RELEASE_ASSERT(mBufferPos <= mBufferLength);
+    size_t bufAvailable = mBufferLength - mBufferPos;
+    size_t bufRead = std::min(bufAvailable, aSize);
+    if (aData) {
+      memcpy(aData, &mBuffer[mBufferPos], bufRead);
+      aData = (char*)aData + bufRead;
+    }
+    mBufferPos += bufRead;
+    mStreamPos += bufRead;
+    totalRead += bufRead;
+    aSize -= bufRead;
+
+    if (!aSize) {
+      return;
+    }
+
+    MOZ_RELEASE_ASSERT(mBufferPos == mBufferLength);
+
+    // If we try to read off the end of a stream then we must have hit the end
+    // of the replay for this thread.
+    while (mChunkIndex == mChunks.length()) {
+      MOZ_RELEASE_ASSERT(mName == StreamName::Event || mName == StreamName::Assert);
+      HitEndOfRecording();
+    }
+
+    const StreamChunkLocation& chunk = mChunks[mChunkIndex++];
+
+    EnsureMemory(&mBallast, &mBallastSize, chunk.mCompressedSize, BallastMaxSize(),
+                 DontCopyExistingData);
+    mFile->ReadChunk(mBallast.get(), chunk);
+
+    EnsureMemory(&mBuffer, &mBufferSize, chunk.mDecompressedSize, BUFFER_MAX,
+                 DontCopyExistingData);
+
+    size_t bytesWritten;
+    if (!Compression::LZ4::decompress(mBallast.get(), chunk.mCompressedSize,
+                                      mBuffer.get(), chunk.mDecompressedSize, &bytesWritten) ||
+        bytesWritten != chunk.mDecompressedSize)
+    {
+      MOZ_CRASH();
+    }
+
+    mBufferPos = 0;
+    mBufferLength = chunk.mDecompressedSize;
+  }
+}
+
+bool
+Stream::AtEnd()
+{
+  MOZ_RELEASE_ASSERT(mFile->OpenForReading());
+
+  return mBufferPos == mBufferLength && mChunkIndex == mChunks.length();
+}
+
+void
+Stream::WriteBytes(const void* aData, size_t aSize)
+{
+  MOZ_RELEASE_ASSERT(mFile->OpenForWriting());
+
+  // Prevent the entire file from being flushed while we write this data.
+  AutoReadSpinLock streamLock(mFile->mStreamLock);
+
+  while (true) {
+    // Fill up the data buffer first.
+    MOZ_RELEASE_ASSERT(mBufferPos <= mBufferSize);
+    size_t bufAvailable = mBufferSize - mBufferPos;
+    size_t bufWrite = (bufAvailable < aSize) ? bufAvailable : aSize;
+    memcpy(&mBuffer[mBufferPos], aData, bufWrite);
+    mBufferPos += bufWrite;
+    mStreamPos += bufWrite;
+    if (bufWrite == aSize) {
+      return;
+    }
+    aData = (char*)aData + bufWrite;
+    aSize -= bufWrite;
+
+    // Grow the file's buffer if it is not at its maximum size.
+    if (mBufferSize < BUFFER_MAX) {
+      EnsureMemory(&mBuffer, &mBufferSize, mBufferSize + 1, BUFFER_MAX, CopyExistingData);
+      continue;
+    }
+
+    Flush(/* aTakeLock = */ true);
+  }
+}
+
+size_t
+Stream::ReadScalar()
+{
+  // Read back a pointer sized value using the same encoding as WriteScalar.
+  size_t value = 0, shift = 0;
+  while (true) {
+    uint8_t bits;
+    ReadBytes(&bits, 1);
+    value |= (size_t)(bits & 127) << shift;
+    if (!(bits & 128)) {
+      break;
+    }
+    shift += 7;
+  }
+  return value;
+}
+
+void
+Stream::WriteScalar(size_t aValue)
+{
+  // Pointer sized values are written out as unsigned values with an encoding
+  // optimized for small values. Each written byte successively captures 7 bits
+  // of data from the value, starting at the low end, with the high bit in the
+  // byte indicating whether there are any more non-zero bits in the value.
+  //
+  // With this encoding, values less than 2^7 (128) require one byte, values
+  // less than 2^14 (16384) require two bytes, and so forth, but negative
+  // numbers end up requiring ten bytes on a 64 bit architecture.
+  do {
+    uint8_t bits = aValue & 127;
+    aValue = aValue >> 7;
+    if (aValue) {
+      bits |= 128;
+    }
+    WriteBytes(&bits, 1);
+  } while (aValue);
+}
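+
+// For example, WriteScalar(300) emits two bytes: the low seven bits 0b0101100
+// with the continuation bit set (0xAC), then the remaining bits 0b10 (0x02).
+// ReadScalar reassembles the value by shifting each seven bit group back into
+// place.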
+
+void
+Stream::CheckInput(size_t aValue)
+{
+  size_t oldValue = aValue;
+  RecordOrReplayScalar(&oldValue);
+  if (oldValue != aValue) {
+    child::ReportFatalError("Input Mismatch: Recorded: %zu Replayed: %zu\n", oldValue, aValue);
+    Unreachable();
+  }
+}
+
+void
+Stream::EnsureMemory(UniquePtr<char[]>* aBuf, size_t* aSize,
+                     size_t aNeededSize, size_t aMaxSize, ShouldCopy aCopy)
+{
+  // Once a stream buffer grows, it never shrinks again. Buffers start out
+  // small because most streams are very small.
+  MOZ_RELEASE_ASSERT(!!*aBuf == !!*aSize);
+  MOZ_RELEASE_ASSERT(aNeededSize <= aMaxSize);
+  if (*aSize < aNeededSize) {
+    size_t newSize = std::min(std::max<size_t>(256, aNeededSize * 2), aMaxSize);
+    char* newBuf = new char[newSize];
+    if (*aBuf && aCopy == CopyExistingData) {
+      memcpy(newBuf, aBuf->get(), *aSize);
+    }
+    aBuf->reset(newBuf);
+    *aSize = newSize;
+  }
+}
+
+void
+Stream::Flush(bool aTakeLock)
+{
+  MOZ_RELEASE_ASSERT(mFile && mFile->OpenForWriting());
+
+  if (!mBufferPos) {
+    return;
+  }
+
+  size_t bound = Compression::LZ4::maxCompressedSize(mBufferPos);
+  EnsureMemory(&mBallast, &mBallastSize, bound, BallastMaxSize(),
+               DontCopyExistingData);
+
+  size_t compressedSize = Compression::LZ4::compress(mBuffer.get(), mBufferPos, mBallast.get());
+  MOZ_RELEASE_ASSERT(compressedSize != 0);
+  MOZ_RELEASE_ASSERT((size_t)compressedSize <= bound);
+
+  StreamChunkLocation chunk =
+    mFile->WriteChunk(mBallast.get(), compressedSize, mBufferPos, aTakeLock);
+  mChunks.append(chunk);
+  MOZ_ALWAYS_TRUE(++mChunkIndex == mChunks.length());
+
+  mBufferPos = 0;
+}
+
+/* static */ size_t
+Stream::BallastMaxSize()
+{
+  return Compression::LZ4::maxCompressedSize(BUFFER_MAX);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// File
+///////////////////////////////////////////////////////////////////////////////
+
+// Information in a file index about a chunk.
+struct FileIndexChunk
+{
+  uint32_t /* StreamName */ mName;
+  uint32_t mNameIndex;
+  StreamChunkLocation mChunk;
+
+  FileIndexChunk()
+  {
+    PodZero(this);
+  }
+
+  FileIndexChunk(StreamName aName, uint32_t aNameIndex, const StreamChunkLocation& aChunk)
+    : mName((uint32_t) aName), mNameIndex(aNameIndex), mChunk(aChunk)
+  {}
+};
+
+// We expect to find this at every index in a file.
+static const uint64_t MagicValue = 0xd3e7f5fae445b3ac;
+
+// Index of chunks in a file. There is an index at the start of the file
+// (which is always empty) and at various places within the file itself.
+struct FileIndex
+{
+  // This should match MagicValue.
+  uint64_t mMagic;
+
+  // How many FileIndexChunk instances follow this structure.
+  uint32_t mNumChunks;
+
+  // The location of the next index in the file, or zero.
+  uint64_t mNextIndexOffset;
+
+  explicit FileIndex(uint32_t aNumChunks)
+    : mMagic(MagicValue), mNumChunks(aNumChunks), mNextIndexOffset(0)
+  {}
+};
+
+bool
+File::Open(const char* aName, Mode aMode)
+{
+  MOZ_RELEASE_ASSERT(!mFd);
+  MOZ_RELEASE_ASSERT(aName);
+
+  mMode = aMode;
+  mFd = DirectOpenFile(aName, mMode == WRITE);
+
+  if (OpenForWriting()) {
+    // Write an empty index at the start of the file.
+    FileIndex index(0);
+    DirectWrite(mFd, &index, sizeof(index));
+    mWriteOffset += sizeof(index);
+    return true;
+  }
+
+  // Read in every index in the file.
+  ReadIndexResult result;
+  do {
+    result = ReadNextIndex(nullptr);
+    if (result == ReadIndexResult::InvalidFile) {
+      return false;
+    }
+  } while (result == ReadIndexResult::FoundIndex);
+
+  return true;
+}
+
+void
+File::Close()
+{
+  if (!mFd) {
+    return;
+  }
+
+  if (OpenForWriting()) {
+    Flush();
+  }
+
+  Clear();
+}
+
+File::ReadIndexResult
+File::ReadNextIndex(InfallibleVector<Stream*>* aUpdatedStreams)
+{
+  // Unlike in the Flush() case, we don't have to worry about other threads
+  // attempting to read data from streams in this file while we are reading
+  // the new index.
+  MOZ_ASSERT(OpenForReading());
+
+  // Read in the last index to see if there is another one.
+  DirectSeekFile(mFd, mLastIndexOffset + offsetof(FileIndex, mNextIndexOffset));
+  uint64_t nextIndexOffset;
+  if (DirectRead(mFd, &nextIndexOffset, sizeof(nextIndexOffset)) != sizeof(nextIndexOffset)) {
+    return ReadIndexResult::InvalidFile;
+  }
+  if (!nextIndexOffset) {
+    return ReadIndexResult::EndOfFile;
+  }
+
+  mLastIndexOffset = nextIndexOffset;
+
+  FileIndex index(0);
+  DirectSeekFile(mFd, nextIndexOffset);
+  if (DirectRead(mFd, &index, sizeof(index)) != sizeof(index)) {
+    return ReadIndexResult::InvalidFile;
+  }
+  if (index.mMagic != MagicValue) {
+    return ReadIndexResult::InvalidFile;
+  }
+
+  MOZ_RELEASE_ASSERT(index.mNumChunks);
+
+  size_t indexBytes = index.mNumChunks * sizeof(FileIndexChunk);
+  FileIndexChunk* chunks = new FileIndexChunk[index.mNumChunks];
+  if (DirectRead(mFd, chunks, indexBytes) != indexBytes) {
+    return ReadIndexResult::InvalidFile;
+  }
+  for (size_t i = 0; i < index.mNumChunks; i++) {
+    const FileIndexChunk& indexChunk = chunks[i];
+    Stream* stream = OpenStream((StreamName) indexChunk.mName, indexChunk.mNameIndex);
+    stream->mChunks.append(indexChunk.mChunk);
+    if (aUpdatedStreams) {
+      aUpdatedStreams->append(stream);
+    }
+  }
+  delete[] chunks;
+
+  return ReadIndexResult::FoundIndex;
+}
+
+bool
+File::Flush()
+{
+  MOZ_ASSERT(OpenForWriting());
+  AutoSpinLock lock(mLock);
+
+  InfallibleVector<FileIndexChunk> newChunks;
+  for (auto& vector : mStreams) {
+    for (const UniquePtr<Stream>& stream : vector) {
+      if (stream) {
+        stream->Flush(/* aTakeLock = */ false);
+        for (size_t i = stream->mFlushedChunks; i < stream->mChunkIndex; i++) {
+          newChunks.emplaceBack(stream->mName, stream->mNameIndex, stream->mChunks[i]);
+        }
+        stream->mFlushedChunks = stream->mChunkIndex;
+      }
+    }
+  }
+
+  if (newChunks.empty()) {
+    return false;
+  }
+
+  // Write the new index information at the end of the file.
+  uint64_t indexOffset = mWriteOffset;
+  size_t indexBytes = newChunks.length() * sizeof(FileIndexChunk);
+  FileIndex index(newChunks.length());
+  DirectWrite(mFd, &index, sizeof(index));
+  DirectWrite(mFd, newChunks.begin(), indexBytes);
+  mWriteOffset += sizeof(index) + indexBytes;
+
+  // Update the next index offset for the last index written.
+  MOZ_RELEASE_ASSERT(sizeof(index.mNextIndexOffset) == sizeof(indexOffset));
+  DirectSeekFile(mFd, mLastIndexOffset + offsetof(FileIndex, mNextIndexOffset));
+  DirectWrite(mFd, &indexOffset, sizeof(indexOffset));
+  DirectSeekFile(mFd, mWriteOffset);
+
+  mLastIndexOffset = indexOffset;
+
+  return true;
+}
+
+StreamChunkLocation
+File::WriteChunk(const char* aStart,
+                 size_t aCompressedSize, size_t aDecompressedSize,
+                 bool aTakeLock)
+{
+  Maybe<AutoSpinLock> lock;
+  if (aTakeLock) {
+    lock.emplace(mLock);
+  }
+
+  StreamChunkLocation chunk;
+  chunk.mOffset = mWriteOffset;
+  chunk.mCompressedSize = aCompressedSize;
+  chunk.mDecompressedSize = aDecompressedSize;
+
+  DirectWrite(mFd, aStart, aCompressedSize);
+  mWriteOffset += aCompressedSize;
+
+  return chunk;
+}
+
+void
+File::ReadChunk(char* aDest, const StreamChunkLocation& aChunk)
+{
+  AutoSpinLock lock(mLock);
+  DirectSeekFile(mFd, aChunk.mOffset);
+  size_t res = DirectRead(mFd, aDest, aChunk.mCompressedSize);
+  if (res != aChunk.mCompressedSize) {
+    MOZ_CRASH();
+  }
+}
+
+Stream*
+File::OpenStream(StreamName aName, size_t aNameIndex)
+{
+  AutoSpinLock lock(mLock);
+
+  auto& vector = mStreams[(size_t)aName];
+
+  while (aNameIndex >= vector.length()) {
+    vector.emplaceBack();
+  }
+
+  UniquePtr<Stream>& stream = vector[aNameIndex];
+  if (!stream) {
+    stream.reset(new Stream(this, aName, aNameIndex));
+  }
+  return stream.get();
+}
+
+} // namespace recordreplay
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/File.h
@@ -0,0 +1,277 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_File_h
+#define mozilla_recordreplay_File_h
+
+#include "InfallibleVector.h"
+#include "ProcessRecordReplay.h"
+#include "SpinLock.h"
+
+#include "mozilla/PodOperations.h"
+#include "mozilla/RecordReplay.h"
+#include "mozilla/UniquePtr.h"
+
+namespace mozilla {
+namespace recordreplay {
+
+// Structure managing file I/O. Each file contains an index for a set of named
+// streams, whose contents are compressed and interleaved throughout the file.
+// Additionally, we directly manage the file handle and all associated memory.
+// This makes it easier to restore memory snapshots without getting confused
+// about the state of the file handles which the process has opened. Data
+// written and read from files is automatically compressed with LZ4.
+//
+// Files are used internally for any disk accesses which the record/replay
+// infrastructure needs to make. Currently, this is only for accessing the
+// recording file.
+//
+// File is threadsafe for simultaneous read/read and write/write accesses.
+// Stream is not threadsafe.
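+//
+// Example usage (an illustrative sketch; error handling is elided and the
+// file name is hypothetical):
+//
+//   File file;
+//   if (file.Open("recording.dat", File::WRITE)) {
+//     Stream* events = file.OpenStream(StreamName::Event, 0);
+//     uint32_t value = 42;
+//     events->WriteBytes(&value, sizeof(value));
+//     file.Flush();
+//   }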
+
+// A location of a chunk of a stream within a file.
+struct StreamChunkLocation
+{
+  // Offset into the file of the start of the chunk.
+  uint64_t mOffset;
+
+  // Compressed (stored) size of the chunk.
+  uint32_t mCompressedSize;
+
+  // Decompressed size of the chunk.
+  uint32_t mDecompressedSize;
+
+  inline bool operator == (const StreamChunkLocation& aOther) const {
+    return mOffset == aOther.mOffset
+        && mCompressedSize == aOther.mCompressedSize
+        && mDecompressedSize == aOther.mDecompressedSize;
+  }
+};
+
+enum class StreamName
+{
+  Main,
+  Lock,
+  Event,
+  Assert,
+  Count
+};
+
+class File;
+
+class Stream
+{
+  friend class File;
+
+  // File this stream belongs to.
+  File* mFile;
+
+  // Prefix name for this stream.
+  StreamName mName;
+
+  // Index which, when combined with mName, uniquely identifies this stream in
+  // the file.
+  size_t mNameIndex;
+
+  // When writing, all chunks that have been flushed to disk. When reading, all
+  // chunks in the entire stream.
+  InfallibleVector<StreamChunkLocation> mChunks;
+
+  // Data buffer.
+  UniquePtr<char[]> mBuffer;
+
+  // The maximum number of bytes to buffer before compressing and writing to
+  // disk, and the maximum number of bytes that can be decompressed at once.
+  static const size_t BUFFER_MAX = 1024 * 1024;
+
+  // The capacity of mBuffer, at most BUFFER_MAX.
+  size_t mBufferSize;
+
+  // During reading, the number of accessible bytes in mBuffer.
+  size_t mBufferLength;
+
+  // The number of bytes read or written from mBuffer.
+  size_t mBufferPos;
+
+  // The number of uncompressed bytes read or written from the stream.
+  size_t mStreamPos;
+
+  // Scratch buffer for use when decompressing or compressing data.
+  UniquePtr<char[]> mBallast;
+  size_t mBallastSize;
+
+  // The number of chunks that have been completely read or written. When
+  // writing, this equals mChunks.length().
+  size_t mChunkIndex;
+
+  // When writing, the number of chunks in this stream when the file was last
+  // flushed.
+  size_t mFlushedChunks;
+
+  Stream(File* aFile, StreamName aName, size_t aNameIndex)
+    : mFile(aFile)
+    , mName(aName)
+    , mNameIndex(aNameIndex)
+    , mBuffer(nullptr)
+    , mBufferSize(0)
+    , mBufferLength(0)
+    , mBufferPos(0)
+    , mStreamPos(0)
+    , mBallast(nullptr)
+    , mBallastSize(0)
+    , mChunkIndex(0)
+    , mFlushedChunks(0)
+  {}
+
+public:
+  StreamName Name() const { return mName; }
+  size_t NameIndex() const { return mNameIndex; }
+
+  void ReadBytes(void* aData, size_t aSize);
+  void WriteBytes(const void* aData, size_t aSize);
+  size_t ReadScalar();
+  void WriteScalar(size_t aValue);
+  bool AtEnd();
+
+  inline void RecordOrReplayBytes(void* aData, size_t aSize) {
+    if (IsRecording()) {
+      WriteBytes(aData, aSize);
+    } else {
+      ReadBytes(aData, aSize);
+    }
+  }
+
+  template <typename T>
+  inline void RecordOrReplayScalar(T* aPtr) {
+    if (IsRecording()) {
+      WriteScalar((size_t)*aPtr);
+    } else {
+      *aPtr = (T)ReadScalar();
+    }
+  }
+
+  template <typename T>
+  inline void RecordOrReplayValue(T* aPtr) {
+    RecordOrReplayBytes(aPtr, sizeof(T));
+  }
+
+  // Make sure that a value is the same while replaying as it was while
+  // recording.
+  void CheckInput(size_t aValue);
+
+  // Add a thread event to this stream. Each thread event in a stream is
+  // followed by additional data specific to that event. Generally, CheckInput
+  // should be used while recording or replaying the data for a thread event
+  // so that any discrepancies with the recording are found immediately.
+  inline void RecordOrReplayThreadEvent(ThreadEvent aEvent) {
+    CheckInput((size_t)aEvent);
+  }
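+
+  // For example, a caller recording or replaying the result of some call
+  // might do the following (an illustrative sketch; the event name is
+  // hypothetical):
+  //
+  //   stream.RecordOrReplayThreadEvent(ThreadEvent::SomeEvent);
+  //   stream.RecordOrReplayValue(&rv);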
+
+  inline size_t StreamPosition() {
+    return mStreamPos;
+  }
+
+private:
+  enum ShouldCopy {
+    DontCopyExistingData,
+    CopyExistingData
+  };
+
+  void EnsureMemory(UniquePtr<char[]>* aBuf, size_t* aSize, size_t aNeededSize, size_t aMaxSize,
+                    ShouldCopy aCopy);
+  void Flush(bool aTakeLock);
+
+  static size_t BallastMaxSize();
+};
+
+class File
+{
+public:
+  enum Mode {
+    WRITE,
+    READ
+  };
+
+  friend class Stream;
+
+private:
+  // Open file handle, or 0 if closed.
+  FileHandle mFd;
+
+  // Whether this file is open for writing or reading.
+  Mode mMode;
+
+  // When writing, the current offset into the file.
+  uint64_t mWriteOffset;
+
+  // The offset of the last index read or written to the file.
+  uint64_t mLastIndexOffset;
+
+  // All streams in this file, indexed by stream name and name index.
+  typedef InfallibleVector<UniquePtr<Stream>> StreamVector;
+  StreamVector mStreams[(size_t) StreamName::Count];
+
+  // Lock protecting access to this file.
+  SpinLock mLock;
+
+  // When writing, lock for synchronizing file flushes (writer) with other
+  // threads writing to streams in this file (readers).
+  ReadWriteSpinLock mStreamLock;
+
+  void Clear() {
+    mFd = 0;
+    mMode = READ;
+    mWriteOffset = 0;
+    mLastIndexOffset = 0;
+    for (auto& vector : mStreams) {
+      vector.clear();
+    }
+    PodZero(&mLock);
+    PodZero(&mStreamLock);
+  }
+
+public:
+  File() { Clear(); }
+  ~File() { Close(); }
+
+  bool Open(const char* aName, Mode aMode);
+  void Close();
+
+  bool OpenForWriting() const { return mFd && mMode == WRITE; }
+  bool OpenForReading() const { return mFd && mMode == READ; }
+
+  Stream* OpenStream(StreamName aName, size_t aNameIndex);
+
+  // Prevent/allow other threads to write to streams in this file.
+  void PreventStreamWrites() { mStreamLock.WriteLock(); }
+  void AllowStreamWrites() { mStreamLock.WriteUnlock(); }
+
+  // Flush any changes since the last Flush() call to disk, returning whether
+  // there were such changes.
+  bool Flush();
+
+  enum class ReadIndexResult {
+    InvalidFile,
+    EndOfFile,
+    FoundIndex
+  };
+
+  // Read any data added to the file by a Flush() call. aUpdatedStreams is
+  // optional; if provided, it is filled in with the streams whose contents
+  // have changed, and may contain duplicates.
+  ReadIndexResult ReadNextIndex(InfallibleVector<Stream*>* aUpdatedStreams);
+
+private:
+  StreamChunkLocation WriteChunk(const char* aStart,
+                                 size_t aCompressedSize, size_t aDecompressedSize,
+                                 bool aTakeLock);
+  void ReadChunk(char* aDest, const StreamChunkLocation& aChunk);
+};
+
+} // namespace recordreplay
+} // namespace mozilla
+
+#endif // mozilla_recordreplay_File_h
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/HashTable.cpp
@@ -0,0 +1,502 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/StaticMutex.h"
+
+#include "HashTable.h"
+#include "InfallibleVector.h"
+#include "ProcessRecordReplay.h"
+#include "ProcessRedirect.h"
+#include "ValueIndex.h"
+
+#include "PLDHashTable.h"
+
+namespace mozilla {
+namespace recordreplay {
+
+// Hash tables frequently incorporate pointer values into the hash numbers
+// they compute. Pointer values are not guaranteed to be the same between
+// recording and replaying, so hash numbers and iteration order can also
+// differ between recording and replaying, which can in turn affect the order
+// in which recorded events occur. HashTable stabilization is designed to deal
+// with this problem, for specific kinds of hashtables (PLHashTables and
+// PLDHashTables) which are based on callbacks.
+//
+// When the table is constructed, if we are recording/replaying then the
+// callbacks are replaced with an alternate set that produces consistent hash
+// numbers between recording and replay. If during replay the additions and
+// removals to the tables occur in the same order that they did during
+// recording, then the structure of the tables and the order in which elements
+// are visited during iteration will be the same.
+//
+// Ensuring that hash numbers are consistent is done as follows: for each
+// table, we keep track of the keys that are in the table. When computing the
+// hash of an arbitrary key, we look for a matching key in the table, using
+// that key's hash if found. Otherwise, a new hash is generated from an
+// incrementing counter.
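+//
+// As a minimal sketch of the scheme (independent of the classes below, with
+// hypothetical helpers):
+//
+//   static uint32_t gHashGenerator;
+//
+//   uint32_t StableHash(const void* aKey) {
+//     uint32_t existingHash;
+//     if (FindMatchingKeyHash(aKey, &existingHash)) { // key already present
+//       return existingHash;
+//     }
+//     return gHashGenerator++; // fresh, deterministic hash for a new key
+//   }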
+
+typedef uint32_t HashNumber;
+
+class StableHashTableInfo
+{
+  // Magic number for attempting to determine whether we are dealing with an
+  // actual StableHashTableInfo. Despite our best efforts some hashtables do
+  // not go through stabilization (e.g. they have static constructors that run
+  // before record/replay state is initialized).
+  size_t mMagic;
+
+  static const size_t MagicNumber = 0xDEADBEEFDEADBEEF;
+
+  // Information about a key in the table: the key pointer, along with the new
+  // hash number we have generated for the key.
+  struct KeyInfo {
+    const void* mKey;
+    HashNumber mNewHash;
+  };
+
+  // Table mapping original hash numbers (produced by the table's hash
+  // function) to a vector with all keys sharing that original hash number.
+  struct HashInfo {
+    InfallibleVector<KeyInfo> mKeys;
+  };
+  typedef std::unordered_map<HashNumber, UniquePtr<HashInfo>> HashToKeyMap;
+  HashToKeyMap mHashToKey;
+
+  // Table mapping key pointers to their original hash number.
+  typedef std::unordered_map<const void*, HashNumber> KeyToHashMap;
+  KeyToHashMap mKeyToHash;
+
+  // The last key which the hash function was called on, and the new hash
+  // number which we generated for that key.
+  const void* mLastKey;
+  HashNumber mLastNewHash;
+
+  // Counter for generating new hash numbers for entries added to the table.
+  // This increases monotonically, though it is fine if it overflows.
+  uint32_t mHashGenerator;
+
+  // Buffer with executable memory for use in binding functions.
+  uint8_t* mCallbackStorage;
+  static const size_t CallbackStorageCapacity = 4096;
+
+  // Get an existing key in the table.
+  KeyInfo* FindKeyInfo(HashNumber aOriginalHash, const void* aKey, HashInfo** aHashInfo = nullptr) {
+    HashToKeyMap::iterator iter = mHashToKey.find(aOriginalHash);
+    MOZ_ASSERT(iter != mHashToKey.end());
+
+    HashInfo* hashInfo = iter->second.get();
+    for (KeyInfo& keyInfo : hashInfo->mKeys) {
+      if (keyInfo.mKey == aKey) {
+        if (aHashInfo) {
+          *aHashInfo = hashInfo;
+        }
+        return &keyInfo;
+      }
+    }
+    MOZ_CRASH();
+  }
+
+public:
+  StableHashTableInfo()
+    : mMagic(MagicNumber)
+    , mLastKey(nullptr)
+    , mLastNewHash(0)
+    , mHashGenerator(0)
+    , mCallbackStorage(nullptr)
+  {
+    // Use AllocateMemory, as the result will have RWX permissions.
+    mCallbackStorage = (uint8_t*) AllocateMemory(CallbackStorageCapacity, MemoryKind::Tracked);
+  }
+
+  ~StableHashTableInfo() {
+    MOZ_ASSERT(mHashToKey.empty());
+    DeallocateMemory(mCallbackStorage, CallbackStorageCapacity, MemoryKind::Tracked);
+  }
+
+  bool AppearsValid() {
+    return mMagic == MagicNumber;
+  }
+
+  void AddKey(HashNumber aOriginalHash, const void* aKey, HashNumber aNewHash) {
+    HashToKeyMap::iterator iter = mHashToKey.find(aOriginalHash);
+    if (iter == mHashToKey.end()) {
+      iter = mHashToKey.insert(HashToKeyMap::value_type(aOriginalHash, MakeUnique<HashInfo>())).first;
+    }
+    HashInfo* hashInfo = iter->second.get();
+
+    KeyInfo key;
+    key.mKey = aKey;
+    key.mNewHash = aNewHash;
+    hashInfo->mKeys.append(key);
+
+    mKeyToHash.insert(KeyToHashMap::value_type(aKey, aOriginalHash));
+  }
+
+  void RemoveKey(HashNumber aOriginalHash, const void* aKey) {
+    HashInfo* hashInfo;
+    KeyInfo* keyInfo = FindKeyInfo(aOriginalHash, aKey, &hashInfo);
+    hashInfo->mKeys.erase(keyInfo);
+
+    if (hashInfo->mKeys.length() == 0) {
+      mHashToKey.erase(aOriginalHash);
+    }
+
+    mKeyToHash.erase(aKey);
+  }
+
+  HashNumber FindKeyHash(HashNumber aOriginalHash, const void* aKey) {
+    KeyInfo* info = FindKeyInfo(aOriginalHash, aKey);
+    return info->mNewHash;
+  }
+
+  // Look for a key in the table with a matching original hash and for which
+  // aMatch() returns true. If such a key is found, set *aNewHash to its new
+  // hash number and return true.
+  bool HasMatchingKey(HashNumber aOriginalHash,
+                      const std::function<bool(const void*)>& aMatch,
+                      HashNumber* aNewHash)
+  {
+    HashToKeyMap::const_iterator iter = mHashToKey.find(aOriginalHash);
+    if (iter != mHashToKey.end()) {
+      HashInfo* hashInfo = iter->second.get();
+      for (const KeyInfo& keyInfo : hashInfo->mKeys) {
+        if (aMatch(keyInfo.mKey)) {
+          *aNewHash = keyInfo.mNewHash;
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  HashNumber GetOriginalHashNumber(const void* aKey) {
+    KeyToHashMap::iterator iter = mKeyToHash.find(aKey);
+    MOZ_ASSERT(iter != mKeyToHash.end());
+    return iter->second;
+  }
+
+  class Assembler : public recordreplay::Assembler {
+  public:
+    explicit Assembler(StableHashTableInfo& aInfo)
+      : recordreplay::Assembler(aInfo.mCallbackStorage, CallbackStorageCapacity)
+    {}
+  };
+
+  // Use the callback storage buffer to create a new function T which has one
+  // fewer argument than S, and which calls S with aArgument bound at argument
+  // position aArgumentPosition. See BindFunctionArgument in ProcessRedirect.h.
+  template <typename S, typename T>
+  void NewBoundFunction(Assembler& aAssembler, S aFunction, void* aArgument,
+                        size_t aArgumentPosition, T* aTarget) {
+    void* nfn = BindFunctionArgument(BitwiseCast<void*>(aFunction), aArgument, aArgumentPosition,
+                                     aAssembler);
+    BitwiseCast(nfn, aTarget);
+  }
+
+  // Set the last queried key for this table, and generate a new hash number
+  // for it.
+  HashNumber SetLastKey(const void* aKey) {
+    // Remember the last key queried, so that if it is then added to the table
+    // we know what hash number to use.
+    mLastKey = aKey;
+    mLastNewHash = mHashGenerator++;
+    return mLastNewHash;
+  }
+
+  bool HasLastKey() {
+    return !!mLastKey;
+  }
+
+  HashNumber GetLastNewHash(const void* aKey) {
+    MOZ_ASSERT(aKey == mLastKey);
+    return mLastNewHash;
+  }
+
+  bool IsEmpty() { return mHashToKey.empty(); }
+
+  // Move aOther's contents into this one and clear aOther out. Callbacks for
+  // the tables are left alone.
+  void MoveContentsFrom(StableHashTableInfo& aOther) {
+    mHashToKey = std::move(aOther.mHashToKey);
+    mKeyToHash = std::move(aOther.mKeyToHash);
+    mHashGenerator = aOther.mHashGenerator;
+
+    aOther.mHashToKey.clear();
+    aOther.mKeyToHash.clear();
+    aOther.mHashGenerator = 0;
+
+    mLastKey = aOther.mLastKey = nullptr;
+    mLastNewHash = aOther.mLastNewHash = 0;
+  }
+};
+
+///////////////////////////////////////////////////////////////////////////////
+// PLHashTable Stabilization
+///////////////////////////////////////////////////////////////////////////////
+
+// For each PLHashTable in the process, a PLHashTableInfo is generated. This
+// structure becomes the |allocPriv| for the table, handled by the new
+// callbacks given to it.
+struct PLHashTableInfo : public StableHashTableInfo
+{
+  // Original callbacks for the table.
+  PLHashFunction mKeyHash;
+  PLHashComparator mKeyCompare;
+  PLHashComparator mValueCompare;
+  const PLHashAllocOps* mAllocOps;
+
+  // Original private value for the table.
+  void* mAllocPrivate;
+
+  PLHashTableInfo(PLHashFunction aKeyHash,
+                  PLHashComparator aKeyCompare, PLHashComparator aValueCompare,
+                  const PLHashAllocOps* aAllocOps, void* aAllocPrivate)
+    : mKeyHash(aKeyHash),
+      mKeyCompare(aKeyCompare),
+      mValueCompare(aValueCompare),
+      mAllocOps(aAllocOps),
+      mAllocPrivate(aAllocPrivate)
+  {}
+
+  static PLHashTableInfo* FromPrivate(void* aAllocPrivate) {
+    PLHashTableInfo* info = reinterpret_cast<PLHashTableInfo*>(aAllocPrivate);
+    MOZ_RELEASE_ASSERT(info->AppearsValid());
+    return info;
+  }
+};
+
+static void*
+WrapPLHashAllocTable(void* aAllocPrivate, PRSize aSize)
+{
+  PLHashTableInfo* info = PLHashTableInfo::FromPrivate(aAllocPrivate);
+  return info->mAllocOps
+         ? info->mAllocOps->allocTable(info->mAllocPrivate, aSize)
+         : malloc(aSize);
+}
+
+static void
+WrapPLHashFreeTable(void* aAllocPrivate, void* aItem)
+{
+  PLHashTableInfo* info = PLHashTableInfo::FromPrivate(aAllocPrivate);
+  if (info->mAllocOps) {
+    info->mAllocOps->freeTable(info->mAllocPrivate, aItem);
+  } else {
+    free(aItem);
+  }
+}
+
+static PLHashEntry*
+WrapPLHashAllocEntry(void* aAllocPrivate, const void* aKey)
+{
+  PLHashTableInfo* info = PLHashTableInfo::FromPrivate(aAllocPrivate);
+
+  if (info->HasLastKey()) {
+    uint32_t originalHash = info->mKeyHash(aKey);
+    info->AddKey(originalHash, aKey, info->GetLastNewHash(aKey));
+  } else {
+    // A few PLHashTables are manipulated directly by Gecko code, in which case
+    // the hashes are supplied directly to the table and we don't have a chance
+    // to modify them. Fortunately, none of these tables are iterated in a way
+    // that can cause the replay to diverge, so just punt in these cases.
+    MOZ_ASSERT(info->IsEmpty());
+  }
+
+  return info->mAllocOps
+         ? info->mAllocOps->allocEntry(info->mAllocPrivate, aKey)
+         : (PLHashEntry*) malloc(sizeof(PLHashEntry));
+}
+
+static void
+WrapPLHashFreeEntry(void *aAllocPrivate, PLHashEntry *he, PRUintn flag)
+{
+  PLHashTableInfo* info = PLHashTableInfo::FromPrivate(aAllocPrivate);
+
+  // Ignore empty tables, due to the raw table manipulation described above.
+  if (flag == HT_FREE_ENTRY && !info->IsEmpty()) {
+    uint32_t originalHash = info->GetOriginalHashNumber(he->key);
+    info->RemoveKey(originalHash, he->key);
+  }
+
+  if (info->mAllocOps) {
+    info->mAllocOps->freeEntry(info->mAllocPrivate, he, flag);
+  } else if (flag == HT_FREE_ENTRY) {
+    free(he);
+  }
+}
+
+static PLHashAllocOps gWrapPLHashAllocOps = {
+  WrapPLHashAllocTable, WrapPLHashFreeTable,
+  WrapPLHashAllocEntry, WrapPLHashFreeEntry
+};
+
+static uint32_t
+PLHashComputeHash(void* aKey, PLHashTableInfo* aInfo)
+{
+  uint32_t originalHash = aInfo->mKeyHash(aKey);
+  HashNumber newHash;
+  if (aInfo->HasMatchingKey(originalHash,
+                            [=](const void* aExistingKey) {
+                              return aInfo->mKeyCompare(aKey, aExistingKey);
+                            }, &newHash)) {
+    return newHash;
+  }
+  return aInfo->SetLastKey(aKey);
+}
+
+void
+GeneratePLHashTableCallbacks(PLHashFunction* aKeyHash,
+                             PLHashComparator* aKeyCompare,
+                             PLHashComparator* aValueCompare,
+                             const PLHashAllocOps** aAllocOps,
+                             void** aAllocPrivate)
+{
+  PLHashTableInfo* info = new PLHashTableInfo(*aKeyHash, *aKeyCompare, *aValueCompare,
+                                              *aAllocOps, *aAllocPrivate);
+  PLHashTableInfo::Assembler assembler(*info);
+  info->NewBoundFunction(assembler, PLHashComputeHash, info, 1, aKeyHash);
+  *aAllocOps = &gWrapPLHashAllocOps;
+  *aAllocPrivate = info;
+}
+
+void
+DestroyPLHashTableCallbacks(void* aAllocPrivate)
+{
+  PLHashTableInfo* info = PLHashTableInfo::FromPrivate(aAllocPrivate);
+  delete info;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// PLDHashTable Stabilization
+///////////////////////////////////////////////////////////////////////////////
+
+// For each PLDHashTable in the process, a PLDHashTableInfo is generated. This
+// structure is supplied to its callbacks using bound functions.
+struct PLDHashTableInfo : public StableHashTableInfo
+{
+  // Original callbacks for the table.
+  const PLDHashTableOps* mOps;
+
+  // Wrapper callbacks for the table.
+  PLDHashTableOps mNewOps;
+
+  explicit PLDHashTableInfo(const PLDHashTableOps* aOps)
+    : mOps(aOps)
+  {
+    PodZero(&mNewOps);
+  }
+
+  static PLDHashTableInfo* MaybeFromOps(const PLDHashTableOps* aOps) {
+    PLDHashTableInfo* res = reinterpret_cast<PLDHashTableInfo*>
+      ((uint8_t*)aOps - offsetof(PLDHashTableInfo, mNewOps));
+    return res->AppearsValid() ? res : nullptr;
+  }
+
+  static PLDHashTableInfo* FromOps(const PLDHashTableOps* aOps) {
+    PLDHashTableInfo* res = MaybeFromOps(aOps);
+    MOZ_RELEASE_ASSERT(res);
+    return res;
+  }
+};
+
+static PLDHashNumber
+PLDHashTableComputeHash(const void* aKey, PLDHashTableInfo* aInfo)
+{
+  uint32_t originalHash = aInfo->mOps->hashKey(aKey);
+  HashNumber newHash;
+  if (aInfo->HasMatchingKey(originalHash,
+                            [=](const void* aExistingKey) {
+                              return aInfo->mOps->matchEntry((PLDHashEntryHdr*) aExistingKey, aKey);
+                            }, &newHash)) {
+    return newHash;
+  }
+  return aInfo->SetLastKey(aKey);
+}
+
+static void
+PLDHashTableMoveEntry(PLDHashTable* aTable, const PLDHashEntryHdr* aFrom, PLDHashEntryHdr* aTo,
+                      PLDHashTableInfo* aInfo)
+{
+  aInfo->mOps->moveEntry(aTable, aFrom, aTo);
+
+  uint32_t originalHash = aInfo->GetOriginalHashNumber(aFrom);
+  uint32_t newHash = aInfo->FindKeyHash(originalHash, aFrom);
+
+  aInfo->RemoveKey(originalHash, aFrom);
+  aInfo->AddKey(originalHash, aTo, newHash);
+}
+
+static void
+PLDHashTableClearEntry(PLDHashTable* aTable, PLDHashEntryHdr* aEntry, PLDHashTableInfo* aInfo)
+{
+  aInfo->mOps->clearEntry(aTable, aEntry);
+
+  uint32_t originalHash = aInfo->GetOriginalHashNumber(aEntry);
+  aInfo->RemoveKey(originalHash, aEntry);
+}
+
+static void
+PLDHashTableInitEntry(PLDHashEntryHdr* aEntry, const void* aKey, PLDHashTableInfo* aInfo)
+{
+  if (aInfo->mOps->initEntry) {
+    aInfo->mOps->initEntry(aEntry, aKey);
+  }
+
+  uint32_t originalHash = aInfo->mOps->hashKey(aKey);
+  aInfo->AddKey(originalHash, aEntry, aInfo->GetLastNewHash(aKey));
+}
+
+extern "C" {
+
+MOZ_EXPORT const PLDHashTableOps*
+RecordReplayInterface_InternalGeneratePLDHashTableCallbacks(const PLDHashTableOps* aOps)
+{
+  PLDHashTableInfo* info = new PLDHashTableInfo(aOps);
+  PLDHashTableInfo::Assembler assembler(*info);
+  info->NewBoundFunction(assembler, PLDHashTableComputeHash, info, 1, &info->mNewOps.hashKey);
+  info->mNewOps.matchEntry = aOps->matchEntry;
+  info->NewBoundFunction(assembler, PLDHashTableMoveEntry, info, 3, &info->mNewOps.moveEntry);
+  info->NewBoundFunction(assembler, PLDHashTableClearEntry, info, 2, &info->mNewOps.clearEntry);
+  info->NewBoundFunction(assembler, PLDHashTableInitEntry, info, 2, &info->mNewOps.initEntry);
+  return &info->mNewOps;
+}
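+
+// When recording/replaying, PLDHashTable is expected to install the wrapped
+// ops at construction time, roughly as follows (an illustrative sketch, not
+// the actual PLDHashTable code):
+//
+//   mOps = mozilla::recordreplay::GeneratePLDHashTableCallbacks(aOps);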
+
+MOZ_EXPORT const PLDHashTableOps*
+RecordReplayInterface_InternalUnwrapPLDHashTableCallbacks(const PLDHashTableOps* aOps)
+{
+  PLDHashTableInfo* info = PLDHashTableInfo::FromOps(aOps);
+  return info->mOps;
+}
+
+MOZ_EXPORT void
+RecordReplayInterface_InternalDestroyPLDHashTableCallbacks(const PLDHashTableOps* aOps)
+{
+  // Primordial PLDHashTables used in the copy constructor might not have any ops.
+  if (!aOps) {
+    return;
+  }
+
+  // Note: PLDHashTables with static ctors might have been constructed before
+  // record/replay state was initialized and have their normal ops. Check the
+  // magic number via MaybeFromOps before destroying the info.
+  PLDHashTableInfo* info = PLDHashTableInfo::MaybeFromOps(aOps);
+  delete info;
+}
+
+MOZ_EXPORT void
+RecordReplayInterface_InternalMovePLDHashTableContents(const PLDHashTableOps* aFirstOps,
+                                                       const PLDHashTableOps* aSecondOps)
+{
+  PLDHashTableInfo* firstInfo = PLDHashTableInfo::FromOps(aFirstOps);
+  PLDHashTableInfo* secondInfo = PLDHashTableInfo::FromOps(aSecondOps);
+
+  secondInfo->MoveContentsFrom(*firstInfo);
+}
+
+} // extern "C"
+
+} // namespace recordreplay
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/HashTable.h
@@ -0,0 +1,27 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_HashTable_h
+#define mozilla_recordreplay_HashTable_h
+
+#include "plhash.h"
+
+namespace mozilla {
+namespace recordreplay {
+
+// Routines for creating specialized callbacks for PLHashTables that preserve
+// iteration order, similar to those for PLDHashTables in RecordReplay.h.
+void GeneratePLHashTableCallbacks(PLHashFunction* aKeyHash,
+                                  PLHashComparator* aKeyCompare,
+                                  PLHashComparator* aValueCompare,
+                                  const PLHashAllocOps** aAllocOps,
+                                  void** aAllocPrivate);
+void DestroyPLHashTableCallbacks(void* aAllocPrivate);
+
+} // namespace recordreplay
+} // namespace mozilla
+
+#endif // mozilla_recordreplay_HashTable_h
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/InfallibleVector.h
@@ -0,0 +1,140 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_InfallibleVector_h
+#define mozilla_recordreplay_InfallibleVector_h
+
+#include "mozilla/Vector.h"
+
+namespace mozilla {
+namespace recordreplay {
+
+// This file declares two classes, InfallibleVector and StaticInfallibleVector,
+// which behave like normal vectors except that all their operations are
+// infallible: we will immediately crash if any operation on the underlying
+// vector fails.
+//
+// StaticInfallibleVector is designed for use in static storage, and does not
+// have a static constructor or destructor in release builds.
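+//
+// For example (an illustrative sketch):
+//
+//   static StaticInfallibleVector<size_t> gIds; // zeroed static storage
+//
+//   void RecordId(size_t aId) {
+//     gIds.append(aId); // crashes on allocation failure instead of failing
+//   }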
+
+template<typename Outer, typename T, size_t MinInlineCapacity, class AllocPolicy>
+class InfallibleVectorOperations
+{
+  typedef Vector<T, MinInlineCapacity, AllocPolicy> InnerVector;
+  InnerVector& Vector() { return static_cast<Outer*>(this)->Vector(); }
+  const InnerVector& Vector() const { return static_cast<const Outer*>(this)->Vector(); }
+
+public:
+  size_t length() const { return Vector().length(); }
+  bool empty() const { return Vector().empty(); }
+  T* begin() { return Vector().begin(); }
+  const T* begin() const { return Vector().begin(); }
+  T* end() { return Vector().end(); }
+  const T* end() const { return Vector().end(); }
+  T& operator[](size_t aIndex) { return Vector()[aIndex]; }
+  const T& operator[](size_t aIndex) const { return Vector()[aIndex]; }
+  T& back() { return Vector().back(); }
+  const T& back() const { return Vector().back(); }
+  void popBack() { Vector().popBack(); }
+  T popCopy() { return Vector().popCopy(); }
+  void erase(T* aT) { Vector().erase(aT); }
+  void clear() { Vector().clear(); }
+
+  void reserve(size_t aRequest) {
+    if (!Vector().reserve(aRequest)) {
+      MOZ_CRASH();
+    }
+  }
+
+  void resize(size_t aNewLength) {
+    if (!Vector().resize(aNewLength)) {
+      MOZ_CRASH();
+    }
+  }
+
+  template<typename U> void append(U&& aU) {
+    if (!Vector().append(std::forward<U>(aU))) {
+      MOZ_CRASH();
+    }
+  }
+
+  template<typename U> void append(const U* aBegin, size_t aLength) {
+    if (!Vector().append(aBegin, aLength)) {
+      MOZ_CRASH();
+    }
+  }
+
+  void appendN(const T& aT, size_t aN) {
+    if (!Vector().appendN(aT, aN)) {
+      MOZ_CRASH();
+    }
+  }
+
+  template<typename... Args> void emplaceBack(Args&&... aArgs) {
+    if (!Vector().emplaceBack(std::forward<Args>(aArgs)...)) {
+      MOZ_CRASH();
+    }
+  }
+
+  template<typename... Args> void infallibleEmplaceBack(Args&&... aArgs) {
+    Vector().infallibleEmplaceBack(std::forward<Args>(aArgs)...);
+  }
+
+  template<typename U> void insert(T* aP, U&& aVal) {
+    if (!Vector().insert(aP, std::forward<U>(aVal))) {
+      MOZ_CRASH();
+    }
+  }
+};
+
+template<typename T,
+         size_t MinInlineCapacity = 0,
+         class AllocPolicy = MallocAllocPolicy>
+class InfallibleVector
+  : public InfallibleVectorOperations<InfallibleVector<T, MinInlineCapacity, AllocPolicy>,
+                                      T, MinInlineCapacity, AllocPolicy>
+{
+  typedef Vector<T, MinInlineCapacity, AllocPolicy> InnerVector;
+  InnerVector mVector;
+
+public:
+  InnerVector& Vector() { return mVector; }
+  const InnerVector& Vector() const { return mVector; }
+};
+
+template<typename T,
+         size_t MinInlineCapacity = 0,
+         class AllocPolicy = MallocAllocPolicy>
+class StaticInfallibleVector
+  : public InfallibleVectorOperations<StaticInfallibleVector<T, MinInlineCapacity, AllocPolicy>,
+                                      T, MinInlineCapacity, AllocPolicy>
+{
+  typedef Vector<T, MinInlineCapacity, AllocPolicy> InnerVector;
+  mutable InnerVector* mVector;
+
+  void EnsureVector() const {
+    if (!mVector) {
+      // N.B. This class can only be used with alloc policies that have a
+      // default constructor.
+      AllocPolicy policy;
+      void* memory = policy.template pod_malloc<InnerVector>(1);
+      MOZ_RELEASE_ASSERT(memory);
+      mVector = new(memory) InnerVector();
+    }
+  }
+
+public:
+  // StaticInfallibleVectors are allocated in static storage and must not have
+  // constructors; their memory is initially zeroed.
+
+  InnerVector& Vector() { EnsureVector(); return *mVector; }
+  const InnerVector& Vector() const { EnsureVector(); return *mVector; }
+};
+
+} // namespace recordreplay
+} // namespace mozilla
+
+#endif // mozilla_recordreplay_InfallibleVector_h
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/Lock.cpp
@@ -0,0 +1,236 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "Lock.h"
+
+#include "mozilla/StaticMutex.h"
+
+#include "ChunkAllocator.h"
+#include "InfallibleVector.h"
+#include "SpinLock.h"
+#include "Thread.h"
+
+#include <unordered_map>
+
+namespace mozilla {
+namespace recordreplay {
+
+// The total number of locks that have been created. Reserved IDs:
+// 0: Locks that are not recorded.
+// 1: Used by gAtomicLock for atomic accesses.
+//
+// This is only used while recording, and increments gradually as locks are
+// created.
+static const size_t gAtomicLockId = 1;
+static Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> gNumLocks;
+
+struct LockAcquires
+{
+  // Stream of thread IDs in the order in which they acquired the lock. This
+  // is protected by the lock itself.
+  Stream* mAcquires;
+
+  // During replay, the next thread id to acquire the lock. Writes to this are
+  // protected by the lock itself, though reads may occur on other threads.
+  Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> mNextOwner;
+
+  static const size_t NoNextOwner = 0;
+
+  void ReadAndNotifyNextOwner(Thread* aCurrentThread) {
+    MOZ_RELEASE_ASSERT(IsReplaying());
+    if (mAcquires->AtEnd()) {
+      mNextOwner = NoNextOwner;
+    } else {
+      mNextOwner = mAcquires->ReadScalar();
+      if (mNextOwner != aCurrentThread->Id()) {
+        Thread::Notify(mNextOwner);
+      }
+    }
+  }
+};
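+
+// The acquires stream is simply the sequence of thread IDs in acquisition
+// order. For example (illustrative), if threads 3, 5 and 3 acquire a lock in
+// that order while recording, its stream contains the scalars [3, 5, 3], and
+// during replay ReadAndNotifyNextOwner hands the lock to each of those
+// threads in turn.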
+
+// Acquires for each lock, indexed by the lock ID.
+static ChunkAllocator<LockAcquires> gLockAcquires;
+
+///////////////////////////////////////////////////////////////////////////////
+// Locking Interface
+///////////////////////////////////////////////////////////////////////////////
+
+// Table mapping native lock pointers to the associated Lock structure, for
+// every recorded lock in existence.
+typedef std::unordered_map<void*, Lock*> LockMap;
+static LockMap* gLocks;
+static ReadWriteSpinLock gLocksLock;
+
+/* static */ void
+Lock::New(void* aNativeLock)
+{
+  if (AreThreadEventsPassedThrough() || HasDivergedFromRecording()) {
+    Destroy(aNativeLock); // Clean up any old lock, as below.
+    return;
+  }
+
+  MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
+  Thread* thread = Thread::Current();
+
+  RecordReplayAssert("CreateLock");
+
+  thread->Events().RecordOrReplayThreadEvent(ThreadEvent::CreateLock);
+
+  size_t id;
+  if (IsRecording()) {
+    id = gNumLocks++;
+  }
+  thread->Events().RecordOrReplayScalar(&id);
+
+  LockAcquires* info = gLockAcquires.Create(id);
+  info->mAcquires = gRecordingFile->OpenStream(StreamName::Lock, id);
+
+  if (IsReplaying()) {
+    info->ReadAndNotifyNextOwner(thread);
+  }
+
+  // Tolerate new locks being created with identical pointers, even if there
+  // was no DestroyLock call for the old one.
+  Destroy(aNativeLock);
+
+  AutoWriteSpinLock ex(gLocksLock);
+  thread->BeginDisallowEvents();
+
+  if (!gLocks) {
+    gLocks = new LockMap();
+  }
+
+  gLocks->insert(LockMap::value_type(aNativeLock, new Lock(id)));
+
+  thread->EndDisallowEvents();
+}
+
+/* static */ void
+Lock::Destroy(void* aNativeLock)
+{
+  Lock* lock = nullptr;
+  {
+    AutoWriteSpinLock ex(gLocksLock);
+    if (gLocks) {
+      LockMap::iterator iter = gLocks->find(aNativeLock);
+      if (iter != gLocks->end()) {
+        lock = iter->second;
+        gLocks->erase(iter);
+      }
+    }
+  }
+  delete lock;
+}
+
+/* static */ Lock*
+Lock::Find(void* aNativeLock)
+{
+  MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
+
+  AutoReadSpinLock ex(gLocksLock);
+
+  if (gLocks) {
+    LockMap::iterator iter = gLocks->find(aNativeLock);
+    if (iter != gLocks->end()) {
+      // Now that we know the lock is recorded, check whether thread events
+      // should be generated right now. Doing things in this order avoids
+      // reentrancy issues when initializing the thread-local state used by
+      // these calls.
+      if (AreThreadEventsPassedThrough() || HasDivergedFromRecording()) {
+        return nullptr;
+      }
+      return iter->second;
+    }
+  }
+
+  return nullptr;
+}
+
+void
+Lock::Enter(const std::function<void()>& aCallback)
+{
+  MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough() && !HasDivergedFromRecording());
+  MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
+
+  RecordReplayAssert("Lock %d", (int) mId);
+
+  // Include an event in each thread's record when a lock acquire begins. This
+  // is not required by the replay, but is used to check that the lock acquire
+  // order is consistent with the recording, so that we fail explicitly
+  // instead of deadlocking.
+  Thread* thread = Thread::Current();
+  thread->Events().RecordOrReplayThreadEvent(ThreadEvent::Lock);
+  thread->Events().CheckInput(mId);
+
+  LockAcquires* acquires = gLockAcquires.Get(mId);
+  if (IsRecording()) {
+    acquires->mAcquires->WriteScalar(thread->Id());
+  } else {
+    // Wait until this thread is next in line to acquire the lock.
+    while (thread->Id() != acquires->mNextOwner) {
+      Thread::Wait();
+    }
+    // Acquire the lock before updating the next owner.
+    aCallback();
+    acquires->ReadAndNotifyNextOwner(thread);
+  }
+}
+
+struct AtomicLock : public detail::MutexImpl
+{
+  using detail::MutexImpl::lock;
+  using detail::MutexImpl::unlock;
+};
+
+// Lock which is held during code sections that run atomically. This is a
+// bare detail::MutexImpl instead of an OffTheBooksMutex because the latter
+// performs atomic operations during initialization.
+static AtomicLock* gAtomicLock = nullptr;
+
+/* static */ void
+Lock::InitializeLocks()
+{
+  MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
+  gNumLocks = gAtomicLockId;
+
+  gAtomicLock = new AtomicLock();
+  MOZ_RELEASE_ASSERT(!IsRecording() || gNumLocks == gAtomicLockId + 1);
+}
+
+/* static */ void
+Lock::LockAquiresUpdated(size_t aLockId)
+{
+  LockAcquires* acquires = gLockAcquires.MaybeGet(aLockId);
+  if (acquires && acquires->mAcquires && acquires->mNextOwner == LockAcquires::NoNextOwner) {
+    acquires->ReadAndNotifyNextOwner(Thread::Current());
+  }
+}
+
+extern "C" {
+
+MOZ_EXPORT void
+RecordReplayInterface_InternalBeginOrderedAtomicAccess()
+{
+  MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
+  if (!gInitializationFailureMessage) {
+    gAtomicLock->lock();
+  }
+}
+
+MOZ_EXPORT void
+RecordReplayInterface_InternalEndOrderedAtomicAccess()
+{
+  MOZ_RELEASE_ASSERT(IsRecordingOrReplaying());
+  if (!gInitializationFailureMessage) {
+    gAtomicLock->unlock();
+  }
+}
+
+} // extern "C"
+
+} // namespace recordreplay
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/Lock.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_Lock_h
+#define mozilla_recordreplay_Lock_h
+
+#include "mozilla/PodOperations.h"
+#include "mozilla/Types.h"
+
+#include "File.h"
+
+namespace mozilla {
+namespace recordreplay {
+
+// Recorded Locks Overview.
+//
+// Each platform has some types used for native locks (e.g. pthread_mutex_t or
+// CRITICAL_SECTION). System APIs which operate on these native locks are
+// redirected so that lock behavior can be tracked. If a native lock is
+// created when thread events are not passed through then that native lock is
+// recorded, and lock acquires will be replayed in the same order in which
+// they originally occurred.
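+//
+// For example, a redirected native lock function might consult this class
+// roughly as follows (an illustrative sketch; CallOriginalLock is
+// hypothetical, and the real redirections live elsewhere):
+//
+//   void WrappedNativeLock(void* aNativeLock) {
+//     Lock* lock = Lock::Find(aNativeLock);
+//     if (lock) {
+//       lock->Enter([=]() { CallOriginalLock(aNativeLock); });
+//     } else {
+//       CallOriginalLock(aNativeLock);
+//     }
+//   }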
+
+// Information about a recorded lock.
+class Lock
+{
+  // Unique ID for this lock.
+  size_t mId;
+
+public:
+  explicit Lock(size_t aId)
+    : mId(aId)
+  {
+    MOZ_ASSERT(aId);
+  }
+
+  size_t Id() { return mId; }
+
+  // When recording, this is called after the lock has been acquired, and
+  // records the acquire in the lock's acquire order stream. When replaying,
+  // this is called before the lock has been acquired, and blocks the thread
+  // until it is next in line to acquire the lock before acquiring it via
+  // aCallback.
+  void Enter(const std::function<void()>& aCallback);
+
+  // Create a new Lock corresponding to a native lock, with a fresh ID.
+  static void New(void* aNativeLock);
+
+  // Destroy any Lock associated with a native lock.
+  static void Destroy(void* aNativeLock);
+
+  // Get the recorded Lock for a native lock if there is one, otherwise null.
+  static Lock* Find(void* aNativeLock);
+
+  // Initialize locking state.
+  static void InitializeLocks();
+
+  // Note that new data has been read into a lock's acquires stream.
+  static void LockAquiresUpdated(size_t aLockId);
+};
+
+} // namespace recordreplay
+} // namespace mozilla
+
+#endif // mozilla_recordreplay_Lock_h
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/MemorySnapshot.cpp
@@ -0,0 +1,1371 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "MemorySnapshot.h"
+
+#include "ipc/ChildInternal.h"
+#include "mozilla/Maybe.h"
+#include "DirtyMemoryHandler.h"
+#include "InfallibleVector.h"
+#include "ProcessRecordReplay.h"
+#include "ProcessRewind.h"
+#include "SpinLock.h"
+#include "SplayTree.h"
+#include "Thread.h"
+
+#include <algorithm>
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+
+// Define to enable the countdown debugging thread. See StartCountdown().
+//#define WANT_COUNTDOWN_THREAD 1
+
+namespace mozilla {
+namespace recordreplay {
+
+///////////////////////////////////////////////////////////////////////////////
+// Memory Snapshots Overview.
+//
+// Checkpoints are periodically saved, storing in memory enough information
+// for the process to restore the contents of all tracked memory as it
+// rewinds to earlier checkpoints. There are two components to a saved
+// checkpoint:
+//
+// - Stack contents for each thread are completely saved on disk at each saved
+//   checkpoint. This is handled by ThreadSnapshot.cpp.
+//
+// - Heap and static memory contents (tracked memory) are saved in memory as
+//   the contents of pages modified before either the next saved checkpoint
+//   or the current execution point (if this is the last saved checkpoint).
+//   This is handled here.
+//
+// Heap memory is only tracked when allocated with TrackedMemoryKind.
+//
+// Snapshots of heap/static memory are modeled on the copy-on-write semantics
+// used by fork. Instead of actually forking, we use write-protected memory and
+// a fault handler to perform the copy-on-write, which both gives more control
+// over the snapshot process and allows snapshots to be taken on platforms
+// without fork (e.g. Windows). The following example shows how snapshots are
+// generated:
+//
+// #1 Save Checkpoint A. The initial snapshot tabulates all allocated tracked
+//    memory in the process, and write-protects all of it.
+//
+// #2 Write pages P0 and P1. Writing to the pages trips the fault handler. The
+//    handler creates copies of the initial contents of P0 and P1 (P0a and P1a)
+//    and unprotects the pages.
+//
+// #3 Save Checkpoint B. P0a and P1a, along with any other pages modified
+//    between A and B, become associated with checkpoint A. All modified pages
+//    are reprotected.
+//
+// #4 Write pages P1 and P2. Again, writing to the pages trips the fault
+//    handler and copies P1b and P2b are created and the pages are unprotected.
+//
+// #5 Save Checkpoint C. P1b and P2b become associated with snapshot B, and the
+//    modified pages are reprotected.
+//
+// If we were to then rewind from C to A, we would read and restore P1b/P2b,
+// followed by P0a/P1a. All data associated with checkpoints A and later is
+// discarded (we can only rewind; we cannot jump forward in time).
+///////////////////////////////////////////////////////////////////////////////
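+
+// As a simplified sketch of the copy-on-write step performed when a write
+// trips the fault handler (the helper names here are hypothetical; see
+// HandleDirtyMemoryFault below and DirtyMemoryHandler.h):
+//
+//   bool HandleDirtyMemoryFault(uint8_t* aAddress) {
+//     uint8_t* page = PageBase(aAddress);  // round down to the page start
+//     uint8_t* original = CopyPage(page);  // save the original contents
+//     AddDirtyPage(page, original);        // remember them for the checkpoint
+//     UnprotectPage(page);                 // let the faulting write proceed
+//     return true;
+//   }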
+
+///////////////////////////////////////////////////////////////////////////////
+// Snapshot Threads Overview.
+//
+// After step #3 above, the main thread has created a diff snapshot with the
+// copies of the original contents of pages modified between two saved
+// checkpoints. These page copies are initially all in memory. It is the
+// responsibility of the snapshot threads to restore the original contents of
+// pages using these in-memory copies when rewinding to the last saved
+// checkpoint.
+//
+// There are a fixed number of snapshot threads that are spawned when the
+// first checkpoint is saved. Threads are each responsible for distinct sets of
+// heap memory pages (see AddDirtyPageToWorklist), avoiding synchronization
+// issues between different snapshot threads.
+///////////////////////////////////////////////////////////////////////////////
+
+///////////////////////////////////////////////////////////////////////////////
+// Memory Snapshot Structures
+///////////////////////////////////////////////////////////////////////////////
+
+// A region of allocated memory which should be tracked by MemoryInfo.
+struct AllocatedMemoryRegion {
+  uint8_t* mBase;
+  size_t mSize;
+  bool mExecutable;
+
+  AllocatedMemoryRegion()
+    : mBase(nullptr), mSize(0), mExecutable(false)
+  {}
+
+  AllocatedMemoryRegion(uint8_t* aBase, size_t aSize, bool aExecutable)
+    : mBase(aBase), mSize(aSize), mExecutable(aExecutable)
+  {}
+
+  // For sorting regions by base address.
+  struct AddressSort {
+    typedef void* Lookup;
+    static void* getLookup(const AllocatedMemoryRegion& aRegion) {
+      return aRegion.mBase;
+    }
+    static ssize_t compare(void* aAddress, const AllocatedMemoryRegion& aRegion) {
+      return (uint8_t*) aAddress - aRegion.mBase;
+    }
+  };
+
+  // For sorting regions by size, from largest to smallest.
+  struct SizeReverseSort {
+    typedef size_t Lookup;
+    static size_t getLookup(const AllocatedMemoryRegion& aRegion) {
+      return aRegion.mSize;
+    }
+    static ssize_t compare(size_t aSize, const AllocatedMemoryRegion& aRegion) {
+      return aRegion.mSize - aSize;
+    }
+  };
+};
+
+// Information about a page which was modified between two saved checkpoints.
+struct DirtyPage {
+  // Base address of the page.
+  uint8_t* mBase;
+
+  // Copy of the page at the first checkpoint. Written by the dirty memory
+  // handler via HandleDirtyMemoryFault if this is in the active page set,
+  // otherwise accessed by snapshot threads.
+  uint8_t* mOriginal;
+
+  bool mExecutable;
+
+  DirtyPage(uint8_t* aBase, uint8_t* aOriginal, bool aExecutable)
+    : mBase(aBase), mOriginal(aOriginal), mExecutable(aExecutable)
+  {}
+
+  struct AddressSort {
+    typedef uint8_t* Lookup;
+    static uint8_t* getLookup(const DirtyPage& aPage) {
+      return aPage.mBase;
+    }
+    static ssize_t compare(uint8_t* aBase, const DirtyPage& aPage) {
+      return aBase - aPage.mBase;
+    }
+  };
+};
+
+// A set of dirty pages that can be searched quickly.
+typedef SplayTree<DirtyPage, DirtyPage::AddressSort,
+                  AllocPolicy<MemoryKind::SortedDirtyPageSet>, 4> SortedDirtyPageSet;
+
+// A set of dirty pages associated with some checkpoint.
+struct DirtyPageSet {
+  // Checkpoint associated with this set.
+  CheckpointId mCheckpoint;
+
+  // All dirty pages in the set. Pages may be added or destroyed by the main
+  // thread when all other threads are idle, by the dirty memory handler when
+  // it is active and this is the active page set, and by the snapshot thread
+  // which owns this set.
+  InfallibleVector<DirtyPage, 256, AllocPolicy<MemoryKind::DirtyPageSet>> mPages;
+
+  explicit DirtyPageSet(const CheckpointId& aCheckpoint)
+    : mCheckpoint(aCheckpoint)
+  {}
+};
+
+// Worklist used by each snapshot thread.
+struct SnapshotThreadWorklist {
+  // Index into gMemoryInfo->mSnapshotWorklists of the thread.
+  size_t mThreadIndex;
+
+  // Record/replay ID of the thread.
+  size_t mThreadId;
+
+  // Sets of pages in the thread's worklist. Each set is for a different diff,
+  // with the oldest checkpoints first.
+  InfallibleVector<DirtyPageSet, 256, AllocPolicy<MemoryKind::Generic>> mSets;
+};
+
+// Structure used to coordinate activity between the main thread and all
+// snapshot threads. The workflow with this structure is as follows:
+//
+// 1. The main thread calls ActivateBegin(), marking the condition as active
+//    and notifying each snapshot thread. The main thread blocks in this call.
+//
+// 2. Each snapshot thread, maybe after waking up, checks the condition, does
+//    any processing it needs to (knowing the main thread is blocked) and
+//    then calls WaitUntilNoLongerActive(), blocking in the call.
+//
+// 3. Once all snapshot threads are blocked in WaitUntilNoLongerActive(), the
+//    main thread is unblocked from ActivateBegin(). It can then do whatever
+//    processing it needs to (knowing all snapshot threads are blocked) and
+//    then calls ActivateEnd(), blocking in the call.
+//
+// 4. Snapshot threads are now unblocked from WaitUntilNoLongerActive(). The
+//    main thread does not unblock from ActivateEnd() until all snapshot
+//    threads have left WaitUntilNoLongerActive().
+//
+// The intent with this class is to ensure that the main thread knows exactly
+// when the snapshot threads are operating and that there is no potential for
+// races between them.
+class SnapshotThreadCondition {
+  Atomic<bool, SequentiallyConsistent, Behavior::DontPreserve> mActive;
+  Atomic<int32_t, SequentiallyConsistent, Behavior::DontPreserve> mCount;
+
+public:
+  void ActivateBegin();
+  void ActivateEnd();
+  bool IsActive();
+  void WaitUntilNoLongerActive();
+};
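+
+// Typical use from the main thread (an illustrative sketch):
+//
+//   condition.ActivateBegin();
+//   // ... all snapshot threads are blocked in WaitUntilNoLongerActive ...
+//   condition.ActivateEnd();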
+
+static const size_t NumSnapshotThreads = 8;
+
+// A set of free regions in the process. There are two of these, for the
+// free regions in tracked and untracked memory.
+class FreeRegionSet {
+  // Kind of memory being managed. This also describes the memory used by the
+  // set itself.
+  MemoryKind mKind;
+
+  // Lock protecting contents of the structure.
+  SpinLock mLock;
+
+  // To avoid reentrancy issues when growing the set, a chunk of pages for
+  // the splay tree is preallocated for use the next time the tree needs to
+  // expand its size.
+  static const size_t ChunkPages = 4;
+  void* mNextChunk;
+
+  // Ensure there is a chunk available for the splay tree.
+  void MaybeRefillNextChunk(AutoSpinLock& aLockHeld);
+
+  // Get the next chunk from the free region set for this memory kind.
+  void* TakeNextChunk();
+
+  struct MyAllocPolicy {
+    FreeRegionSet& mSet;
+
+    template <typename T>
+    void free_(T* aPtr, size_t aSize) { MOZ_CRASH(); }
+
+    template <typename T>
+    T* pod_malloc(size_t aNumElems) {
+      MOZ_RELEASE_ASSERT(sizeof(T) * aNumElems <= ChunkPages * PageSize);
+      return (T*) mSet.TakeNextChunk();
+    }
+
+    explicit MyAllocPolicy(FreeRegionSet& aSet)
+      : mSet(aSet)
+    {}
+  };
+
+  // All memory of this set's kind that is not in use at the current point in
+  // execution, sorted by size from largest to smallest.
+  typedef SplayTree<AllocatedMemoryRegion,
+                    AllocatedMemoryRegion::SizeReverseSort,
+                    MyAllocPolicy, ChunkPages> Tree;
+  Tree mRegions;
+
+  void InsertLockHeld(void* aAddress, size_t aSize, AutoSpinLock& aLockHeld);
+  void* ExtractLockHeld(size_t aSize, AutoSpinLock& aLockHeld);
+
+public:
+  explicit FreeRegionSet(MemoryKind aKind)
+    : mKind(aKind), mRegions(MyAllocPolicy(*this))
+  {}
+
+  // Get the single region set for a given memory kind.
+  static FreeRegionSet& Get(MemoryKind aKind);
+
+  // Add a free region to the set.
+  void Insert(void* aAddress, size_t aSize);
+
+  // Remove a free region of the specified size. If aAddress is specified then
+  // this address will be prioritized, but a different pointer may be returned.
+  // The resulting memory will be zeroed.
+  void* Extract(void* aAddress, size_t aSize);
+
+  // Return whether a memory range intersects this set at all.
+  bool Intersects(void* aAddress, size_t aSize);
+};
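+
+// Typical use (an illustrative sketch; whether nullptr is passed for the
+// address hint depends on the caller):
+//
+//   FreeRegionSet& set = FreeRegionSet::Get(MemoryKind::Generic);
+//   void* ptr = set.Extract(nullptr, PageSize); // take a zeroed free region
+//   ...
+//   set.Insert(ptr, PageSize);                  // return it when done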
+
+// Information about the current memory state. The contents of this structure
+// are in untracked memory.
+struct MemoryInfo {
+  // Whether new dirty pages or allocated regions are allowed.
+  bool mMemoryChangesAllowed;
+
+  // Untracked memory regions allocated before the first checkpoint. This is only
+  // accessed on the main thread, and is not a vector because of reentrancy
+  // issues.
+  static const size_t MaxInitialUntrackedRegions = 256;
+  AllocatedMemoryRegion mInitialUntrackedRegions[MaxInitialUntrackedRegions];
+  SpinLock mInitialUntrackedRegionsLock;
+
+  // All tracked memory in the process. This may be updated by any thread while
+  // holding mTrackedRegionsLock.
+  SplayTree<AllocatedMemoryRegion, AllocatedMemoryRegion::AddressSort,
+            AllocPolicy<MemoryKind::TrackedRegions>, 4>
+    mTrackedRegions;
+  InfallibleVector<AllocatedMemoryRegion, 512, AllocPolicy<MemoryKind::TrackedRegions>>
+    mTrackedRegionsByAllocationOrder;
+  SpinLock mTrackedRegionsLock;
+
+  // Pages from |mTrackedRegions| modified since the last saved checkpoint.
+  // Accessed by any thread (usually the dirty memory handler) when memory
+  // changes are allowed, and by the main thread when memory changes are not
+  // allowed.
+  SortedDirtyPageSet mActiveDirty;
+  SpinLock mActiveDirtyLock;
+
+  // All untracked memory which is available for new allocations.
+  FreeRegionSet mFreeUntrackedRegions;
+
+  // Worklists for each snapshot thread.
+  SnapshotThreadWorklist mSnapshotWorklists[NumSnapshotThreads];
+
+  // Whether snapshot threads should restore memory to its state when the
+  // last saved diff snapshot was started.
+  SnapshotThreadCondition mSnapshotThreadsShouldRestore;
+
+  // Whether snapshot threads should idle.
+  SnapshotThreadCondition mSnapshotThreadsShouldIdle;
+
+  // Counter used by the countdown thread.
+  Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> mCountdown;
+
+  // Information for timers.
+  double mStartTime;
+  uint32_t mTimeHits[(size_t) TimerKind::Count];
+  double mTimeTotals[(size_t) TimerKind::Count];
+
+  // Information for memory allocation.
+  Atomic<ssize_t, Relaxed, Behavior::DontPreserve> mMemoryBalance[(size_t) MemoryKind::Count];
+
+  // Recent dirty memory faults.
+  void* mDirtyMemoryFaults[50];
+
+  // Whether RecordReplayDirective may crash this process.
+  bool mIntentionalCrashesAllowed;
+
+  // Whether the CrashSoon directive has been given to this process.
+  bool mCrashSoon;
+
+  MemoryInfo()
+    : mMemoryChangesAllowed(true)
+    , mFreeUntrackedRegions(MemoryKind::FreeRegions)
+    , mStartTime(CurrentTime())
+    , mIntentionalCrashesAllowed(true)
+  {
+    // The singleton MemoryInfo is allocated with zeroed memory, so other
+    // fields do not need explicit initialization.
+  }
+};
+
+static MemoryInfo* gMemoryInfo = nullptr;
+
+void
+SetMemoryChangesAllowed(bool aAllowed)
+{
+  MOZ_RELEASE_ASSERT(gMemoryInfo->mMemoryChangesAllowed == !aAllowed);
+  gMemoryInfo->mMemoryChangesAllowed = aAllowed;
+}
+
+static void
+EnsureMemoryChangesAllowed()
+{
+  while (!gMemoryInfo->mMemoryChangesAllowed) {
+    ThreadYield();
+  }
+}
+
+void
+StartCountdown(size_t aCount)
+{
+  gMemoryInfo->mCountdown = aCount;
+}
+
+AutoCountdown::AutoCountdown(size_t aCount)
+{
+  StartCountdown(aCount);
+}
+
+AutoCountdown::~AutoCountdown()
+{
+  StartCountdown(0);
+}
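+
+// Usage sketch (illustrative): wrap an operation that is expected to finish
+// promptly. If the count is consumed first, the countdown thread below (when
+// WANT_COUNTDOWN_THREAD is defined) deliberately crashes the child with a
+// report, which is easier to debug than a silent hang:
+//
+//   {
+//     AutoCountdown countdown(1000000000); // count chosen for illustration
+//     OperationThatMightHang();            // hypothetical
+//   } // destructor starts a zero countdown, disarming the thread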
+
+#ifdef WANT_COUNTDOWN_THREAD
+
+static void
+CountdownThreadMain(void*)
+{
+  while (true) {
+    if (gMemoryInfo->mCountdown && --gMemoryInfo->mCountdown == 0) {
+      // When debugging hangs in the child process, we can break here in lldb
+      // to inspect what the process is doing.
+      child::ReportFatalError("CountdownThread activated");
+    }
+    ThreadYield();
+  }
+}
+
+#endif // WANT_COUNTDOWN_THREAD
+
+///////////////////////////////////////////////////////////////////////////////
+// Profiling
+///////////////////////////////////////////////////////////////////////////////
+
+AutoTimer::AutoTimer(TimerKind aKind)
+  : mKind(aKind), mStart(CurrentTime())
+{}
+
+AutoTimer::~AutoTimer()
+{
+  if (gMemoryInfo) {
+    gMemoryInfo->mTimeHits[(size_t) mKind]++;
+    gMemoryInfo->mTimeTotals[(size_t) mKind] += CurrentTime() - mStart;
+  }
+}
+
+static const char* gTimerKindNames[] = {
+#define DefineTimerKindName(aKind) #aKind,
+  ForEachTimerKind(DefineTimerKindName)
+#undef DefineTimerKindName
+};
+
+void
+DumpTimers()
+{
+  if (!gMemoryInfo) {
+    return;
+  }
+  Print("Times %.2fs\n", (CurrentTime() - gMemoryInfo->mStartTime) / 1000000.0);
+  for (size_t i = 0; i < (size_t) TimerKind::Count; i++) {
+    uint32_t hits = gMemoryInfo->mTimeHits[i];
+    double time = gMemoryInfo->mTimeTotals[i];
+    Print("%s: %d hits, %.2fs\n",
+          gTimerKindNames[i], (int) hits, time / 1000000.0);
+  }
+}
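+
+// Usage sketch (illustrative; CurrentTime() is in microseconds, per the
+// divisions above):
+//
+//   {
+//     AutoTimer timer(TimerKind::PageCopy); // hypothetical kind; real kinds
+//                                           // come from ForEachTimerKind
+//     // ... work attributed to this kind ...
+//   } // destructor bumps mTimeHits and adds the elapsed time to mTimeTotals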
+
+///////////////////////////////////////////////////////////////////////////////
+// Directives
+///////////////////////////////////////////////////////////////////////////////
+
+void
+SetAllowIntentionalCrashes(bool aAllowed)
+{
+  gMemoryInfo->mIntentionalCrashesAllowed = aAllowed;
+}
+
+extern "C" {
+
+MOZ_EXPORT void
+RecordReplayInterface_InternalRecordReplayDirective(long aDirective)
+{
+  switch ((Directive) aDirective) {
+  case Directive::CrashSoon:
+    gMemoryInfo->mCrashSoon = true;
+    break;
+  case Directive::MaybeCrash:
+    if (gMemoryInfo->mIntentionalCrashesAllowed && gMemoryInfo->mCrashSoon) {
+      PrintSpew("Intentionally Crashing!\n");
+      MOZ_CRASH("RecordReplayDirective intentional crash");
+    }
+    gMemoryInfo->mCrashSoon = false;
+    break;
+  case Directive::AlwaysSaveTemporaryCheckpoints:
+    navigation::AlwaysSaveTemporaryCheckpoints();
+    break;
+  case Directive::AlwaysMarkMajorCheckpoints:
+    child::NotifyAlwaysMarkMajorCheckpoints();
+    break;
+  default:
+    MOZ_CRASH("Unknown directive");
+  }
+}
+
+} // extern "C"
+
+///////////////////////////////////////////////////////////////////////////////
+// Snapshot Thread Conditions
+///////////////////////////////////////////////////////////////////////////////
+
+void
+SnapshotThreadCondition::ActivateBegin()
+{
+  MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
+  MOZ_RELEASE_ASSERT(!mActive);
+  mActive = true;
+  for (size_t i = 0; i < NumSnapshotThreads; i++) {
+    Thread::Notify(gMemoryInfo->mSnapshotWorklists[i].mThreadId);
+  }
+  while (mCount != NumSnapshotThreads) {
+    Thread::WaitNoIdle();
+  }
+}
+
+void
+SnapshotThreadCondition::ActivateEnd()
+{
+  MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
+  MOZ_RELEASE_ASSERT(mActive);
+  mActive = false;
+  for (size_t i = 0; i < NumSnapshotThreads; i++) {
+    Thread::Notify(gMemoryInfo->mSnapshotWorklists[i].mThreadId);
+  }
+  while (mCount) {
+    Thread::WaitNoIdle();
+  }
+}
+
+bool
+SnapshotThreadCondition::IsActive()
+{
+  MOZ_RELEASE_ASSERT(!Thread::CurrentIsMainThread());
+  return mActive;
+}
+
+void
+SnapshotThreadCondition::WaitUntilNoLongerActive()
+{
+  MOZ_RELEASE_ASSERT(!Thread::CurrentIsMainThread());
+  MOZ_RELEASE_ASSERT(mActive);
+  if (NumSnapshotThreads == ++mCount) {
+    Thread::Notify(MainThreadId);
+  }
+  while (mActive) {
+    Thread::WaitNoIdle();
+  }
+  if (0 == --mCount) {
+    Thread::Notify(MainThreadId);
+  }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Snapshot Page Allocation
+///////////////////////////////////////////////////////////////////////////////
+
+// Get a page in untracked memory that can be used as a copy of a tracked page.
+static uint8_t*
+AllocatePageCopy()
+{
+  return (uint8_t*) AllocateMemory(PageSize, MemoryKind::PageCopy);
+}
+
+// Free a page allocated by AllocatePageCopy.
+static void
+FreePageCopy(uint8_t* aPage)
+{
+  DeallocateMemory(aPage, PageSize, MemoryKind::PageCopy);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Page Fault Handling
+///////////////////////////////////////////////////////////////////////////////
+
+void
+MemoryMove(void* aDst, const void* aSrc, size_t aSize)
+{
+  MOZ_RELEASE_ASSERT((size_t)aDst % sizeof(uint32_t) == 0);
+  MOZ_RELEASE_ASSERT((size_t)aSrc % sizeof(uint32_t) == 0);
+  MOZ_RELEASE_ASSERT(aSize % sizeof(uint32_t) == 0);
+  MOZ_RELEASE_ASSERT((size_t)aDst <= (size_t)aSrc || (size_t)aDst >= (size_t)aSrc + aSize);
+
+  uint32_t* ndst = (uint32_t*)aDst;
+  const uint32_t* nsrc = (const uint32_t*)aSrc;
+  for (size_t i = 0; i < aSize / sizeof(uint32_t); i++) {
+    ndst[i] = nsrc[i];
+  }
+}
+
+void
+MemoryZero(void* aDst, size_t aSize)
+{
+  MOZ_RELEASE_ASSERT((size_t)aDst % sizeof(uint32_t) == 0);
+  MOZ_RELEASE_ASSERT(aSize % sizeof(uint32_t) == 0);
+
+  // Use volatile here to avoid annoying clang optimizations.
+  volatile uint32_t* ndst = (uint32_t*)aDst;
+  for (size_t i = 0; i < aSize / sizeof(uint32_t); i++) {
+    ndst[i] = 0;
+  }
+}
+
+// Return whether an address is in a tracked region. This excludes memory that
+// is in an active new region and is not write protected.
+static bool
+IsTrackedAddress(void* aAddress, bool* aExecutable)
+{
+  AutoSpinLock lock(gMemoryInfo->mTrackedRegionsLock);
+  Maybe<AllocatedMemoryRegion> region =
+    gMemoryInfo->mTrackedRegions.lookupClosestLessOrEqual(aAddress);
+  if (region.isSome() && MemoryContains(region.ref().mBase, region.ref().mSize, aAddress)) {
+    if (aExecutable) {
+      *aExecutable = region.ref().mExecutable;
+    }
+    return true;
+  }
+  return false;
+}
+
+bool
+HandleDirtyMemoryFault(uint8_t* aAddress)
+{
+  EnsureMemoryChangesAllowed();
+
+  bool different = false;
+  for (size_t i = ArrayLength(gMemoryInfo->mDirtyMemoryFaults) - 1; i; i--) {
+    gMemoryInfo->mDirtyMemoryFaults[i] = gMemoryInfo->mDirtyMemoryFaults[i - 1];
+    if (gMemoryInfo->mDirtyMemoryFaults[i] != aAddress) {
+      different = true;
+    }
+  }
+  gMemoryInfo->mDirtyMemoryFaults[0] = aAddress;
+  if (!different) {
+    Print("WARNING: Repeated accesses to the same dirty address %p\n", aAddress);
+  }
+
+  // Round down to the base of the page.
+  aAddress = PageBase(aAddress);
+
+  AutoSpinLock lock(gMemoryInfo->mActiveDirtyLock);
+
+  // Check to see if this is already an active dirty page. Once a page has been
+  // marked as dirty it will be accessible until the next checkpoint is saved,
+  // but it's possible for multiple threads to access the same protected memory
+  // before we have a chance to unprotect it, in which case we'll end up here
+  // multiple times for the page.
+  if (gMemoryInfo->mActiveDirty.maybeLookup(aAddress)) {
+    return true;
+  }
+
+  // Crash if this address is not in a tracked region.
+  bool executable;
+  if (!IsTrackedAddress(aAddress, &executable)) {
+    return false;
+  }
+
+  // Copy the page's original contents into the active dirty set, and unprotect
+  // it so that execution can proceed.
+  uint8_t* original = AllocatePageCopy();
+  MemoryMove(original, aAddress, PageSize);
+  gMemoryInfo->mActiveDirty.insert(aAddress, DirtyPage(aAddress, original, executable));
+  DirectUnprotectMemory(aAddress, PageSize, executable);
+  return true;
+}
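+
+// Caller sketch: the fault handler installed by SetupDirtyMemoryHandler (see
+// DirtyMemoryHandler.cpp) is expected to do roughly the following for write
+// faults:
+//
+//   if (HandleDirtyMemoryFault(faultingAddress)) {
+//     // The page was write protected by the snapshot mechanism and is now
+//     // writable again; resume the faulting instruction.
+//   } else {
+//     // A genuine crash; report it fatally.
+//   }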
+
+void
+UnrecoverableSnapshotFailure()
+{
+  AutoSpinLock lock(gMemoryInfo->mTrackedRegionsLock);
+  DirectUnprotectMemory(PageBase(&errno), PageSize, false);
+  for (auto region : gMemoryInfo->mTrackedRegionsByAllocationOrder) {
+    DirectUnprotectMemory(region.mBase, region.mSize, region.mExecutable,
+                          /* aIgnoreFailures = */ true);
+  }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Initial Memory Region Processing
+///////////////////////////////////////////////////////////////////////////////
+
+static void
+AddInitialUntrackedMemoryRegion(uint8_t* aBase, size_t aSize)
+{
+  MOZ_RELEASE_ASSERT(!HasSavedCheckpoint());
+
+  if (gInitializationFailureMessage) {
+    return;
+  }
+
+  static void* gSkippedRegion;
+  if (!gSkippedRegion) {
+    // We are allocating gMemoryInfo itself, and will directly call this
+    // function again shortly.
+    gSkippedRegion = aBase;
+    return;
+  }
+  MOZ_RELEASE_ASSERT(gSkippedRegion == gMemoryInfo);
+
+  AutoSpinLock lock(gMemoryInfo->mInitialUntrackedRegionsLock);
+
+  for (AllocatedMemoryRegion& region : gMemoryInfo->mInitialUntrackedRegions) {
+    if (!region.mBase) {
+      region.mBase = aBase;
+      region.mSize = aSize;
+      return;
+    }
+  }
+
+  // If we end up here then MaxInitialUntrackedRegions should be larger.
+  MOZ_CRASH();
+}
+
+static void
+RemoveInitialUntrackedRegion(uint8_t* aBase, size_t aSize)
+{
+  MOZ_RELEASE_ASSERT(!HasSavedCheckpoint());
+  AutoSpinLock lock(gMemoryInfo->mInitialUntrackedRegionsLock);
+
+  for (AllocatedMemoryRegion& region : gMemoryInfo->mInitialUntrackedRegions) {
+    if (region.mBase == aBase) {
+      MOZ_RELEASE_ASSERT(region.mSize == aSize);
+      region.mBase = nullptr;
+      region.mSize = 0;
+      return;
+    }
+  }
+  MOZ_CRASH();
+}
+
+static void
+MarkThreadStacksAsUntracked()
+{
+  // Thread stacks are excluded from the tracked regions.
+  for (size_t i = MainThreadId; i <= MaxThreadId; i++) {
+    Thread* thread = Thread::GetById(i);
+    AddInitialUntrackedMemoryRegion(thread->StackBase(), thread->StackSize());
+  }
+}
+
+// Given an address region [aAddress, aAddress + *aSize], return true if
+// there is any intersection with an excluded region
+// [aExclude, aExclude + aExcludeSize]. If so, set *aSize to the length of
+// the subregion starting at aAddress which is not excluded, and set
+// *aRemaining and *aRemainingSize to any additional subregion which is not
+// excluded.
+static bool
+MaybeExtractMemoryRegion(uint8_t* aAddress, size_t* aSize,
+                         uint8_t** aRemaining, size_t* aRemainingSize,
+                         uint8_t* aExclude, size_t aExcludeSize)
+{
+  uint8_t* addrLimit = aAddress + *aSize;
+
+  // Expand the excluded region out to the containing page boundaries.
+  MOZ_RELEASE_ASSERT((size_t)aExclude % PageSize == 0);
+  aExcludeSize = RoundupSizeToPageBoundary(aExcludeSize);
+
+  uint8_t* excludeLimit = aExclude + aExcludeSize;
+
+  if (excludeLimit <= aAddress || addrLimit <= aExclude) {
+    // No intersection.
+    return false;
+  }
+
+  *aSize = std::max<ssize_t>(aExclude - aAddress, 0);
+  if (aRemaining) {
+    *aRemaining = excludeLimit;
+    *aRemainingSize = std::max<ssize_t>(addrLimit - *aRemaining, 0);
+  }
+  return true;
+}
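+
+// Worked example (illustrative, with 0x1000-byte pages): for a region
+// [0x10000, 0x18000) and an excluded region [0x12000, 0x13000), this returns
+// true with *aSize == 0x2000 (the prefix [0x10000, 0x12000)),
+// *aRemaining == 0x13000, and *aRemainingSize == 0x5000 (the suffix
+// [0x13000, 0x18000)).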
+
+// Set *aSize to describe the number of bytes starting at aAddress that should
+// be considered tracked memory. *aRemaining and *aRemainingSize are set to any
+// remaining portion of the initial region after the first excluded portion
+// that is found.
+static void
+ExtractTrackedInitialMemoryRegion(uint8_t* aAddress, size_t* aSize,
+                                  uint8_t** aRemaining, size_t* aRemainingSize)
+{
+  // Look for the earliest untracked region which intersects the given region.
+  const AllocatedMemoryRegion* earliestIntersect = nullptr;
+  for (const AllocatedMemoryRegion& region : gMemoryInfo->mInitialUntrackedRegions) {
+    size_t size = *aSize;
+    if (MaybeExtractMemoryRegion(aAddress, &size, nullptr, nullptr, region.mBase, region.mSize)) {
+      // There was an intersection.
+      if (!earliestIntersect || region.mBase < earliestIntersect->mBase) {
+        earliestIntersect = &region;
+      }
+    }
+  }
+
+  if (earliestIntersect) {
+    if (!MaybeExtractMemoryRegion(aAddress, aSize, aRemaining, aRemainingSize,
+                                  earliestIntersect->mBase, earliestIntersect->mSize)) {
+      MOZ_CRASH();
+    }
+  } else {
+    // If there is no intersection then the entire region is tracked.
+    *aRemaining = aAddress + *aSize;
+    *aRemainingSize = 0;
+  }
+}
+
+static void
+AddTrackedRegion(uint8_t* aAddress, size_t aSize, bool aExecutable)
+{
+  if (aSize) {
+    AutoSpinLock lock(gMemoryInfo->mTrackedRegionsLock);
+    gMemoryInfo->mTrackedRegions.insert(aAddress,
+                                        AllocatedMemoryRegion(aAddress, aSize, aExecutable));
+    gMemoryInfo->mTrackedRegionsByAllocationOrder.emplaceBack(aAddress, aSize, aExecutable);
+  }
+}
+
+// Add any tracked subregions of [aAddress, aAddress + aSize].
+void
+AddInitialTrackedMemoryRegions(uint8_t* aAddress, size_t aSize, bool aExecutable)
+{
+  while (aSize) {
+    uint8_t* remaining;
+    size_t remainingSize;
+    ExtractTrackedInitialMemoryRegion(aAddress, &aSize, &remaining, &remainingSize);
+
+    AddTrackedRegion(aAddress, aSize, aExecutable);
+
+    aAddress = remaining;
+    aSize = remainingSize;
+  }
+}
+
+static void UpdateNumTrackedRegionsForSnapshot();
+
+// Handle all initial memory regions in the process.
+static void
+ProcessAllInitialMemoryRegions()
+{
+  MOZ_ASSERT(!AreThreadEventsPassedThrough());
+
+  {
+    AutoPassThroughThreadEvents pt;
+    for (mach_vm_address_t addr = 0;;) {
+      mach_vm_size_t nbytes;
+
+      vm_region_basic_info_64 info;
+      mach_msg_type_number_t info_count = sizeof(vm_region_basic_info_64);
+      mach_port_t some_port;
+      kern_return_t rv = mach_vm_region(mach_task_self(), &addr, &nbytes, VM_REGION_BASIC_INFO,
+                                        (vm_region_info_t) &info, &info_count, &some_port);
+      if (rv == KERN_INVALID_ADDRESS) {
+        break;
+      }
+      MOZ_RELEASE_ASSERT(rv == KERN_SUCCESS);
+
+      if (info.max_protection & VM_PROT_WRITE) {
+        MOZ_RELEASE_ASSERT(info.max_protection & VM_PROT_READ);
+        AddInitialTrackedMemoryRegions(reinterpret_cast<uint8_t*>(addr), nbytes,
+                                       info.max_protection & VM_PROT_EXECUTE);
+      }
+
+      addr += nbytes;
+    }
+  }
+
+  UpdateNumTrackedRegionsForSnapshot();
+
+  // Write protect all tracked memory.
+  AutoDisallowMemoryChanges disallow;
+  for (const AllocatedMemoryRegion& region : gMemoryInfo->mTrackedRegionsByAllocationOrder) {
+    DirectWriteProtectMemory(region.mBase, region.mSize, region.mExecutable);
+  }
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Free Region Management
+///////////////////////////////////////////////////////////////////////////////
+
+// All memory in gMemoryInfo->mTrackedRegions that is not in use at the current
+// point in execution.
+static FreeRegionSet gFreeRegions(MemoryKind::Tracked);
+
+// The size of gMemoryInfo->mTrackedRegionsByAllocationOrder we expect to see
+// at the point of the last saved checkpoint.
+static size_t gNumTrackedRegions;
+
+static void
+UpdateNumTrackedRegionsForSnapshot()
+{
+  MOZ_ASSERT(Thread::CurrentIsMainThread());
+  gNumTrackedRegions = gMemoryInfo->mTrackedRegionsByAllocationOrder.length();
+}
+
+void
+FixupFreeRegionsAfterRewind()
+{
+  // All memory that has been allocated since the associated checkpoint was
+  // reached is now free, and may be reused for new allocations.
+  size_t newTrackedRegions = gMemoryInfo->mTrackedRegionsByAllocationOrder.length();
+  for (size_t i = gNumTrackedRegions; i < newTrackedRegions; i++) {
+    const AllocatedMemoryRegion& region = gMemoryInfo->mTrackedRegionsByAllocationOrder[i];
+    gFreeRegions.Insert(region.mBase, region.mSize);
+  }
+  gNumTrackedRegions = newTrackedRegions;
+}
+
+/* static */ FreeRegionSet&
+FreeRegionSet::Get(MemoryKind aKind)
+{
+  return (aKind == MemoryKind::Tracked) ? gFreeRegions : gMemoryInfo->mFreeUntrackedRegions;
+}
+
+void*
+FreeRegionSet::TakeNextChunk()
+{
+  MOZ_RELEASE_ASSERT(mNextChunk);
+  void* res = mNextChunk;
+  mNextChunk = nullptr;
+  return res;
+}
+
+void
+FreeRegionSet::InsertLockHeld(void* aAddress, size_t aSize, AutoSpinLock& aLockHeld)
+{
+  mRegions.insert(aSize, AllocatedMemoryRegion((uint8_t*) aAddress, aSize, true));
+}
+
+void
+FreeRegionSet::MaybeRefillNextChunk(AutoSpinLock& aLockHeld)
+{
+  if (mNextChunk) {
+    return;
+  }
+
+  // Look for a free region we can take the next chunk from.
+  size_t size = ChunkPages * PageSize;
+  gMemoryInfo->mMemoryBalance[(size_t) mKind] += size;
+
+  mNextChunk = ExtractLockHeld(size, aLockHeld);
+
+  if (!mNextChunk) {
+    // Allocate memory from the system.
+    mNextChunk = DirectAllocateMemory(nullptr, size);
+    RegisterAllocatedMemory(mNextChunk, size, mKind);
+  }
+}
+
+void
+FreeRegionSet::Insert(void* aAddress, size_t aSize)
+{
+  MOZ_RELEASE_ASSERT(aAddress && aAddress == PageBase(aAddress));
+  MOZ_RELEASE_ASSERT(aSize && aSize == RoundupSizeToPageBoundary(aSize));
+
+  AutoSpinLock lock(mLock);
+
+  MaybeRefillNextChunk(lock);
+  InsertLockHeld(aAddress, aSize, lock);
+}
+
+void*
+FreeRegionSet::ExtractLockHeld(size_t aSize, AutoSpinLock& aLockHeld)
+{
+  Maybe<AllocatedMemoryRegion> best =
+    mRegions.lookupClosestLessOrEqual(aSize, /* aRemove = */ true);
+  if (best.isSome()) {
+    MOZ_RELEASE_ASSERT(best.ref().mSize >= aSize);
+    uint8_t* res = best.ref().mBase;
+    if (best.ref().mSize > aSize) {
+      InsertLockHeld(res + aSize, best.ref().mSize - aSize, aLockHeld);
+    }
+    MemoryZero(res, aSize);
+    return res;
+  }
+  return nullptr;
+}
+
+void*
+FreeRegionSet::Extract(void* aAddress, size_t aSize)
+{
+  MOZ_RELEASE_ASSERT(aAddress == PageBase(aAddress));
+  MOZ_RELEASE_ASSERT(aSize && aSize == RoundupSizeToPageBoundary(aSize));
+
+  AutoSpinLock lock(mLock);
+
+  if (aAddress) {
+    MaybeRefillNextChunk(lock);
+
+    // We were given a point at which to try to place the allocation. Look for
+    // a free region which contains [aAddress, aAddress + aSize] entirely.
+    for (typename Tree::Iter iter = mRegions.begin(); !iter.done(); ++iter) {
+      uint8_t* regionBase = iter.ref().mBase;
+      uint8_t* regionExtent = regionBase + iter.ref().mSize;
+      uint8_t* addrBase = (uint8_t*)aAddress;
+      uint8_t* addrExtent = addrBase + aSize;
+      if (regionBase <= addrBase && regionExtent >= addrExtent) {
+        iter.removeEntry();
+        if (regionBase < addrBase) {
+          InsertLockHeld(regionBase, addrBase - regionBase, lock);
+        }
+        if (regionExtent > addrExtent) {
+          InsertLockHeld(addrExtent, regionExtent - addrExtent, lock);
+        }
+        MemoryZero(aAddress, aSize);
+        return aAddress;
+      }
+    }
+    // Fall through and look for a free region at another address.
+  }
+
+  // No address hint, look for the smallest free region which is larger than
+  // the desired allocation size.
+  return ExtractLockHeld(aSize, lock);
+}
+
+bool
+FreeRegionSet::Intersects(void* aAddress, size_t aSize)
+{
+  AutoSpinLock lock(mLock);
+  for (typename Tree::Iter iter = mRegions.begin(); !iter.done(); ++iter) {
+    if (MemoryIntersects(iter.ref().mBase, iter.ref().mSize, aAddress, aSize)) {
+      return true;
+    }
+  }
+  return false;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Memory Management
+///////////////////////////////////////////////////////////////////////////////
+
+void
+RegisterAllocatedMemory(void* aBaseAddress, size_t aSize, MemoryKind aKind)
+{
+  MOZ_RELEASE_ASSERT(aBaseAddress == PageBase(aBaseAddress));
+  MOZ_RELEASE_ASSERT(aSize == RoundupSizeToPageBoundary(aSize));
+
+  uint8_t* aAddress = reinterpret_cast<uint8_t*>(aBaseAddress);
+
+  if (aKind != MemoryKind::Tracked) {
+    if (!HasSavedCheckpoint()) {
+      AddInitialUntrackedMemoryRegion(aAddress, aSize);
+    }
+  } else if (HasSavedCheckpoint()) {
+    EnsureMemoryChangesAllowed();
+    DirectWriteProtectMemory(aAddress, aSize, true);
+    AddTrackedRegion(aAddress, aSize, true);
+  }
+}
+
+void
+CheckFixedMemory(void* aAddress, size_t aSize)
+{
+  MOZ_RELEASE_ASSERT(aAddress == PageBase(aAddress));
+  MOZ_RELEASE_ASSERT(aSize == RoundupSizeToPageBoundary(aSize));
+
+  if (!HasSavedCheckpoint()) {
+    return;
+  }
+
+  {
+    // The memory should already be tracked. Check each page in the allocation
+    // because there might be tracked regions adjacent to one another, neither
+    // of which entirely contains this memory.
+    AutoSpinLock lock(gMemoryInfo->mTrackedRegionsLock);
+    for (size_t offset = 0; offset < aSize; offset += PageSize) {
+      uint8_t* page = (uint8_t*)aAddress + offset;
+      Maybe<AllocatedMemoryRegion> region =
+        gMemoryInfo->mTrackedRegions.lookupClosestLessOrEqual(page);
+      if (!region.isSome() ||
+          !MemoryContains(region.ref().mBase, region.ref().mSize, page, PageSize)) {
+        child::ReportFatalError("Fixed memory is not tracked!");
+      }
+    }
+  }
+
+  // The memory should not be free.
+  if (gFreeRegions.Intersects(aAddress, aSize)) {
+    child::ReportFatalError("Fixed memory is currently free!");
+  }
+}
+
+void
+RestoreWritableFixedMemory(void* aAddress, size_t aSize)
+{
+  MOZ_RELEASE_ASSERT(aAddress == PageBase(aAddress));
+  MOZ_RELEASE_ASSERT(aSize == RoundupSizeToPageBoundary(aSize));
+
+  if (!HasSavedCheckpoint()) {
+    return;
+  }
+
+  AutoSpinLock lock(gMemoryInfo->mActiveDirtyLock);
+  for (size_t offset = 0; offset < aSize; offset += PageSize) {
+    uint8_t* page = (uint8_t*)aAddress + offset;
+    if (gMemoryInfo->mActiveDirty.maybeLookup(page)) {
+      DirectUnprotectMemory(page, PageSize, true);
+    }
+  }
+}
+
+void*
+AllocateMemoryTryAddress(void* aAddress, size_t aSize, MemoryKind aKind)
+{
+  MOZ_RELEASE_ASSERT(aAddress == PageBase(aAddress));
+  aSize = RoundupSizeToPageBoundary(aSize);
+
+  if (gMemoryInfo) {
+    gMemoryInfo->mMemoryBalance[(size_t) aKind] += aSize;
+  }
+
+  if (HasSavedCheckpoint()) {
+    if (void* res = FreeRegionSet::Get(aKind).Extract(aAddress, aSize)) {
+      return res;
+    }
+  }
+
+  void* res = DirectAllocateMemory(aAddress, aSize);
+  RegisterAllocatedMemory(res, aSize, aKind);
+  return res;
+}
+
+void*
+AllocateMemory(size_t aSize, MemoryKind aKind)
+{
+  if (!IsReplaying()) {
+    return DirectAllocateMemory(nullptr, aSize);
+  }
+  return AllocateMemoryTryAddress(nullptr, aSize, aKind);
+}
+
+void
+DeallocateMemory(void* aAddress, size_t aSize, MemoryKind aKind)
+{
+  // Round the supplied region to the containing page boundaries.
+  aSize += (uint8_t*) aAddress - PageBase(aAddress);
+  aAddress = PageBase(aAddress);
+  aSize = RoundupSizeToPageBoundary(aSize);
+
+  if (!aAddress || !aSize) {
+    return;
+  }
+
+  if (gMemoryInfo) {
+    gMemoryInfo->mMemoryBalance[(size_t) aKind] -= aSize;
+  }
+
+  // Before the first checkpoint is saved, deallocated memory really is
+  // returned to the system.
+  if (!HasSavedCheckpoint()) {
+    if (IsReplaying() && aKind != MemoryKind::Tracked) {
+      RemoveInitialUntrackedRegion((uint8_t*) aAddress, aSize);
+    }
+    DirectDeallocateMemory(aAddress, aSize);
+    return;
+  }
+
+  if (aKind == MemoryKind::Tracked) {
+    // For simplicity, all free regions must be executable, so ignore deallocated
+    // memory in regions that are not executable.
+    bool executable;
+    if (!IsTrackedAddress(aAddress, &executable) || !executable) {
+      return;
+    }
+  }
+
+  // Mark this region as free, but do not unmap it. It will become usable for
+  // later allocations, but will not need to be remapped if we end up
+  // rewinding to a point where this memory was in use.
+  FreeRegionSet::Get(aKind).Insert(aAddress, aSize);
+}
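+
+// Rewind scenario sketch: if a tracked region is allocated after a checkpoint
+// is saved and then deallocated, rewinding to that checkpoint must be able to
+// reproduce the region's contents. Because the region was only added to the
+// free set and never unmapped, restoring it is a matter of copying saved
+// pages back, with no address space changes.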
+
+///////////////////////////////////////////////////////////////////////////////
+// Snapshot Threads
+///////////////////////////////////////////////////////////////////////////////
+
+// While on a snapshot thread, restore the contents of all pages belonging to
+// this thread which were modified since the last recorded diff snapshot.
+static void
+SnapshotThreadRestoreLastDiffSnapshot(SnapshotThreadWorklist* aWorklist)
+{
+  CheckpointId checkpoint = GetLastSavedCheckpoint();
+
+  DirtyPageSet& set = aWorklist->mSets.back();
+  MOZ_RELEASE_ASSERT(set.mCheckpoint == checkpoint);
+
+  // Copy the original contents of all pages.
+  for (size_t index = 0; index < set.mPages.length(); index++) {
+    const DirtyPage& page = set.mPages[index];
+    MOZ_RELEASE_ASSERT(page.mOriginal);
+    DirectUnprotectMemory(page.mBase, PageSize, page.mExecutable);
+    MemoryMove(page.mBase, page.mOriginal, PageSize);
+    DirectWriteProtectMemory(page.mBase, PageSize, page.mExecutable);
+    FreePageCopy(page.mOriginal);
+  }
+
+  // Remove the set from the worklist, if necessary.
+  if (!aWorklist->mSets.empty()) {
+    MOZ_RELEASE_ASSERT(&set == &aWorklist->mSets.back());
+    aWorklist->mSets.popBack();
+  }
+}
+
+// Start routine for a snapshot thread.
+void
+SnapshotThreadMain(void* aArgument)
+{
+  size_t threadIndex = (size_t) aArgument;
+  SnapshotThreadWorklist* worklist = &gMemoryInfo->mSnapshotWorklists[threadIndex];
+  worklist->mThreadIndex = threadIndex;
+
+  while (true) {
+    // If the main thread is waiting for us to restore the most recent diff,
+    // then do so and notify the main thread we finished.
+    if (gMemoryInfo->mSnapshotThreadsShouldRestore.IsActive()) {
+      SnapshotThreadRestoreLastDiffSnapshot(worklist);
+      gMemoryInfo->mSnapshotThreadsShouldRestore.WaitUntilNoLongerActive();
+    }
+
+    // Idle if the main thread wants us to.
+    if (gMemoryInfo->mSnapshotThreadsShouldIdle.IsActive()) {
+      gMemoryInfo->mSnapshotThreadsShouldIdle.WaitUntilNoLongerActive();
+    }
+
+    // Idle until notified by the main thread.
+    Thread::WaitNoIdle();
+  }
+}
+
+// An alternative to memcmp that can be called from anywhere.
+static bool
+MemoryEquals(void* aDst, void* aSrc, size_t aSize)
+{
+  MOZ_ASSERT((size_t)aDst % sizeof(size_t) == 0);
+  MOZ_ASSERT((size_t)aSrc % sizeof(size_t) == 0);
+  MOZ_ASSERT(aSize % sizeof(size_t) == 0);
+
+  size_t* ndst = (size_t*)aDst;
+  size_t* nsrc = (size_t*)aSrc;
+  for (size_t i = 0; i < aSize / sizeof(size_t); i++) {
+    if (ndst[i] != nsrc[i]) {
+      return false;
+    }
+  }
+  return true;
+}
+
+// Add a page to the last set in some snapshot thread's worklist. This is
+// called on the main thread while the snapshot thread is idle.
+static void
+AddDirtyPageToWorklist(uint8_t* aAddress, uint8_t* aOriginal, bool aExecutable)
+{
+  // Distribute pages to snapshot threads using the base address of a page.
+  // This guarantees that the same page will be consistently assigned to the
+  // same thread as different snapshots are taken.
+  MOZ_ASSERT((size_t)aAddress % PageSize == 0);
+  if (MemoryEquals(aAddress, aOriginal, PageSize)) {
+    FreePageCopy(aOriginal);
+  } else {
+    size_t pageIndex = ((size_t)aAddress / PageSize) % NumSnapshotThreads;
+    SnapshotThreadWorklist* worklist = &gMemoryInfo->mSnapshotWorklists[pageIndex];
+    MOZ_RELEASE_ASSERT(!worklist->mSets.empty());
+    DirtyPageSet& set = worklist->mSets.back();
+    MOZ_RELEASE_ASSERT(set.mCheckpoint == GetLastSavedCheckpoint());
+    set.mPages.emplaceBack(aAddress, aOriginal, aExecutable);
+  }
+}
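+
+// Example: with 0x1000-byte pages and NumSnapshotThreads == 4 (value chosen
+// for illustration), pages at 0x10000, 0x11000, 0x12000 and 0x13000 go to
+// worklists 0, 1, 2 and 3, and the page at 0x14000 wraps back to worklist 0.
+// The assignment depends only on the page address, so it is stable across
+// snapshots.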
+
+///////////////////////////////////////////////////////////////////////////////
+// Snapshot Interface
+///////////////////////////////////////////////////////////////////////////////
+
+void
+InitializeMemorySnapshots()
+{
+  MOZ_RELEASE_ASSERT(gMemoryInfo == nullptr);
+  void* memory = AllocateMemory(sizeof(MemoryInfo), MemoryKind::Generic);
+  gMemoryInfo = new(memory) MemoryInfo();
+
+  // Mark gMemoryInfo as untracked. See AddInitialUntrackedMemoryRegion.
+  AddInitialUntrackedMemoryRegion(reinterpret_cast<uint8_t*>(memory), sizeof(MemoryInfo));
+}
+
+void
+InitializeCountdownThread()
+{
+#ifdef WANT_COUNTDOWN_THREAD
+  Thread::SpawnNonRecordedThread(CountdownThreadMain, nullptr);
+#endif
+}
+
+void
+TakeFirstMemorySnapshot()
+{
+  MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
+  MOZ_RELEASE_ASSERT(gMemoryInfo->mTrackedRegions.empty());
+
+  // Spawn all snapshot threads.
+  {
+    AutoPassThroughThreadEvents pt;
+
+    for (size_t i = 0; i < NumSnapshotThreads; i++) {
+      Thread* thread = Thread::SpawnNonRecordedThread(SnapshotThreadMain, (void*) i);
+      gMemoryInfo->mSnapshotWorklists[i].mThreadId = thread->Id();
+    }
+  }
+
+  // All threads should have been created by now.
+  MarkThreadStacksAsUntracked();
+
+  // Fill in the tracked regions for the process.
+  ProcessAllInitialMemoryRegions();
+}
+
+void
+TakeDiffMemorySnapshot()
+{
+  MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
+
+  UpdateNumTrackedRegionsForSnapshot();
+
+  AutoDisallowMemoryChanges disallow;
+
+  // Stop all snapshot threads while we modify their worklists.
+  gMemoryInfo->mSnapshotThreadsShouldIdle.ActivateBegin();
+
+  // Add a DirtyPageSet to each snapshot thread's worklist for this snapshot.
+  for (size_t i = 0; i < NumSnapshotThreads; i++) {
+    SnapshotThreadWorklist* worklist = &gMemoryInfo->mSnapshotWorklists[i];
+    worklist->mSets.emplaceBack(GetLastSavedCheckpoint());
+  }
+
+  // Distribute remaining active dirty pages to the snapshot thread worklists.
+  for (SortedDirtyPageSet::Iter iter = gMemoryInfo->mActiveDirty.begin(); !iter.done(); ++iter) {
+    AddDirtyPageToWorklist(iter.ref().mBase, iter.ref().mOriginal, iter.ref().mExecutable);
+    DirectWriteProtectMemory(iter.ref().mBase, PageSize, iter.ref().mExecutable);
+  }
+
+  gMemoryInfo->mActiveDirty.clear();
+
+  // Allow snapshot threads to resume execution.
+  gMemoryInfo->mSnapshotThreadsShouldIdle.ActivateEnd();
+}
+
+void
+RestoreMemoryToLastSavedCheckpoint()
+{
+  MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
+
+  AutoDisallowMemoryChanges disallow;
+
+  // Restore all dirty regions that have been modified since the last
+  // checkpoint was saved/restored.
+  for (SortedDirtyPageSet::Iter iter = gMemoryInfo->mActiveDirty.begin(); !iter.done(); ++iter) {
+    MemoryMove(iter.ref().mBase, iter.ref().mOriginal, PageSize);
+    FreePageCopy(iter.ref().mOriginal);
+    DirectWriteProtectMemory(iter.ref().mBase, PageSize, iter.ref().mExecutable);
+  }
+  gMemoryInfo->mActiveDirty.clear();
+}
+
+void
+RestoreMemoryToLastSavedDiffCheckpoint()
+{
+  MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
+  MOZ_RELEASE_ASSERT(gMemoryInfo->mActiveDirty.empty());
+
+  AutoDisallowMemoryChanges disallow;
+
+  // Wait while the snapshot threads restore all pages modified since the diff
+  // snapshot was recorded.
+  gMemoryInfo->mSnapshotThreadsShouldRestore.ActivateBegin();
+  gMemoryInfo->mSnapshotThreadsShouldRestore.ActivateEnd();
+}
+
+} // namespace recordreplay
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/MemorySnapshot.h
@@ -0,0 +1,128 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_MemorySnapshot_h
+#define mozilla_recordreplay_MemorySnapshot_h
+
+#include "mozilla/Types.h"
+#include "ProcessRecordReplay.h"
+
+namespace mozilla {
+namespace recordreplay {
+
+// Memory Snapshots Overview.
+//
+// As described in ProcessRewind.h, some subset of the checkpoints which are
+// reached during execution are saved, so that their state can be restored
+// later. Memory snapshots are used to save and restore the contents of all
+// heap memory: everything except thread stacks (see ThreadSnapshot.h for
+// saving and restoring these) and untracked memory (which is not saved or
+// restored, see ProcessRecordReplay.h).
+//
+// Each memory snapshot is a diff of the heap memory contents compared to the
+// next one. See MemorySnapshot.cpp for how diffs are represented and computed.
+//
+// Rewinding must restore the exact contents of heap memory that existed when
+// the target checkpoint was reached. Because of this, memory that is allocated
+// at a point when a checkpoint is saved will never actually be returned to the
+// system. We instead keep a set of free blocks that are unused at the current
+// point of execution and are available to satisfy new allocations.
+
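+// Lifecycle sketch (illustrative ordering, inferred from the declarations
+// below and their callers in MemorySnapshot.cpp):
+//
+//   InitializeMemorySnapshots();
+//   TakeFirstMemorySnapshot();        // when the first checkpoint is saved
+//   // ... execution dirties tracked pages ...
+//   TakeDiffMemorySnapshot();         // at each later saved checkpoint
+//   // ... when rewinding ...
+//   RestoreMemoryToLastSavedCheckpoint();
+//   RestoreMemoryToLastSavedDiffCheckpoint(); // once per earlier diff
+//   FixupFreeRegionsAfterRewind();
+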
+// Make sure that a block of memory in a fixed allocation is already allocated.
+void CheckFixedMemory(void* aAddress, size_t aSize);
+
+// After marking a block of memory in a fixed allocation as non-writable,
+// restore writability to any dirty pages in the range.
+void RestoreWritableFixedMemory(void* aAddress, size_t aSize);
+
+// Allocate memory, trying to use a specific address if provided but only if
+// it is free.
+void* AllocateMemoryTryAddress(void* aAddress, size_t aSize, MemoryKind aKind);
+
+// Note a range of memory that was just allocated from the system, and the
+// kind of memory allocation that was performed.
+void RegisterAllocatedMemory(void* aBaseAddress, size_t aSize, MemoryKind aKind);
+
+// Initialize the memory snapshots system.
+void InitializeMemorySnapshots();
+
+// Take the first heap memory snapshot.
+void TakeFirstMemorySnapshot();
+
+// Take a differential heap memory snapshot compared to the last one,
+// associated with the last saved checkpoint.
+void TakeDiffMemorySnapshot();
+
+// Restore all heap memory to its state when the most recent checkpoint was
+// saved. This requires no checkpoints to have been saved after this one.
+void RestoreMemoryToLastSavedCheckpoint();
+
+// Restore all heap memory to its state at the checkpoint the last saved diff
+// snapshot was taken against, i.e. the saved checkpoint preceding the most
+// recent one. This requires that no tracked heap memory has been changed
+// since the last saved checkpoint.
+void RestoreMemoryToLastSavedDiffCheckpoint();
+
+// Erase all information from the last diff snapshot taken, so that tracked
+// heap memory changes are with respect to the previous checkpoint.
+void EraseLastSavedDiffMemorySnapshot();
+
+// Set whether to allow changes to tracked heap memory at this point. If such
+// changes occur when they are not allowed then the process will crash.
+void SetMemoryChangesAllowed(bool aAllowed);
+
+struct MOZ_RAII AutoDisallowMemoryChanges
+{
+  AutoDisallowMemoryChanges() { SetMemoryChangesAllowed(false); }
+  ~AutoDisallowMemoryChanges() { SetMemoryChangesAllowed(true); }
+};
+
+// After a SEGV on the specified address, check if the violation occurred due
+// to the memory having been write protected by the snapshot mechanism. This
+// function returns whether the fault has been handled and execution may
+// continue.
+bool HandleDirtyMemoryFault(uint8_t* aAddress);
+
+// For debugging, note a point where we hit an unrecoverable failure and try
+// to make things easier for the debugger.
+void UnrecoverableSnapshotFailure();
+
+// After rewinding, mark all memory that has been allocated since the snapshot
+// was taken as free.
+void FixupFreeRegionsAfterRewind();
+
+// Set whether to allow intentionally crashing in this process via the
+// RecordReplayDirective method.
+void SetAllowIntentionalCrashes(bool aAllowed);
+
+// When WANT_COUNTDOWN_THREAD is defined (see MemorySnapshot.cpp), set a count
+// that, after a thread consumes it, causes the thread to report a fatal error.
+// This is used for debugging and is a workaround for lldb often being unable
+// to interrupt a running process.
+void StartCountdown(size_t aCount);
+
+// Per StartCountdown, set a countdown and remove it on destruction.
+struct MOZ_RAII AutoCountdown
+{
+  explicit AutoCountdown(size_t aCount);
+  ~AutoCountdown();
+};
+
+// Initialize the thread consuming the countdown.
+void InitializeCountdownThread();
+
+// This is an alternative to memmove/memcpy that can be called in areas where
+// faults in write protected memory are not allowed. It's hard to avoid dynamic
+// code loading when calling memmove/memcpy directly.
+void MemoryMove(void* aDst, const void* aSrc, size_t aSize);
+
+// Similarly, zero out a range of memory without doing anything weird with
+// dynamic code loading.
+void MemoryZero(void* aDst, size_t aSize);
+
+} // namespace recordreplay
+} // namespace mozilla
+
+#endif // mozilla_recordreplay_MemorySnapshot_h
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/Monitor.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_Monitor_h
+#define mozilla_recordreplay_Monitor_h
+
+#include "mozilla/PlatformConditionVariable.h"
+
+namespace mozilla {
+namespace recordreplay {
+
+// Simple wrapper around mozglue mutexes and condvars. This is a lighter weight
+// abstraction than mozilla::Monitor and has simpler interactions with the
+// record/replay system.
+class Monitor : public detail::MutexImpl
+{
+public:
+  Monitor()
+    : detail::MutexImpl(Behavior::DontPreserve)
+  {}
+
+  void Lock() { detail::MutexImpl::lock(); }
+  void Unlock() { detail::MutexImpl::unlock(); }
+  void Wait() { mCondVar.wait(*this); }
+  void Notify() { mCondVar.notify_one(); }
+  void NotifyAll() { mCondVar.notify_all(); }
+
+  void WaitUntil(TimeStamp aTime) {
+    AutoEnsurePassThroughThreadEvents pt;
+    mCondVar.wait_for(*this, aTime - TimeStamp::Now());
+  }
+
+private:
+  detail::ConditionVariableImpl mCondVar;
+};
+
+// RAII class to lock a monitor.
+struct MOZ_RAII MonitorAutoLock
+{
+  explicit MonitorAutoLock(Monitor& aMonitor)
+    : mMonitor(aMonitor)
+  {
+    mMonitor.Lock();
+  }
+
+  ~MonitorAutoLock()
+  {
+    mMonitor.Unlock();
+  }
+
+private:
+  Monitor& mMonitor;
+};
+
+// RAII class to unlock a monitor.
+struct MOZ_RAII MonitorAutoUnlock
+{
+  explicit MonitorAutoUnlock(Monitor& aMonitor)
+    : mMonitor(aMonitor)
+  {
+    mMonitor.Unlock();
+  }
+
+  ~MonitorAutoUnlock()
+  {
+    mMonitor.Lock();
+  }
+
+private:
+  Monitor& mMonitor;
+};
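+
+// Usage sketch (illustrative):
+//
+//   static Monitor gExampleMonitor; // hypothetical
+//
+//   void WaitForFlag(bool* aFlag) {
+//     MonitorAutoLock lock(gExampleMonitor);
+//     while (!*aFlag) {
+//       gExampleMonitor.Wait(); // unlocks while waiting, relocks on wakeup
+//     }
+//   }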
+
+} // namespace recordreplay
+} // namespace mozilla
+
+#endif // mozilla_recordreplay_Monitor_h
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/ProcessRecordReplay.cpp
@@ -0,0 +1,671 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ProcessRecordReplay.h"
+
+#include "ipc/ChildInternal.h"
+#include "mozilla/Compression.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Sprintf.h"
+#include "mozilla/StackWalk.h"
+#include "mozilla/StaticMutex.h"
+#include "DirtyMemoryHandler.h"
+#include "Lock.h"
+#include "MemorySnapshot.h"
+#include "ProcessRedirect.h"
+#include "ProcessRewind.h"
+#include "Trigger.h"
+#include "ValueIndex.h"
+#include "WeakPointer.h"
+#include "pratom.h"
+
+#include <fcntl.h>
+#include <unistd.h>
+
+namespace mozilla {
+namespace recordreplay {
+
+MOZ_NEVER_INLINE void
+BusyWait()
+{
+  static volatile int value = 1;
+  while (value) {}
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Basic interface
+///////////////////////////////////////////////////////////////////////////////
+
+File* gRecordingFile;
+const char* gSnapshotMemoryPrefix;
+const char* gSnapshotStackPrefix;
+
+char* gInitializationFailureMessage;
+
+static void DumpRecordingAssertions();
+
+bool gInitialized;
+ProcessKind gProcessKind;
+char* gRecordingFilename;
+
+// Current process ID.
+static int gPid;
+
+// Whether to spew record/replay messages to stderr.
+static bool gSpewEnabled;
+
+extern "C" {
+
+MOZ_EXPORT void
+RecordReplayInterface_Initialize(int aArgc, char* aArgv[])
+{
+  // Parse command line options for the process kind and recording file.
+  Maybe<ProcessKind> processKind;
+  Maybe<char*> recordingFile;
+  for (int i = 0; i < aArgc; i++) {
+    if (!strcmp(aArgv[i], gProcessKindOption)) {
+      MOZ_RELEASE_ASSERT(processKind.isNothing() && i + 1 < aArgc);
+      processKind.emplace((ProcessKind) atoi(aArgv[i + 1]));
+    }
+    if (!strcmp(aArgv[i], gRecordingFileOption)) {
+      MOZ_RELEASE_ASSERT(recordingFile.isNothing() && i + 1 < aArgc);
+      recordingFile.emplace(aArgv[i + 1]);
+    }
+  }
+  MOZ_RELEASE_ASSERT(processKind.isSome() && recordingFile.isSome());
+
+  gProcessKind = processKind.ref();
+  gRecordingFilename = strdup(recordingFile.ref());
+
+  switch (processKind.ref()) {
+  case ProcessKind::Recording:
+    gIsRecording = gIsRecordingOrReplaying = true;
+    fprintf(stderr, "RECORDING %d %s\n", getpid(), recordingFile.ref());
+    break;
+  case ProcessKind::Replaying:
+    gIsReplaying = gIsRecordingOrReplaying = true;
+    fprintf(stderr, "REPLAYING %d %s\n", getpid(), recordingFile.ref());
+    break;
+  case ProcessKind::MiddlemanRecording:
+  case ProcessKind::MiddlemanReplaying:
+    gIsMiddleman = true;
+    fprintf(stderr, "MIDDLEMAN %d %s\n", getpid(), recordingFile.ref());
+    break;
+  default:
+    MOZ_CRASH("Bad ProcessKind");
+  }
+
+  if (IsRecordingOrReplaying() && TestEnv("WAIT_AT_START")) {
+    BusyWait();
+  }
+
+  if (IsMiddleman() && TestEnv("MIDDLEMAN_WAIT_AT_START")) {
+    BusyWait();
+  }
+
+  gPid = getpid();
+  if (TestEnv("RECORD_REPLAY_SPEW")) {
+    gSpewEnabled = true;
+  }
+
+  EarlyInitializeRedirections();
+
+  if (!IsRecordingOrReplaying()) {
+    return;
+  }
+
+  gSnapshotMemoryPrefix = mktemp(strdup("/tmp/SnapshotMemoryXXXXXX"));
+  gSnapshotStackPrefix = mktemp(strdup("/tmp/SnapshotStackXXXXXX"));
+
+  InitializeCurrentTime();
+
+  gRecordingFile = new File();
+  if (!gRecordingFile->Open(recordingFile.ref(), IsRecording() ? File::WRITE : File::READ)) {
+    gInitializationFailureMessage = strdup("Bad recording file");
+    return;
+  }
+
+  if (!InitializeRedirections()) {
+    MOZ_RELEASE_ASSERT(gInitializationFailureMessage);
+    return;
+  }
+
+  Thread::InitializeThreads();
+
+  Thread* thread = Thread::GetById(MainThreadId);
+  MOZ_ASSERT(thread->Id() == MainThreadId);
+
+  thread->BindToCurrent();
+  thread->SetPassThrough(true);
+
+  if (IsReplaying() && TestEnv("DUMP_RECORDING")) {
+    DumpRecordingAssertions();
+  }
+
+  InitializeTriggers();
+  InitializeWeakPointers();
+  InitializeMemorySnapshots();
+  Thread::SpawnAllThreads();
+  InitializeCountdownThread();
+  SetupDirtyMemoryHandler();
+
+  // Don't create a stylo thread pool when recording or replaying.
+  putenv((char*) "STYLO_THREADS=1");
+
+  thread->SetPassThrough(false);
+
+  Lock::InitializeLocks();
+  InitializeRewindState();
+
+  gInitialized = true;
+}
+
+MOZ_EXPORT size_t
+RecordReplayInterface_InternalRecordReplayValue(size_t aValue)
+{
+  MOZ_ASSERT(IsRecordingOrReplaying());
+
+  if (AreThreadEventsPassedThrough()) {
+    return aValue;
+  }
+  EnsureNotDivergedFromRecording();
+
+  MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
+  Thread* thread = Thread::Current();
+
+  RecordReplayAssert("Value");
+  thread->Events().RecordOrReplayThreadEvent(ThreadEvent::Value);
+  thread->Events().RecordOrReplayValue(&aValue);
+  return aValue;
+}
+
+MOZ_EXPORT void
+RecordReplayInterface_InternalRecordReplayBytes(void* aData, size_t aSize)
+{
+  MOZ_ASSERT(IsRecordingOrReplaying());
+
+  if (AreThreadEventsPassedThrough()) {
+    return;
+  }
+  EnsureNotDivergedFromRecording();
+
+  MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
+  Thread* thread = Thread::Current();
+
+  RecordReplayAssert("Bytes %d", (int) aSize);
+  thread->Events().RecordOrReplayThreadEvent(ThreadEvent::Bytes);
+  thread->Events().CheckInput(aSize);
+  thread->Events().RecordOrReplayBytes(aData, aSize);
+}
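+
+// Usage sketch: the public wrappers in mfbt/RecordReplay.h route here so that
+// nondeterministic inputs are captured while recording and reproduced while
+// replaying, e.g. (helpers hypothetical):
+//
+//   size_t ticks = RecordReplayValue(ReadHardwareClock());
+//   char buf[32];
+//   FillWithRandomBytes(buf, sizeof(buf));
+//   RecordReplayBytes(buf, sizeof(buf)); // replay rewrites buf's contents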
+
+MOZ_EXPORT void
+RecordReplayInterface_InternalInvalidateRecording(const char* aWhy)
+{
+  if (IsRecording()) {
+    child::ReportFatalError("Recording invalidated: %s", aWhy);
+  } else {
+    child::ReportFatalError("Recording invalidated while replaying: %s", aWhy);
+  }
+  Unreachable();
+}
+
+} // extern "C"
+
+// How many recording endpoints have been flushed to the recording.
+static size_t gNumEndpoints;
+
+void
+FlushRecording()
+{
+  MOZ_RELEASE_ASSERT(IsRecording());
+  MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
+
+  // Save the endpoint of the recording.
+  js::ExecutionPoint endpoint = navigation::GetRecordingEndpoint();
+  Stream* endpointStream = gRecordingFile->OpenStream(StreamName::Main, 0);
+  endpointStream->WriteScalar(++gNumEndpoints);
+  endpointStream->WriteBytes(&endpoint, sizeof(endpoint));
+
+  gRecordingFile->PreventStreamWrites();
+
+  gRecordingFile->Flush();
+
+  child::NotifyFlushedRecording();
+
+  gRecordingFile->AllowStreamWrites();
+}
+
+// Try to load another recording index, returning whether one was found.
+static bool
+LoadNextRecordingIndex()
+{
+  Thread::WaitForIdleThreads();
+
+  InfallibleVector<Stream*> updatedStreams;
+  File::ReadIndexResult result = gRecordingFile->ReadNextIndex(&updatedStreams);
+  if (result == File::ReadIndexResult::InvalidFile) {
+    MOZ_CRASH("Bad recording file");
+  }
+
+  bool found = result == File::ReadIndexResult::FoundIndex;
+  if (found) {
+    for (Stream* stream : updatedStreams) {
+      if (stream->Name() == StreamName::Lock) {
+        Lock::LockAquiresUpdated(stream->NameIndex());
+      }
+    }
+  }
+
+  Thread::ResumeIdleThreads();
+  return found;
+}
+
+bool
+HitRecordingEndpoint()
+{
+  MOZ_RELEASE_ASSERT(IsReplaying());
+  MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
+
+  // The debugger will call this method in a loop, so we don't have to do
+  // anything fancy to try to get the most up to date endpoint. As long as we
+  // can make some progress in attempting to find a later endpoint, we can
+  // return control to the debugger.
+
+  // Check if there is a new endpoint in the endpoint data stream.
+  Stream* endpointStream = gRecordingFile->OpenStream(StreamName::Main, 0);
+  if (!endpointStream->AtEnd()) {
+    js::ExecutionPoint endpoint;
+    size_t index = endpointStream->ReadScalar();
+    endpointStream->ReadBytes(&endpoint, sizeof(endpoint));
+    navigation::SetRecordingEndpoint(index, endpoint);
+    return true;
+  }
+
+  // Check if there is more data in the recording.
+  if (LoadNextRecordingIndex()) {
+    return true;
+  }
+
+  // OK, we hit the most up to date endpoint in the recording.
+  return false;
+}
+
+void
+HitEndOfRecording()
+{
+  MOZ_RELEASE_ASSERT(IsReplaying());
+  MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
+
+  if (Thread::CurrentIsMainThread()) {
+    // Load more data from the recording. The debugger is not allowed to let us
+    // go past the recording endpoint, so there should be more data.
+    bool found = LoadNextRecordingIndex();
+    MOZ_RELEASE_ASSERT(found);
+  } else {
+    // Non-main threads may wait until more recording data is loaded by the
+    // main thread.
+    Thread::Wait();
+  }
+}
+
+bool
+SpewEnabled()
+{
+  return gSpewEnabled;
+}
+
+void
+InternalPrint(const char* aFormat, va_list aArgs)
+{
+  char buf1[2048];
+  VsprintfLiteral(buf1, aFormat, aArgs);
+  char buf2[2048];
+  SprintfLiteral(buf2, "Spew[%d]: %s", gPid, buf1);
+  DirectPrint(buf2);
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Record/Replay Assertions
+///////////////////////////////////////////////////////////////////////////////
+
+struct StackWalkData
+{
+  char* mBuf;
+  size_t mSize;
+
+  StackWalkData(char* aBuf, size_t aSize)
+    : mBuf(aBuf), mSize(aSize)
+  {}
+
+  void append(const char* aText) {
+    size_t len = strlen(aText);
+    if (len < mSize) { // leave room for strcpy's null terminator
+      strcpy(mBuf, aText);
+      mBuf += len;
+      mSize -= len;
+    }
+  }
+};
+
+static void
+StackWalkCallback(uint32_t aFrameNumber, void* aPC, void* aSP, void* aClosure)
+{
+  StackWalkData* data = (StackWalkData*) aClosure;
+
+  MozCodeAddressDetails details;
+  MozDescribeCodeAddress(aPC, &details);
+
+  data->append(" ### ");
+  data->append(details.function[0] ? details.function : "???");
+}
+
+static void
+SetCurrentStackString(const char* aAssertion, char* aBuf, size_t aSize)
+{
+  size_t frameCount = 12;
+
+  // Locking operations usually have extra stack goop.
+  if (!strcmp(aAssertion, "Lock 1")) {
+    frameCount += 8;
+  } else if (!strncmp(aAssertion, "Lock ", 5)) {
+    frameCount += 4;
+  }
+
+  StackWalkData data(aBuf, aSize);
+  MozStackWalk(StackWalkCallback, /* aSkipFrames = */ 2, frameCount, &data);
+}
+
+// For debugging.
+char*
+PrintCurrentStackString()
+{
+  AutoEnsurePassThroughThreadEvents pt;
+  char* buf = new char[1000];
+  SetCurrentStackString("", buf, 1000);
+  return buf;
+}
+
+static inline bool
+AlwaysCaptureEventStack(const char* aText)
+{
+  return false;
+}
+
+// Bit included in assertion stream when the assertion is a text assert, rather
+// than a byte sequence.
+static const size_t AssertionBit = 1;
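+
+// Record layout sketch, per the writers below: a text assert is stored as
+// [event stream position, (progress counter, main thread only),
+// (textLen << 1) | AssertionBit, text bytes], while
+// InternalRecordReplayAssertBytes stores (aSize << 1) with the low bit clear.
+// A 5-byte text assert thus records the scalar (5 << 1) | 1 == 11.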
+
+extern "C" {
+
+MOZ_EXPORT void
+RecordReplayInterface_InternalRecordReplayAssert(const char* aFormat, va_list aArgs)
+{
+#ifdef INCLUDE_RECORD_REPLAY_ASSERTIONS
+  if (AreThreadEventsPassedThrough() || HasDivergedFromRecording()) {
+    return;
+  }
+
+  MOZ_RELEASE_ASSERT(!AreThreadEventsDisallowed());
+  Thread* thread = Thread::Current();
+
+  // Record an assertion string consisting of the name of the assertion and
+  // stack information about the current point of execution.
+  char text[1024];
+  VsprintfLiteral(text, aFormat, aArgs);
+  if (IsRecording() && (thread->ShouldCaptureEventStacks() || AlwaysCaptureEventStack(text))) {
+    AutoPassThroughThreadEvents pt;
+    SetCurrentStackString(text, text + strlen(text), sizeof(text) - strlen(text));
+  }
+
+  size_t textLen = strlen(text);
+
+  if (IsRecording()) {
+    thread->Asserts().WriteScalar(thread->Events().StreamPosition());
+    if (thread->IsMainThread()) {
+      thread->Asserts().WriteScalar(*ExecutionProgressCounter());
+    }
+    thread->Asserts().WriteScalar((textLen << 1) | AssertionBit);
+    thread->Asserts().WriteBytes(text, textLen);
+  } else {
+    // While replaying, both the assertion's name and the current position in
+    // the thread's events need to match up with what was recorded. The stack
+    // portion of the assertion text does not need to match, it is used to help
+    // track down the reason for the mismatch.
+    bool match = true;
+    size_t streamPos = thread->Asserts().ReadScalar();
+    if (streamPos != thread->Events().StreamPosition()) {
+      match = false;
+    }
+    size_t progress = 0;
+    if (thread->IsMainThread()) {
+      progress = thread->Asserts().ReadScalar();
+      if (progress != *ExecutionProgressCounter()) {
+        match = false;
+      }
+    }
+    size_t assertLen = thread->Asserts().ReadScalar() >> 1;
+
+    char* buffer = thread->TakeBuffer(assertLen + 1);
+
+    thread->Asserts().ReadBytes(buffer, assertLen);
+    buffer[assertLen] = 0;
+
+    if (assertLen < textLen || memcmp(buffer, text, textLen) != 0) {
+      match = false;
+    }
+
+    if (!match) {
+      for (int i = Thread::NumRecentAsserts - 1; i >= 0; i--) {
+        if (thread->RecentAssert(i).mText) {
+          Print("Thread %d Recent %d: %s [%d]\n",
+                (int) thread->Id(), (int) i,
+                thread->RecentAssert(i).mText, (int) thread->RecentAssert(i).mPosition);
+        }
+      }
+
+      {
+        AutoPassThroughThreadEvents pt;
+        SetCurrentStackString(text, text + strlen(text), sizeof(text) - strlen(text));
+      }
+
+      child::ReportFatalError("Assertion Mismatch: Thread %d\n"
+                              "Recorded: %s [%d,%d]\n"
+                              "Replayed: %s [%d,%d]\n",
+                              (int) thread->Id(), buffer, (int) streamPos, (int) progress, text,
+                              (int) thread->Events().StreamPosition(),
+                              (int) (thread->IsMainThread() ? *ExecutionProgressCounter() : 0));
+      Unreachable();
+    }
+
+    thread->RestoreBuffer(buffer);
+
+    // Push this assert onto the recent assertions in the thread.
+    free(thread->RecentAssert(Thread::NumRecentAsserts - 1).mText);
+    for (size_t i = Thread::NumRecentAsserts - 1; i >= 1; i--) {
+      thread->RecentAssert(i) = thread->RecentAssert(i - 1);
+    }
+    thread->RecentAssert(0).mText = strdup(text);
+    thread->RecentAssert(0).mPosition = thread->Events().StreamPosition();
+  }
+#endif // INCLUDE_RECORD_REPLAY_ASSERTIONS
+}
+
+MOZ_EXPORT void
+RecordReplayInterface_InternalRecordReplayAssertBytes(const void* aData, size_t aSize)
+{
+#ifdef INCLUDE_RECORD_REPLAY_ASSERTIONS
+  RecordReplayAssert("AssertBytes");
+
+  if (AreThreadEventsPassedThrough() || HasDivergedFromRecording()) {
+    return;
+  }
+
+  MOZ_ASSERT(!AreThreadEventsDisallowed());
+  Thread* thread = Thread::Current();
+
+  if (IsRecording()) {
+    thread->Asserts().WriteScalar(thread->Events().StreamPosition());
+    thread->Asserts().WriteScalar(aSize << 1);
+    thread->Asserts().WriteBytes(aData, aSize);
+  } else {
+    bool match = true;
+    size_t streamPos = thread->Asserts().ReadScalar();
+    if (streamPos != thread->Events().StreamPosition()) {
+      match = false;
+    }
+    size_t oldSize = thread->Asserts().ReadScalar() >> 1;
+    if (oldSize != aSize) {
+      match = false;
+    }
+
+    char* buffer = thread->TakeBuffer(oldSize);
+
+    thread->Asserts().ReadBytes(buffer, oldSize);
+    if (match && memcmp(buffer, aData, oldSize) != 0) {
+      match = false;
+    }
+
+    if (!match) {
+      // On a byte mismatch, print out some of the mismatched bytes, up to a
+      // cutoff in case there are many mismatched bytes.
+      if (oldSize == aSize) {
+        static const size_t MAX_MISMATCHES = 100;
+        size_t mismatches = 0;
+        for (size_t i = 0; i < aSize; i++) {
+          if (((char*)aData)[i] != buffer[i]) {
+            Print("Position %d: %d %d\n", (int) i, (int) buffer[i], (int) ((char*)aData)[i]);
+            if (++mismatches == MAX_MISMATCHES) {
+              break;
+            }
+          }
+        }
+        if (mismatches == MAX_MISMATCHES) {
+          Print("Position ...\n");
+        }
+      }
+
+      child::ReportFatalError("Byte Comparison Check Failed: Position %d %d Length %d %d\n",
+                              (int) streamPos, (int) thread->Events().StreamPosition(),
+                              (int) oldSize, (int) aSize);
+      Unreachable();
+    }
+
+    thread->RestoreBuffer(buffer);
+  }
+#endif // INCLUDE_RECORD_REPLAY_ASSERTIONS
+}
+
+MOZ_EXPORT void
+RecordReplayRust_Assert(const uint8_t* aBuffer)
+{
+  RecordReplayAssert("%s", (const char*) aBuffer);
+}
+
+MOZ_EXPORT void
+RecordReplayRust_BeginPassThroughThreadEvents()
+{
+  BeginPassThroughThreadEvents();
+}
+
+MOZ_EXPORT void
+RecordReplayRust_EndPassThroughThreadEvents()
+{
+  EndPassThroughThreadEvents();
+}
+
+} // extern "C"
+
+static void
+DumpRecordingAssertions()
+{
+  Thread* thread = Thread::Current();
+
+  for (size_t id = MainThreadId; id <= MaxRecordedThreadId; id++) {
+    Stream* asserts = gRecordingFile->OpenStream(StreamName::Assert, id);
+    if (asserts->AtEnd()) {
+      continue;
+    }
+
+    fprintf(stderr, "Thread Assertions %d:\n", (int) id);
+    while (!asserts->AtEnd()) {
+      (void) asserts->ReadScalar();
+      size_t shiftedLen = asserts->ReadScalar();
+      size_t assertLen = shiftedLen >> 1;
+
+      char* buffer = thread->TakeBuffer(assertLen + 1);
+      asserts->ReadBytes(buffer, assertLen);
+      buffer[assertLen] = 0;
+
+      if (shiftedLen & AssertionBit) {
+        fprintf(stderr, "%s\n", buffer);
+      }
+
+      thread->RestoreBuffer(buffer);
+    }
+  }
+
+  fprintf(stderr, "Done with assertions, exiting...\n");
+  _exit(0);
+}
+
+static ValueIndex* gGenericThings;
+static StaticMutexNotRecorded gGenericThingsMutex;
+
+extern "C" {
+
+MOZ_EXPORT void
+RecordReplayInterface_InternalRegisterThing(void* aThing)
+{
+  if (AreThreadEventsPassedThrough()) {
+    return;
+  }
+
+  AutoOrderedAtomicAccess at;
+  StaticMutexAutoLock lock(gGenericThingsMutex);
+  if (!gGenericThings) {
+    gGenericThings = new ValueIndex();
+  }
+  if (gGenericThings->Contains(aThing)) {
+    gGenericThings->Remove(aThing);
+  }
+  gGenericThings->Insert(aThing);
+}
+
+MOZ_EXPORT void
+RecordReplayInterface_InternalUnregisterThing(void* aThing)
+{
+  StaticMutexAutoLock lock(gGenericThingsMutex);
+  if (gGenericThings) {
+    gGenericThings->Remove(aThing);
+  }
+}
+
+MOZ_EXPORT size_t
+RecordReplayInterface_InternalThingIndex(void* aThing)
+{
+  if (!aThing) {
+    return 0;
+  }
+  StaticMutexAutoLock lock(gGenericThingsMutex);
+  size_t index = 0;
+  if (gGenericThings) {
+    gGenericThings->MaybeGetIndex(aThing, &index);
+  }
+  return index;
+}
+
+MOZ_EXPORT const char*
+RecordReplayInterface_InternalVirtualThingName(void* aThing)
+{
+  void* vtable = *(void**)aThing;
+  const char* name = SymbolNameRaw(vtable);
+  return name ? name : "(unknown)";
+}
+
+} // extern "C"
+
+} // namespace recordreplay
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/ProcessRecordReplay.h
@@ -0,0 +1,395 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_ProcessRecordReplay_h
+#define mozilla_recordreplay_ProcessRecordReplay_h
+
+#include "mozilla/PodOperations.h"
+#include "mozilla/RecordReplay.h"
+
+#include <algorithm>
+
+namespace mozilla {
+namespace recordreplay {
+
+// Record/Replay Internal API
+//
+// See mfbt/RecordReplay.h for the main record/replay public API and a high
+// level description of the record/replay system.
+//
+// This directory contains files used for recording, replaying, and rewinding a
+// process. The ipc subdirectory contains files used for IPC between a
+// replaying and middleman process, and between a middleman and chrome process.
+
+// ID of an event in a thread's event stream. Each ID in the stream is followed
+// by data associated with the event (see File::RecordOrReplayThreadEvent).
+enum class ThreadEvent : uint32_t
+{
+  // Spawned another thread.
+  CreateThread,
+
+  // Created a recorded lock.
+  CreateLock,
+
+  // Acquired a recorded lock.
+  Lock,
+
+  // Waited for a condition variable with a timeout.
+  WaitForCvarUntil,
+
+  // Called RecordReplayValue.
+  Value,
+
+  // Called RecordReplayBytes.
+  Bytes,
+
+  // Executed a nested callback (see Callback.h).
+  ExecuteCallback,
+
+  // Finished executing nested callbacks in a library API (see Callback.h).
+  CallbacksFinished,
+
+  // Restored a data pointer used in a callback (see Callback.h).
+  RestoreCallbackData,
+
+  // Executed a trigger within a call to ExecuteTriggers.
+  ExecuteTrigger,
+
+  // Finished executing triggers within a call to ExecuteTriggers.
+  ExecuteTriggersFinished,
+
+  // Encoded information about an argument/rval used by a graphics call.
+  GraphicsArgument,
+  GraphicsRval,
+
+  // The start of event IDs for redirected call events. Event IDs after this
+  // point are platform specific.
+  CallStart
+};
+
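+// As an illustration, a call to RecordReplayBytes appears in the calling
+// thread's event stream as the ThreadEvent::Bytes ID followed by the recorded
+// data (see File::RecordOrReplayThreadEvent for the exact encoding).
+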
+class File;
+
+// File used during recording and replay.
+extern File* gRecordingFile;
+
+// Whether record/replay state has finished initialization.
+extern bool gInitialized;
+
+// If we failed to initialize, any associated message.
+extern char* gInitializationFailureMessage;
+
+// Whether record/replay assertions should be performed. Currently enabled
+// unconditionally, not just in DEBUG builds.
+//#ifdef DEBUG
+#define INCLUDE_RECORD_REPLAY_ASSERTIONS 1
+//#endif
+
+// Flush any new recording data to disk.
+void FlushRecording();
+
+// Called when any thread hits the end of its event stream.
+void HitEndOfRecording();
+
+// Called when the main thread hits the latest recording endpoint it knows
+// about.
+bool HitRecordingEndpoint();
+
+// Possible directives to give via the RecordReplayDirective function.
+enum class Directive
+{
+  // Crash at the next use of MaybeCrash.
+  CrashSoon = 1,
+
+  // Irrevocably crash if CrashSoon has ever been used on the process.
+  MaybeCrash = 2,
+
+  // Always save temporary checkpoints when stepping around in the debugger.
+  AlwaysSaveTemporaryCheckpoints = 3,
+
+  // Mark all future checkpoints as major checkpoints in the middleman.
+  AlwaysMarkMajorCheckpoints = 4
+};
+
+// Get the process kind and recording file specified at the command line.
+// These are available in the middleman as well as while recording/replaying.
+extern ProcessKind gProcessKind;
+extern char* gRecordingFilename;
+
+///////////////////////////////////////////////////////////////////////////////
+// Helper Functions
+///////////////////////////////////////////////////////////////////////////////
+
+// Wait indefinitely for a debugger to be attached.
+void BusyWait();
+
+static inline void Unreachable() { MOZ_CRASH("Unreachable"); }
+
+// Get the symbol name for a function pointer address, if available.
+const char* SymbolNameRaw(void* aAddress);
+
+static inline bool
+MemoryContains(void* aBase, size_t aSize, void* aPtr, size_t aPtrSize = 1)
+{
+  MOZ_ASSERT(aPtrSize);
+  return (uint8_t*) aPtr >= (uint8_t*) aBase
+      && (uint8_t*) aPtr + aPtrSize <= (uint8_t*) aBase + aSize;
+}
+
+static inline bool
+MemoryIntersects(void* aBase0, size_t aSize0, void* aBase1, size_t aSize1)
+{
+  MOZ_ASSERT(aSize0 && aSize1);
+  return MemoryContains(aBase0, aSize0, aBase1)
+      || MemoryContains(aBase0, aSize0, (uint8_t*) aBase1 + aSize1 - 1)
+      || MemoryContains(aBase1, aSize1, aBase0);
+}
+
+static const size_t PageSize = 4096;
+
+static inline uint8_t*
+PageBase(void* aAddress)
+{
+  return (uint8_t*)aAddress - ((size_t)aAddress % PageSize);
+}
+
+static inline size_t
+RoundupSizeToPageBoundary(size_t aSize)
+{
+  if (aSize % PageSize) {
+    return aSize + PageSize - (aSize % PageSize);
+  }
+  return aSize;
+}
+
+static inline bool
+TestEnv(const char* aEnv)
+{
+  const char* value = getenv(aEnv);
+  return value && value[0];
+}
+
+// Check for membership in a vector.
+template <typename Vector, typename Entry>
+inline bool
+VectorContains(const Vector& aVector, const Entry& aEntry)
+{
+  return std::find(aVector.begin(), aVector.end(), aEntry) != aVector.end();
+}
+
+// Add or remove a unique entry to an unsorted vector.
+template <typename Vector, typename Entry>
+inline void
+VectorAddOrRemoveEntry(Vector& aVector, const Entry& aEntry, bool aAdding)
+{
+  for (Entry& existing : aVector) {
+    if (existing == aEntry) {
+      MOZ_RELEASE_ASSERT(!aAdding);
+      aVector.erase(&existing);
+      return;
+    }
+  }
+  MOZ_RELEASE_ASSERT(aAdding);
+  aVector.append(aEntry);
+}
+
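+// Illustrative use (gObservers is a hypothetical unsorted vector):
+//
+//   VectorAddOrRemoveEntry(gObservers, aObserver, /* aAdding = */ true);
+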
+bool SpewEnabled();
+void InternalPrint(const char* aFormat, va_list aArgs);
+
+#define MOZ_MakeRecordReplayPrinter(aName, aSpewing)            \
+  static inline void                                            \
+  aName(const char* aFormat, ...)                               \
+  {                                                             \
+    if ((IsRecordingOrReplaying() || IsMiddleman()) && (!aSpewing || SpewEnabled())) { \
+      va_list ap;                                               \
+      va_start(ap, aFormat);                                    \
+      InternalPrint(aFormat, ap);                               \
+      va_end(ap);                                               \
+    }                                                           \
+  }
+
+// Print information about record/replay state. Printing is independent of
+// the recording, and output may come from any recording, replaying, or
+// middleman process. Spew is only printed when enabled via the
+// RECORD_REPLAY_SPEW environment variable.
+MOZ_MakeRecordReplayPrinter(Print, false)
+MOZ_MakeRecordReplayPrinter(PrintSpew, true)
+
+#undef MOZ_MakeRecordReplayPrinter
+
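+// For example (illustrative; numFailures is hypothetical):
+//
+//   PrintSpew("Initialized redirections: %d failures\n", (int) numFailures);
+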
+///////////////////////////////////////////////////////////////////////////////
+// Profiling
+///////////////////////////////////////////////////////////////////////////////
+
+void InitializeCurrentTime();
+
+// Get a current timestamp, in microseconds.
+double CurrentTime();
+
+#define ForEachTimerKind(Macro)                 \
+  Macro(Default)
+
+enum class TimerKind {
+#define DefineTimerKind(aKind) aKind,
+  ForEachTimerKind(DefineTimerKind)
+#undef DefineTimerKind
+  Count
+};
+
+struct AutoTimer
+{
+  explicit AutoTimer(TimerKind aKind);
+  ~AutoTimer();
+
+private:
+  TimerKind mKind;
+  double mStart;
+};
+
+void DumpTimers();
+
+///////////////////////////////////////////////////////////////////////////////
+// Memory Management
+///////////////////////////////////////////////////////////////////////////////
+
+// In cases where memory is tracked and should be saved/restored with
+// checkpoints, malloc and other standard library functions suffice to allocate
+// memory in the record/replay system. The routines below are used for handling
+// redirections for the raw system calls underlying the standard libraries, and
+// for cases where allocated memory should be untracked: the contents are
+// ignored when saving/restoring checkpoints.
+
+// Different kinds of memory used in the system.
+enum class MemoryKind {
+  // Memory whose contents are saved/restored with checkpoints.
+  Tracked,
+
+  // All remaining memory kinds refer to untracked memory.
+
+  // Memory not fitting into one of the categories below.
+  Generic,
+
+  // Memory used for thread snapshots.
+  ThreadSnapshot,
+
+  // Memory used by various parts of the memory snapshot system.
+  TrackedRegions,
+  FreeRegions,
+  DirtyPageSet,
+  SortedDirtyPageSet,
+  PageCopy,
+
+  // Memory used for navigation state.
+  Navigation,
+
+  Count
+};
+
+// Allocate or deallocate a block of memory of a particular kind. Allocated
+// memory is initially zeroed.
+void* AllocateMemory(size_t aSize, MemoryKind aKind);
+void DeallocateMemory(void* aAddress, size_t aSize, MemoryKind aKind);
+
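+// Illustrative use, allocating an untracked scratch page:
+//
+//   void* scratch = AllocateMemory(PageSize, MemoryKind::Generic);
+//   // ... use the zeroed page ...
+//   DeallocateMemory(scratch, PageSize, MemoryKind::Generic);
+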
+// Allocation policy for managing memory of a particular kind.
+template <MemoryKind Kind>
+class AllocPolicy
+{
+public:
+  template <typename T>
+  T* maybe_pod_calloc(size_t aNumElems) {
+    if (aNumElems & tl::MulOverflowMask<sizeof(T)>::value) {
+      MOZ_CRASH();
+    }
+    // Note: AllocateMemory always returns zeroed memory.
+    return static_cast<T*>(AllocateMemory(aNumElems * sizeof(T), Kind));
+  }
+
+  template <typename T>
+  void free_(T* aPtr, size_t aSize) {
+    DeallocateMemory(aPtr, aSize * sizeof(T), Kind);
+  }
+
+  template <typename T>
+  T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
+    T* res = maybe_pod_calloc<T>(aNewSize);
+    memcpy(res, aPtr, aOldSize * sizeof(T));
+    free_<T>(aPtr, aOldSize);
+    return res;
+  }
+
+  template <typename T>
+  T* maybe_pod_malloc(size_t aNumElems) { return maybe_pod_calloc<T>(aNumElems); }
+
+  template <typename T>
+  T* pod_malloc(size_t aNumElems) { return maybe_pod_malloc<T>(aNumElems); }
+
+  template <typename T>
+  T* pod_calloc(size_t aNumElems) { return maybe_pod_calloc<T>(aNumElems); }
+
+  template <typename T>
+  T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
+    return maybe_pod_realloc<T>(aPtr, aOldSize, aNewSize);
+  }
+
+  void reportAllocOverflow() const {}
+
+  MOZ_MUST_USE bool checkSimulatedOOM() const {
+    return true;
+  }
+};
+
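+// As a sketch, this policy satisfies the allocation policy interface used by
+// mozilla::Vector, so untracked storage can be declared as:
+//
+//   mozilla::Vector<uint32_t, 0, AllocPolicy<MemoryKind::Generic>> vec;
+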
+///////////////////////////////////////////////////////////////////////////////
+// Redirection Bypassing
+///////////////////////////////////////////////////////////////////////////////
+
+// The functions below bypass any redirections and give access to the system
+// even if events are not passed through in the current thread. These are
+// implemented in the various platform ProcessRedirect*.cpp files, and will
+// crash on errors which can't be handled internally.
+
+// Generic typedef for a system file handle.
+typedef size_t FileHandle;
+
+// Allocate/deallocate a block of memory directly from the system.
+void* DirectAllocateMemory(void* aAddress, size_t aSize);
+void DirectDeallocateMemory(void* aAddress, size_t aSize);
+
+// Give a block of memory R or RX access.
+void DirectWriteProtectMemory(void* aAddress, size_t aSize, bool aExecutable,
+                              bool aIgnoreFailures = false);
+
+// Give a block of memory RW or RWX access.
+void DirectUnprotectMemory(void* aAddress, size_t aSize, bool aExecutable,
+                           bool aIgnoreFailures = false);
+
+// Open an existing file for reading or a new file for writing, clobbering any
+// existing file.
+FileHandle DirectOpenFile(const char* aFilename, bool aWriting);
+
+// Seek to an offset within a file open for reading.
+void DirectSeekFile(FileHandle aFd, uint64_t aOffset);
+
+// Close or delete a file.
+void DirectCloseFile(FileHandle aFd);
+void DirectDeleteFile(const char* aFilename);
+
+// Append data to a file open for writing, blocking until the write completes.
+void DirectWrite(FileHandle aFd, const void* aData, size_t aSize);
+
+// Print a string directly to stderr.
+void DirectPrint(const char* aString);
+
+// Read data from a file, blocking until the read completes.
+size_t DirectRead(FileHandle aFd, void* aData, size_t aSize);
+
+// Create a new pipe.
+void DirectCreatePipe(FileHandle* aWriteFd, FileHandle* aReadFd);
+
+// Spawn a new thread.
+void DirectSpawnThread(void (*aFunction)(void*), void* aArgument);
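+
+// Illustrative use of the bypass API (the path here is hypothetical):
+//
+//   FileHandle fd = DirectOpenFile("/tmp/rr-spew.log", /* aWriting = */ true);
+//   const char msg[] = "bypassed write\n";
+//   DirectWrite(fd, msg, sizeof(msg) - 1);
+//   DirectCloseFile(fd);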
+
+} // namespace recordreplay
+} // namespace mozilla
+
+#endif // mozilla_recordreplay_ProcessRecordReplay_h
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/ProcessRedirect.cpp
@@ -0,0 +1,686 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "ProcessRedirect.h"
+
+#include "InfallibleVector.h"
+#include "mozilla/Sprintf.h"
+
+#include <dlfcn.h>
+#include <string.h>
+
+namespace {
+
+#include "udis86/udis86.c"
+#include "udis86/decode.c"
+#include "udis86/itab.c"
+
+} // anonymous namespace
+
+namespace mozilla {
+namespace recordreplay {
+
+///////////////////////////////////////////////////////////////////////////////
+// Library API Redirections
+///////////////////////////////////////////////////////////////////////////////
+
+// Redirecting system library APIs requires delicacy. We have to patch the code
+// so that whenever control reaches the beginning of the library API's symbol,
+// we will end up jumping to an address of our choice instead. This has to be
+// done without corrupting the instructions of any functions in the library,
+// which principally means ensuring that there are no internal jumps into the
+// code segments we have patched.
+//
+// The patching we do here might fail: it isn't possible to redirect an
+// arbitrary symbol within an arbitrary block of code. We are doing a best
+// effort sort of thing: any failures are noted for later reporting, and a
+// failed redirection leaves the original code untouched.
+
+// Keep track of the jumps we know about which could affect the validity of a
+// code patch.
+static StaticInfallibleVector<std::pair<uint8_t*,uint8_t*>> gInternalJumps;
+
+// A jump to patch in at the end of redirecting. To avoid issues with calling
+// redirected functions before all redirections have been installed
+// (particularly due to locks being taken while checking for internal jump
+// targets), all modification of the original code is delayed until after no
+// further system calls are needed.
+struct JumpPatch
+{
+  uint8_t* mStart;
+  uint8_t* mTarget;
+  bool mShort;
+
+  JumpPatch(uint8_t* aStart, uint8_t* aTarget, bool aShort)
+    : mStart(aStart), mTarget(aTarget), mShort(aShort)
+  {}
+};
+static StaticInfallibleVector<JumpPatch> gJumpPatches;
+
+static void
+AddJumpPatch(uint8_t* aStart, uint8_t* aTarget, bool aShort)
+{
+  gInternalJumps.emplaceBack(aStart, aTarget);
+  gJumpPatches.emplaceBack(aStart, aTarget, aShort);
+}
+
+// A range of instructions to clobber at the end of redirecting.
+struct ClobberPatch
+{
+  uint8_t* mStart;
+  uint8_t* mEnd;
+
+  ClobberPatch(uint8_t* aStart, uint8_t* aEnd)
+    : mStart(aStart), mEnd(aEnd)
+  {}
+};
+static StaticInfallibleVector<ClobberPatch> gClobberPatches;
+
+static void
+AddClobberPatch(uint8_t* aStart, uint8_t* aEnd)
+{
+  if (aStart < aEnd) {
+    gClobberPatches.emplaceBack(aStart, aEnd);
+  }
+}
+
+static uint8_t*
+SymbolBase(uint8_t* aPtr)
+{
+  Dl_info info;
+  if (!dladdr(aPtr, &info)) {
+    MOZ_CRASH();
+  }
+  return static_cast<uint8_t*>(info.dli_saddr);
+}
+
+// Use Udis86 to decode a single instruction, returning the number of bytes
+// consumed.
+static size_t
+DecodeInstruction(uint8_t* aIp, ud_t* aUd)
+{
+  ud_init(aUd);
+  ud_set_input_buffer(aUd, aIp, MaximumInstructionLength);
+  ud_set_mode(aUd, 64);
+
+  size_t nbytes = ud_decode(aUd);
+  MOZ_RELEASE_ASSERT(nbytes && nbytes <= MaximumInstructionLength);
+  return nbytes;
+}
+
+// If it is unsafe to patch new instructions into [aIpStart, aIpEnd) then
+// return an instruction from which a new search can be started.
+static uint8_t*
+MaybeInternalJumpTarget(uint8_t* aIpStart, uint8_t* aIpEnd)
+{
+  // The start and end have to be associated with the same symbol, as otherwise
+  // a jump could come into the start of the later symbol.
+  const char* startName = SymbolNameRaw(aIpStart);
+  const char* endName = SymbolNameRaw(aIpEnd - 1);
+  if (strcmp(startName, endName)) {
+    return SymbolBase(aIpEnd - 1);
+  }
+
+  // Look for any internal jumps from outside the patch range into the middle
+  // of the patch range.
+  for (auto jump : gInternalJumps) {
+    if (!(jump.first >= aIpStart && jump.first < aIpEnd) &&
+        jump.second > aIpStart && jump.second < aIpEnd) {
+      return jump.second;
+    }
+  }
+
+  // Treat patched regions of code as if they had internal jumps.
+  for (auto patch : gJumpPatches) {
+    uint8_t* end = patch.mStart + (patch.mShort ? ShortJumpBytes : JumpBytesClobberRax);
+    if (MemoryIntersects(aIpStart, aIpEnd - aIpStart, patch.mStart, end - patch.mStart)) {
+      return end;
+    }
+  }
+  for (auto patch : gClobberPatches) {
+    if (MemoryIntersects(aIpStart, aIpEnd - aIpStart, patch.mStart, patch.mEnd - patch.mStart)) {
+      return patch.mEnd;
+    }
+  }
+
+  if ((size_t)(aIpEnd - aIpStart) > ShortJumpBytes) {
+    // Manually annotate functions which might have backedges that interfere
+    // with redirecting the initial bytes of the function. Ideally we would
+    // find these backedges with some binary analysis, but this is easier said
+    // than done, especially since there doesn't seem to be a standard way to
+    // determine the extent of a symbol's code on OSX. Use strstr to avoid
+    // issues with goo in the symbol names.
+    if ((strstr(startName, "CTRunGetGlyphs") &&
+         !strstr(startName, "CTRunGetGlyphsPtr")) ||
+        (strstr(startName, "CTRunGetPositions") &&
+         !strstr(startName, "CTRunGetPositionsPtr")) ||
+        (strstr(startName, "CTRunGetStringIndices") &&
+         !strstr(startName, "CTRunGetStringIndicesPtr")) ||
+        strstr(startName, "CGColorSpaceCreateDeviceRGB") ||
+        // For these functions, there is a syscall near the beginning which
+        // other system threads might be inside.
+        strstr(startName, "__workq_kernreturn") ||
+        strstr(startName, "kevent64")) {
+      return aIpEnd - 1;
+    }
+  }
+
+  return nullptr;
+}
+
+// Any reasons why redirection failed.
+static StaticInfallibleVector<char*> gRedirectFailures;
+
+static void
+RedirectFailure(const char* aFormat, ...)
+{
+  va_list ap;
+  va_start(ap, aFormat);
+  char buf[4096];
+  VsprintfLiteral(buf, aFormat, ap);
+  va_end(ap);
+  gRedirectFailures.emplaceBack(strdup(buf));
+}
+
+static void
+UnknownInstruction(const char* aName, uint8_t* aIp, size_t aNbytes)
+{
+  char buf[4096];
+  char* ptr = buf;
+  for (size_t i = 0; i < aNbytes; i++) {
+    int written = snprintf(ptr, sizeof(buf) - (ptr - buf), " %d", (int) aIp[i]);
+    ptr += written;
+  }
+  RedirectFailure("Unknown instruction in %s:%s", aName, buf);
+}
+
+// Try to emit instructions to |aAssembler| with equivalent behavior to any
+// special jump or ip-dependent instruction at |aIp|, returning true if the
+// instruction was copied.
+static bool
+CopySpecialInstruction(uint8_t* aIp, ud_t* aUd, size_t aNbytes, Assembler& aAssembler)
+{
+  aAssembler.NoteOriginalInstruction(aIp);
+
+  if (aUd->pfx_seg) {
+    return false;
+  }
+
+  ud_mnemonic_code mnemonic = ud_insn_mnemonic(aUd);
+  if (mnemonic == UD_Icall || mnemonic == UD_Ijmp || (mnemonic >= UD_Ijo && mnemonic <= UD_Ijg)) {
+    MOZ_RELEASE_ASSERT(!ud_insn_opr(aUd, 1));
+    const ud_operand* op = ud_insn_opr(aUd, 0);
+    if (op->type == UD_OP_JIMM) {
+      // Call or jump relative to rip.
+      uint8_t* target = aIp + aNbytes;
+      switch (op->size) {
+      case 8:  target += op->lval.sbyte;  break;
+      case 32: target += op->lval.sdword; break;
+      default: return false;
+      }
+      gInternalJumps.emplaceBack(nullptr, target);
+      if (mnemonic == UD_Icall) {
+        aAssembler.MoveImmediateToRax(target);
+        aAssembler.CallRax();
+      } else if (mnemonic == UD_Ijmp) {
+        aAssembler.Jump(target);
+      } else {
+        aAssembler.ConditionalJump(aUd->primary_opcode, target);
+      }
+      return true;
+    }
+    if (op->type == UD_OP_MEM && op->base == UD_R_RIP && !op->index && op->offset == 32) {
+      // jmp *$offset32(%rip)
+      uint8_t* addr = aIp + aNbytes + op->lval.sdword;
+      aAssembler.MoveImmediateToRax(addr);
+      aAssembler.LoadRax(8);
+      aAssembler.JumpToRax();
+      return true;
+    }
+  }
+
+  if (mnemonic == UD_Imov || mnemonic == UD_Ilea) {
+    MOZ_RELEASE_ASSERT(!ud_insn_opr(aUd, 2));
+    const ud_operand* dst = ud_insn_opr(aUd, 0);
+    const ud_operand* src = ud_insn_opr(aUd, 1);
+    if (dst->type == UD_OP_REG &&
+        src->type == UD_OP_MEM && src->base == UD_R_RIP && !src->index && src->offset == 32) {
+      // mov/lea $offset32(%rip), reg
+      int reg = Assembler::NormalizeRegister(dst->base);
+      if (!reg) {
+        return false;
+      }
+      uint8_t* addr = aIp + aNbytes + src->lval.sdword;
+      if (reg != UD_R_RAX) {
+        aAssembler.PushRax();
+      }
+      aAssembler.MoveImmediateToRax(addr);
+      if (mnemonic == UD_Imov) {
+        aAssembler.LoadRax(src->size / 8);
+      }
+      if (reg != UD_R_RAX) {
+        aAssembler.MoveRaxToRegister(reg);
+        aAssembler.PopRax();
+      }
+      return true;
+    }
+    if (dst->type == UD_OP_MEM && dst->base == UD_R_RIP && !dst->index && dst->offset == 32 &&
+        src->type == UD_OP_REG && mnemonic == UD_Imov) {
+      // movl reg, $offset32(%rip)
+      int reg = Assembler::NormalizeRegister(src->base);
+      if (!reg) {
+        return false;
+      }
+      uint8_t* addr = aIp + aNbytes + dst->lval.sdword;
+      aAssembler.PushRax();
+      aAssembler.PushRbx();
+      aAssembler.MoveRegisterToRax(reg);
+      aAssembler.PushRax();
+      aAssembler.PopRbx();
+      aAssembler.MoveImmediateToRax(addr);
+      aAssembler.StoreRbxToRax(src->size / 8);
+      aAssembler.PopRbx();
+      aAssembler.PopRax();
+      return true;
+    }
+  }
+
+  if (mnemonic == UD_Icmp) {
+    MOZ_RELEASE_ASSERT(!ud_insn_opr(aUd, 2));
+    const ud_operand* dst = ud_insn_opr(aUd, 0);
+    const ud_operand* src = ud_insn_opr(aUd, 1);
+    if (dst->type == UD_OP_MEM && dst->base == UD_R_RIP && !dst->index && dst->offset == 32 &&
+        src->type == UD_OP_IMM && src->size == 8) {
+      // cmp $literal8, $offset32(%rip)
+      uint8_t value = src->lval.ubyte;
+      uint8_t* addr = aIp + aNbytes + dst->lval.sdword;
+      aAssembler.PushRax();
+      aAssembler.MoveImmediateToRax(addr);
+      aAssembler.LoadRax(dst->size / 8);
+      aAssembler.CompareValueWithRax(value, dst->size / 8);
+      aAssembler.PopRax();
+      return true;
+    }
+    if (dst->type == UD_OP_REG &&
+        src->type == UD_OP_MEM && src->base == UD_R_RIP && !src->index && src->offset == 32) {
+      // cmpq $offset32(%rip), reg
+      int reg = Assembler::NormalizeRegister(dst->base);
+      if (!reg) {
+        return false;
+      }
+      uint8_t* addr = aIp + aNbytes + src->lval.sdword;
+      aAssembler.PushRax();
+      aAssembler.MoveRegisterToRax(reg);
+      aAssembler.PushRax();
+      aAssembler.MoveImmediateToRax(addr);
+      aAssembler.LoadRax(8);
+      aAssembler.CompareRaxWithTopOfStack();
+      aAssembler.PopRax();
+      aAssembler.PopRax();
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// Copy an instruction to aAssembler, returning the number of bytes used by the
+// instruction.
+static size_t
+CopyInstruction(const char* aName, uint8_t* aIp, Assembler& aAssembler)
+{
+  // Use Udis86 to decode a single instruction.
+  ud_t ud;
+  size_t nbytes = DecodeInstruction(aIp, &ud);
+
+  // Check for a special cased instruction.
+  if (CopySpecialInstruction(aIp, &ud, nbytes, aAssembler)) {
+    return nbytes;
+  }
+
+  // Don't copy call and jump instructions. We should have special cased these,
+  // and these may not behave correctly after a naive copy if their behavior is
+  // relative to the instruction pointer.
+  ud_mnemonic_code_t mnemonic = ud_insn_mnemonic(&ud);
+  if (mnemonic == UD_Icall || (mnemonic >= UD_Ijo && mnemonic <= UD_Ijmp)) {
+    UnknownInstruction(aName, aIp, nbytes);
+    return nbytes;
+  }
+
+  // Don't copy instructions which have the instruction pointer as an operand.
+  // We should have special cased these, and as above these will not behave
+  // correctly after being naively copied due to their dependence on the
+  // instruction pointer.
+  for (size_t i = 0;; i++) {
+    const ud_operand_t* op = ud_insn_opr(&ud, i);
+    if (!op) {
+      break;
+    }
+    switch (op->type) {
+    case UD_OP_MEM:
+      if (op->index == UD_R_RIP) {
+        UnknownInstruction(aName, aIp, nbytes);
+        return nbytes;
+      }
+      MOZ_FALLTHROUGH;
+    case UD_OP_REG:
+      if (op->base == UD_R_RIP) {
+        UnknownInstruction(aName, aIp, nbytes);
+        return nbytes;
+      }
+      break;
+    default:
+      break;
+    }
+  }
+
+  aAssembler.CopyInstruction(aIp, nbytes);
+  return nbytes;
+}
+
+// Copy all instructions containing bytes in the range [aIpStart, aIpEnd) to
+// the given assembler, returning the address of the first instruction not
+// copied (i.e. the fallthrough instruction from the copied range).
+static uint8_t*
+CopyInstructions(const char* aName, uint8_t* aIpStart, uint8_t* aIpEnd,
+                 Assembler& aAssembler)
+{
+  MOZ_RELEASE_ASSERT(!MaybeInternalJumpTarget(aIpStart, aIpEnd));
+
+  uint8_t* ip = aIpStart;
+  while (ip < aIpEnd) {
+    ip += CopyInstruction(aName, ip, aAssembler);
+  }
+  return ip;
+}
+
+// Get the instruction pointer to use as the address of the base function for a
+// redirection.
+static uint8_t*
+FunctionStartAddress(Redirection& aRedirection)
+{
+  uint8_t* addr = static_cast<uint8_t*>(dlsym(RTLD_DEFAULT, aRedirection.mName));
+  if (!addr) {
+    return nullptr;
+  }
+
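+  // dlsym may resolve to a stub of the form |jmp *$offset32(%rip)|
+  // (0xFF 0x25); if so, read the real function address out of the slot the
+  // stub jumps through.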
+  if (addr[0] == 0xFF && addr[1] == 0x25) {
+    return *(uint8_t**)(addr + 6 + *reinterpret_cast<int32_t*>(addr + 2));
+  }
+
+  return addr;
+}
+
+// Setup a redirection: overwrite the machine code for its base function, and
+// fill in its original function, to satisfy the function pointer behaviors
+// described in the Redirection structure. aCursor and aCursorEnd are used to
+// allocate executable memory for use in the redirection.
+static void
+Redirect(Redirection& aRedirection, Assembler& aAssembler, bool aFirstPass)
+{
+  // The patching we do here might fail: it isn't possible to redirect an
+  // arbitrary instruction pointer within an arbitrary block of code. This code
+  // is doing a best effort sort of thing, and on failure it will crash safely.
+  // The main thing we want to avoid is corrupting the code so that it has been
+  // redirected but might crash or behave incorrectly when executed.
+  uint8_t* functionStart = aRedirection.mBaseFunction;
+  uint8_t* ro = functionStart;
+
+  if (!functionStart) {
+    if (aFirstPass) {
+      PrintSpew("Could not find symbol %s for redirecting.\n", aRedirection.mName);
+    }
+    return;
+  }
+
+  if (aRedirection.mOriginalFunction != aRedirection.mBaseFunction) {
+    // We already redirected this function.
+    MOZ_RELEASE_ASSERT(!aFirstPass);
+    return;
+  }
+
+  // First, see if we can overwrite JumpBytesClobberRax bytes of instructions
+  // at the base function with a direct jump to the new function. Rax is never
+  // live at the start of a function and we can emit a jump to an arbitrary
+  // location with fewer instruction bytes on x64 if we clobber it.
+  //
+  // This will work if there are no extraneous jump targets within the region
+  // of memory we are overwriting. If there are, we will corrupt the behavior
+  // of those jumps if we patch the memory.
+  uint8_t* extent = ro + JumpBytesClobberRax;
+  if (!MaybeInternalJumpTarget(ro, extent)) {
+    // Given code instructions for the base function as follows (AA are
+    // instructions we will end up copying, -- are instructions that will never
+    // be inspected or modified):
+    //
+    // base function: AA--
+    //
+    // Transform the code into:
+    //
+    // base function: J0--
+    // generated code: AAJ1
+    //
+    // Where J0 jumps to the new function, the original function is at AA, and
+    // J1 jumps to the point after J0.
+
+    // Set the new function to the start of the generated code.
+    aRedirection.mOriginalFunction = aAssembler.Current();
+
+    // Copy AA into generated code.
+    ro = CopyInstructions(aRedirection.mName, ro, extent, aAssembler);
+
+    // Emit jump J1.
+    aAssembler.Jump(ro);
+
+    // Emit jump J0.
+    AddJumpPatch(functionStart, aRedirection.mNewFunction, /* aShort = */ false);
+    AddClobberPatch(functionStart + JumpBytesClobberRax, ro);
+    return;
+  }
+
+  // We don't have enough space to patch in a long jump to an arbitrary
+  // instruction. Attempt to find another region of code that is long enough
+  // for two long jumps, has no internal jump targets, and is within range of
+  // the base function for a short jump.
+  //
+  // Given code instructions for the base function, with formatting as above:
+  //
+  // base function: AA--BBBB--
+  //
+  // Transform the code into:
+  //
+  // base function: J0--J1J2--
+  // generated code: AAJ3 BBBBJ4
+  //
+  // With the original function at AA, the jump targets are as follows:
+  //
+  // J0: short jump to J2
+  // J1: jump to BBBB
+  // J2: jump to the new function
+  // J3: jump to the point after J0
+  // J4: jump to the point after J2
+
+  // Skip this during the first pass, we don't want to patch a jump in over the
+  // initial bytes of a function we haven't redirected yet.
+  if (aFirstPass) {
+    return;
+  }
+
+  // The original symbol must have enough bytes to insert a short jump.
+  MOZ_RELEASE_ASSERT(!MaybeInternalJumpTarget(ro, ro + ShortJumpBytes));
+
+  // Copy AA into generated code.
+  aRedirection.mOriginalFunction = aAssembler.Current();
+  uint8_t* nro = CopyInstructions(aRedirection.mName, ro, ro + ShortJumpBytes, aAssembler);
+
+  // Emit jump J3.
+  aAssembler.Jump(nro);
+
+  // Keep advancing the instruction pointer until we get to a region that is
+  // large enough for two long jump patches.
+  ro = SymbolBase(extent);
+  while (true) {
+    extent = ro + JumpBytesClobberRax * 2;
+    uint8_t* target = MaybeInternalJumpTarget(ro, extent);
+    if (target) {
+      ro = target;
+      continue;
+    }
+    break;
+  }
+
+  // Copy BBBB into generated code.
+  uint8_t* firstJumpTarget = aAssembler.Current();
+  uint8_t* afterip = CopyInstructions(aRedirection.mName, ro, extent, aAssembler);
+
+  // Emit jump J4.
+  aAssembler.Jump(afterip);
+
+  // Emit jump J1.
+  AddJumpPatch(ro, firstJumpTarget, /* aShort = */ false);
+
+  // Emit jump J2.
+  AddJumpPatch(ro + JumpBytesClobberRax, aRedirection.mNewFunction, /* aShort = */ false);
+  AddClobberPatch(ro + 2 * JumpBytesClobberRax, afterip);
+
+  // Emit jump J0.
+  AddJumpPatch(functionStart, ro + JumpBytesClobberRax, /* aShort = */ true);
+  AddClobberPatch(functionStart + ShortJumpBytes, nro);
+}
+
+void
+EarlyInitializeRedirections()
+{
+  for (size_t i = 0;; i++) {
+    Redirection& redirection = gRedirections[i];
+    if (!redirection.mName) {
+      break;
+    }
+    MOZ_ASSERT(!redirection.mBaseFunction);
+    MOZ_ASSERT(redirection.mNewFunction);
+    MOZ_ASSERT(!redirection.mOriginalFunction);
+
+    redirection.mBaseFunction = FunctionStartAddress(redirection);
+    redirection.mOriginalFunction = redirection.mBaseFunction;
+
+    if (redirection.mBaseFunction && IsRecordingOrReplaying()) {
+      // We will get confused if we try to redirect the same address in multiple places.
+      for (size_t j = 0; j < i; j++) {
+        if (gRedirections[j].mBaseFunction == redirection.mBaseFunction) {
+          PrintSpew("Redirection %s shares the same address as %s, skipping.\n",
+                    redirection.mName, gRedirections[j].mName);
+          redirection.mBaseFunction = nullptr;
+          break;
+        }
+      }
+    }
+  }
+}
+
+bool
+InitializeRedirections()
+{
+  MOZ_ASSERT(IsRecordingOrReplaying());
+
+  {
+    Assembler assembler;
+
+    for (size_t i = 0;; i++) {
+      Redirection& redirection = gRedirections[i];
+      if (!redirection.mName) {
+        break;
+      }
+      Redirect(redirection, assembler, /* aFirstPass = */ true);
+    }
+
+    for (size_t i = 0;; i++) {
+      Redirection& redirection = gRedirections[i];
+      if (!redirection.mName) {
+        break;
+      }
+      Redirect(redirection, assembler, /* aFirstPass = */ false);
+    }
+  }
+
+  // Don't install redirections if we had any failures.
+  if (!gRedirectFailures.empty()) {
+    size_t len = 4096;
+    gInitializationFailureMessage = new char[len];
+    gInitializationFailureMessage[--len] = 0;
+
+    char* ptr = gInitializationFailureMessage;
+    for (char* reason : gRedirectFailures) {
+      size_t n = snprintf(ptr, len, "%s\n", reason);
+      if (n >= len) {
+        break;
+      }
+      ptr += n;
+      len -= n;
+    }
+
+    return false;
+  }
+
+  // Remove write protection from all patched regions, so that we don't call
+  // into the system while we are in the middle of redirecting.
+  for (const JumpPatch& patch : gJumpPatches) {
+    UnprotectExecutableMemory(patch.mStart, patch.mShort ? ShortJumpBytes : JumpBytesClobberRax);
+  }
+  for (const ClobberPatch& patch : gClobberPatches) {
+    UnprotectExecutableMemory(patch.mStart, patch.mEnd - patch.mStart);
+  }
+
+  // Do the actual patching of executable code for the functions we are
+  // redirecting.
+
+  for (const JumpPatch& patch : gJumpPatches) {
+    if (patch.mShort) {
+      Assembler::PatchShortJump(patch.mStart, patch.mTarget);
+    } else {
+      Assembler::PatchJumpClobberRax(patch.mStart, patch.mTarget);
+    }
+  }
+
+  for (const ClobberPatch& patch : gClobberPatches) {
+    for (uint8_t* ip = patch.mStart; ip < patch.mEnd; ip++) {
+      Assembler::PatchClobber(ip);
+    }
+  }
+
+  return true;
+}
+
+///////////////////////////////////////////////////////////////////////////////
+// Utility
+///////////////////////////////////////////////////////////////////////////////
+
+Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> gMemoryLeakBytes;
+
+void*
+BindFunctionArgument(void* aFunction, void* aArgument, size_t aArgumentPosition,
+                     Assembler& aAssembler)
+{
+  void* res = aAssembler.Current();
+
+  // On x64 the argument will be in a register, so to add an extra argument for
+  // the callee we just need to fill in the appropriate register for the
+  // argument position with the bound argument value.
+  aAssembler.MoveImmediateToRax(aArgument);
+
+  switch (aArgumentPosition) {
+  case 1: aAssembler.MoveRaxToRegister(UD_R_RSI); break;
+  case 2: aAssembler.MoveRaxToRegister(UD_R_RDX); break;
+  case 3: aAssembler.MoveRaxToRegister(UD_R_RCX); break;
+  default: MOZ_CRASH();
+  }
+
+  // Jump to the function that was bound.
+  aAssembler.Jump(aFunction);
+
+  return res;
+}
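+
+// Illustrative use (MyHook and context are hypothetical): build a thunk that
+// enters MyHook with |context| forced into the third argument register (rdx):
+//
+//   void* thunk = BindFunctionArgument((void*) MyHook, context,
+//                                      /* aArgumentPosition = */ 2, assembler);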
+
+} // namespace recordreplay
+} // namespace mozilla
new file mode 100644
--- /dev/null
+++ b/toolkit/recordreplay/ProcessRedirect.h
@@ -0,0 +1,774 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_recordreplay_ProcessRedirect_h
+#define mozilla_recordreplay_ProcessRedirect_h
+
+#include "Assembler.h"
+#include "Callback.h"
+#include "CallFunction.h"
+#include "ProcessRecordReplay.h"
+#include "ProcessRewind.h"
+#include "Thread.h"
+#include "ipc/Channel.h"
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Casting.h"
+
+#include <errno.h>
+
+namespace mozilla {
+namespace recordreplay {
+
+// Redirections Overview.
+//
+// The vast majority of recording and replaying is done through function
+// redirections. When the record/replay system is initialized, a set of system
+// library API functions have their machine code modified so that when that API
+// is called it redirects control to a custom record/replay function with the
+// same signature. Machine code is also generated that contains any overwritten
+// instructions in the API, and which may be called to get the API's original
+// behavior before it was redirected.
+//
+// In the usual case, a record/replay function redirection does the following
+// standard steps:
+//
+// 1. If events are being passed through, the original function is called and
+//    its results returned to the caller, as if the redirection was not there
+//    at all.
+//
+// 2. If events are not passed through and we are recording, the original
+//    function is called, and then an event is recorded for the current thread
+//    along with any outputs produced by the call.
+//
+// 3. If events are not passed through and we are replaying, the original
+//    function is *not* called, but rather the event and outputs are read from
+//    the recording and sent back to the caller.
+//
+// Macros are provided below to streamline this process. Redirections do not
+// need to adhere to this protocol, however, and can have whatever behaviors
+// that are necessary for reliable record/replay.
+//
+// Some platforms need additional redirection techniques for handling different
+// features of that platform. See the individual ProcessRedirect*.cpp files for
+// descriptions of these.
+//
+// The main advantage of using redirections is that Gecko code does not need to
+// be modified at all to take advantage of them. Redirected APIs should be
+// functions that are directly called by Gecko code and are part of system
+// libraries. These APIs are well defined, well documented by the platform, and
+// stable. The main maintenance burden going forward is in handling new library
+// APIs that were not previously called by Gecko.
+//
+// The main risk with using function redirections is that the set of redirected
+// functions is incomplete. If a library API is not redirected then it might
+// behave differently between recording and replaying, or it might crash while
+// replaying.
+
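+// As an illustrative sketch (the call event CallEvent_close is hypothetical),
+// the standard steps for a one-argument function that records an error when
+// it returns a negative value can be generated with the macros below:
+//
+//   RRFunctionNegError1(close)   // defines RR_close(a0)
+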
+///////////////////////////////////////////////////////////////////////////////
+// Function Redirections
+///////////////////////////////////////////////////////////////////////////////
+
+// Information about a system library API function which is being redirected.
+struct Redirection
+{
+  // Name of the function being redirected.
+  const char* mName;
+
+  // Address of the function which is being redirected. The code for this
+  // function is modified so that attempts to call this function will instead
+  // call mNewFunction.
+  uint8_t* mBaseFunction;
+
+  // Function with the same signature as mBaseFunction, which may have
+  // different behavior for recording/replaying the call.
+  uint8_t* mNewFunction;
+
+  // Function with the same signature and original behavior as
+  // mBaseFunction.
+  uint8_t* mOriginalFunction;
+};
+
+// All platform specific redirections, indexed by the call event.
+extern Redirection gRedirections[];
+
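+// For illustration only, an entry in this table has the hypothetical shape:
+//
+//   { "close", nullptr, BitwiseCast<uint8_t*>(RR_close), nullptr },
+//
+// where mBaseFunction and mOriginalFunction are filled in by
+// EarlyInitializeRedirections().
+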
+// Do early initialization of redirections. This is done on both
+// recording/replaying and middleman processes, and allows OriginalCall() to
+// work in either case.
+void EarlyInitializeRedirections();
+
+// Set up all platform specific redirections, or fail and set
+// gInitializationFailureMessage.
+bool InitializeRedirections();
+
+// Generic type for a system error code.
+typedef ssize_t ErrorType;
+
+// Functions for saving or restoring system error codes.
+static inline ErrorType SaveError() { return errno; }
+static inline void RestoreError(ErrorType aError) { errno = aError; }
+
+// Specify the default ABI to use by the record/replay macros below.
+#define DEFAULTABI
+
+// Define CallFunction(...) for all supported ABIs.
+DefineAllCallFunctions(DEFAULTABI)
+
+// Get the address of the original function for a call event ID.
+static inline void*
+OriginalFunction(size_t aCallId)
+{
+  return gRedirections[aCallId].mOriginalFunction;
+}
+
+#define TokenPaste(aFirst, aSecond) aFirst ## aSecond
+
+// Call the original function for a call event ID with a particular ABI and any
+// number of arguments.
+#define OriginalCallABI(aName, aReturnType, aABI, ...)          \
+  TokenPaste(CallFunction, aABI) <aReturnType>                  \
+    (OriginalFunction(CallEvent_ ##aName), ##__VA_ARGS__)
+
+// Call the original function for a call event ID with the default ABI.
+#define OriginalCall(aName, aReturnType, ...)                   \
+  OriginalCallABI(aName, aReturnType, DEFAULTABI, ##__VA_ARGS__)
+
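+// For example (CallEvent_read is a hypothetical call event):
+//
+//   ssize_t n = OriginalCall(read, ssize_t, aFd, aBuf, aCount);
+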
+// State for a function redirection which performs the standard steps (see the
+// comment at the start of this file). This should not be created directly, but
+// rather through one of the macros below.
+struct AutoRecordReplayFunctionVoid
+{
+  // The current thread, or null if events are being passed through.
+  Thread* mThread;
+
+  // Any system error generated by the call which was redirected.
+  ErrorType mError;
+
+protected:
+  // Information about the call being recorded.
+  size_t mCallId;
+  const char* mCallName;
+
+public:
+  AutoRecordReplayFunctionVoid(size_t aCallId, const char* aCallName)
+    : mThread(AreThreadEventsPassedThrough() ? nullptr : Thread::Current()),
+      mError(0), mCallId(aCallId), mCallName(aCallName)
+  {
+    if (mThread) {
+      // Calling any redirection which performs the standard steps will cause
+      // debugger operations that have diverged from the recording to fail.
+      EnsureNotDivergedFromRecording();
+
+      MOZ_ASSERT(!AreThreadEventsDisallowed());
+
+      // Pass through events in case we are calling the original function.
+      mThread->SetPassThrough(true);
+    }
+  }
+
+  ~AutoRecordReplayFunctionVoid()
+  {
+    if (mThread) {
+      // Restore any error saved or replayed earlier to the system.
+      RestoreError(mError);
+    }
+  }
+
+  // Begin recording or replaying data for the call. This must be called before
+  // destruction if mThread is non-null.
+  inline void StartRecordReplay() {
+    MOZ_ASSERT(mThread);
+
+    // Save any system error in case we want to record/replay it.
+    mError = SaveError();
+
+    // Stop the event passing through that was initiated in the constructor.
+    mThread->SetPassThrough(false);
+
+    // Add an event for the thread.
+    RecordReplayAssert("%s", mCallName);
+    ThreadEvent ev = (ThreadEvent)((uint32_t)ThreadEvent::CallStart + mCallId);
+    mThread->Events().RecordOrReplayThreadEvent(ev);
+  }
+};
+
+// State for a function redirection that performs the standard steps and also
+// returns a value.
+template <typename ReturnType>
+struct AutoRecordReplayFunction : AutoRecordReplayFunctionVoid
+{
+  // The value which this function call should return.
+  ReturnType mRval;
+
+  AutoRecordReplayFunction(size_t aCallId, const char* aCallName)
+    : AutoRecordReplayFunctionVoid(aCallId, aCallName)
+  {}
+};
+
+// Macros for recording or replaying a function that performs the standard
+// steps. These macros should be used near the start of the body of a
+// redirection function, and will fall through only if events are not
+// passed through and the outputs of the function need to be recorded or
+// replayed.
+//
+// These macros define an AutoRecordReplayFunction local |rrf| with state for
+// the redirection, and additional locals |events| and (if the function has a
+// return value) |rval| for convenient access.
+
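+// A minimal sketch of a redirection written with these macros (the event
+// CallEvent_getpid is hypothetical; this mirrors RRFunctionTypes0 below):
+//
+//   static size_t DEFAULTABI
+//   RR_getpid()
+//   {
+//     RecordReplayFunction(getpid, size_t);
+//     events.RecordOrReplayValue(&rval);
+//     return rval;
+//   }
+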
+// Record/replay a function that returns a value and has a particular ABI.
+#define RecordReplayFunctionABI(aName, aReturnType, aABI, ...)          \
+  AutoRecordReplayFunction<aReturnType> rrf(CallEvent_ ##aName, #aName); \
+  if (!rrf.mThread) {                                                   \
+    return OriginalCallABI(aName, aReturnType, aABI, ##__VA_ARGS__);    \
+  }                                                                     \
+  if (IsRecording()) {                                                  \
+    rrf.mRval = OriginalCallABI(aName, aReturnType, aABI, ##__VA_ARGS__); \
+  }                                                                     \
+  rrf.StartRecordReplay();                                              \
+  Stream& events = rrf.mThread->Events();                               \
+  (void) events;                                                        \
+  aReturnType& rval = rrf.mRval
+
+// Record/replay a function that returns a value and has the default ABI.
+#define RecordReplayFunction(aName, aReturnType, ...)                   \
+  RecordReplayFunctionABI(aName, aReturnType, DEFAULTABI, ##__VA_ARGS__)
+
+// Record/replay a function that has no return value and has a particular ABI.
+#define RecordReplayFunctionVoidABI(aName, aABI, ...)                   \
+  AutoRecordReplayFunctionVoid rrf(CallEvent_ ##aName, #aName);         \
+  if (!rrf.mThread) {                                                   \
+    OriginalCallABI(aName, void, aABI, ##__VA_ARGS__);                  \
+    return;                                                             \
+  }                                                                     \
+  if (IsRecording()) {                                                  \
+    OriginalCallABI(aName, void, aABI, ##__VA_ARGS__);                  \
+  }                                                                     \
+  rrf.StartRecordReplay();                                              \
+  Stream& events = rrf.mThread->Events();                               \
+  (void) events
+
+// Record/replay a function that has no return value and has the default ABI.
+#define RecordReplayFunctionVoid(aName, ...)                    \
+  RecordReplayFunctionVoidABI(aName, DEFAULTABI, ##__VA_ARGS__)
+
+// The following macros are used for functions that do not record an error and
+// take or return values of specified types.
+//
+// aAT == aArgumentType
+// aRT == aReturnType
+
+#define RRFunctionTypes0(aName, aRT)                             \
+  static aRT DEFAULTABI                                          \
+  RR_ ##aName ()                                                 \
+  {                                                              \
+    RecordReplayFunction(aName, aRT);                            \
+    events.RecordOrReplayValue(&rval);                           \
+    return rval;                                                 \
+  }
+
+#define RRFunctionTypes1(aName, aRT, aAT0)                       \
+  static aRT DEFAULTABI                                          \
+  RR_ ##aName (aAT0 a0)                                          \
+  {                                                              \
+    RecordReplayFunction(aName, aRT, a0);                        \
+    events.RecordOrReplayValue(&rval);                           \
+    return rval;                                                 \
+  }
+
+#define RRFunctionTypes2(aName, aRT, aAT0, aAT1)                 \
+  static aRT DEFAULTABI                                          \
+  RR_ ##aName (aAT0 a0, aAT1 a1)                                 \
+  {                                                              \
+    RecordReplayFunction(aName, aRT, a0, a1);                    \
+    events.RecordOrReplayValue(&rval);                           \
+    return rval;                                                 \
+  }
+
+#define RRFunctionTypes3(aName, aRT, aAT0, aAT1, aAT2)           \
+  static aRT DEFAULTABI                                          \
+  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2)                        \
+  {                                                              \
+    RecordReplayFunction(aName, aRT, a0, a1, a2);                \
+    events.RecordOrReplayValue(&rval);                           \
+    return rval;                                                 \
+  }
+
+#define RRFunctionTypes4(aName, aRT, aAT0, aAT1, aAT2, aAT3)     \
+  static aRT DEFAULTABI                                          \
+  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3)               \
+  {                                                              \
+    RecordReplayFunction(aName, aRT, a0, a1, a2, a3);            \
+    events.RecordOrReplayValue(&rval);                           \
+    return rval;                                                 \
+  }
+
+#define RRFunctionTypes5(aName, aRT, aAT0, aAT1, aAT2, aAT3,     \
+                         aAT4)                                   \
+  static aRT DEFAULTABI                                          \
+  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4)      \
+  {                                                              \
+    RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4);        \
+    events.RecordOrReplayValue(&rval);                           \
+    return rval;                                                 \
+  }
+
+#define RRFunctionTypes6(aName, aRT, aAT0, aAT1, aAT2, aAT3,     \
+                         aAT4, aAT5)                             \
+  static aRT DEFAULTABI                                          \
+  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4,      \
+               aAT5 a5)                                          \
+  {                                                              \
+    RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5);    \
+    events.RecordOrReplayValue(&rval);                           \
+    return rval;                                                 \
+  }
+
+#define RRFunctionTypes7(aName, aRT, aAT0, aAT1, aAT2, aAT3,     \
+                         aAT4, aAT5, aAT6)                       \
+  static aRT DEFAULTABI                                          \
+  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4,      \
+               aAT5 a5, aAT6 a6)                                 \
+  {                                                              \
+    RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5, a6); \
+    events.RecordOrReplayValue(&rval);                           \
+    return rval;                                                 \
+  }
+
+#define RRFunctionTypes8(aName, aRT, aAT0, aAT1, aAT2, aAT3,     \
+                         aAT4, aAT5, aAT6, aAT7)                 \
+  static aRT DEFAULTABI                                          \
+  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4,      \
+               aAT5 a5, aAT6 a6, aAT7 a7)                        \
+  {                                                              \
+    RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5, a6, a7); \
+    events.RecordOrReplayValue(&rval);                           \
+    return rval;                                                 \
+  }
+
+#define RRFunctionTypes9(aName, aRT, aAT0, aAT1, aAT2, aAT3,     \
+                         aAT4, aAT5, aAT6, aAT7, aAT8)           \
+  static aRT DEFAULTABI                                          \
+  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4,      \
+               aAT5 a5, aAT6 a6, aAT7 a7, aAT8 a8)               \
+  {                                                              \
+    RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5, a6, a7, a8); \
+    events.RecordOrReplayValue(&rval);                           \
+    return rval;                                                 \
+  }
+
+#define RRFunctionTypes10(aName, aRT, aAT0, aAT1, aAT2, aAT3,    \
+                          aAT4, aAT5, aAT6, aAT7, aAT8, aAT9)    \
+  static aRT DEFAULTABI                                          \
+  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4,      \
+               aAT5 a5, aAT6 a6, aAT7 a7, aAT8 a8, aAT9 a9)      \
+  {                                                              \
+    RecordReplayFunction(aName, aRT, a0, a1, a2, a3, a4, a5, a6, a7, a8, a9); \
+    events.RecordOrReplayValue(&rval);                           \
+    return rval;                                                 \
+  }
+
+#define RRFunctionTypesVoid1(aName, aAT0)                        \
+  static void DEFAULTABI                                         \
+  RR_ ##aName (aAT0 a0)                                          \
+  {                                                              \
+    RecordReplayFunctionVoid(aName, a0);                         \
+  }
+
+#define RRFunctionTypesVoid2(aName, aAT0, aAT1)                  \
+  static void DEFAULTABI                                         \
+  RR_ ##aName (aAT0 a0, aAT1 a1)                                 \
+  {                                                              \
+    RecordReplayFunctionVoid(aName, a0, a1);                     \
+  }
+
+#define RRFunctionTypesVoid3(aName, aAT0, aAT1, aAT2)            \
+  static void DEFAULTABI                                         \
+  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2)                        \
+  {                                                              \
+    RecordReplayFunctionVoid(aName, a0, a1, a2);                 \
+  }
+
+#define RRFunctionTypesVoid4(aName, aAT0, aAT1, aAT2, aAT3)      \
+  static void DEFAULTABI                                         \
+  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3)               \
+  {                                                              \
+    RecordReplayFunctionVoid(aName, a0, a1, a2, a3);             \
+  }
+
+#define RRFunctionTypesVoid5(aName, aAT0, aAT1, aAT2, aAT3, aAT4) \
+  static void DEFAULTABI                                         \
+  RR_ ##aName (aAT0 a0, aAT1 a1, aAT2 a2, aAT3 a3, aAT4 a4)      \
+  {                                                              \
+    RecordReplayFunctionVoid(aName, a0, a1, a2, a3, a4);         \
+  }
+
+// The following macros are used for functions that take and return scalar
+// values (not structs or floating point values) and do not record an error
+// anywhere.
+
+#define RRFunction0(aName) \
+  RRFunctionTypes0(aName, size_t)
+
+#define RRFunction1(aName) \
+  RRFunctionTypes1(aName, size_t, size_t)
+
+#define RRFunction2(aName) \
+  RRFunctionTypes2(aName, size_t, size_t, size_t)
+
+#define RRFunction3(aName) \
+  RRFunctionTypes3(aName, size_t, size_t, size_t, size_t)
+
+#define RRFunction4(aName) \
+  RRFunctionTypes4(aName, size_t, size_t, size_t, size_t, size_t)
+
+#define RRFunction5(aName) \
+  RRFunctionTypes5(aName, size_t, size_t, size_t, size_t, size_t, size_t)
+
+#define RRFunction6(aName) \
+  RRFunctionTypes6(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t)
+
+#define RRFunction7(aName) \
+  RRFunctionTypes7(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t)
+
+#define RRFunction8(aName) \
+  RRFunctionTypes8(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, \
+                   size_t)
+
+#define RRFunction9(aName) \
+  RRFunctionTypes9(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, \
+                   size_t, size_t)
+
+#define RRFunction10(aName) \
+  RRFunctionTypes10(aName, size_t, size_t, size_t, size_t, size_t, size_t, size_t, size_t, \
+                    size_t, size_t, size_t)
+
+// The following macros are used for functions that take scalar arguments and
+// do not return a value or record an error anywhere.
+
+#define RRFunctionVoid0(aName)                                   \
+  static void DEFAULTABI                                         \
+  RR_ ##aName ()                                                 \
+  {                                                              \
+    RecordReplayFunctionVoid(aName);                             \
+  }
+
+#define RRFunctionVoid1(aName) \
+  RRFunctionTypesVoid1(aName, size_t)
+
+#define RRFunctionVoid2(aName) \
+  RRFunctionTypesVoid2(aName, size_t, size_t)
+
+#define RRFunctionVoid3(aName) \
+  RRFunctionTypesVoid3(aName, size_t, size_t, size_t)
+
+#define RRFunctionVoid4(aName) \
+  RRFunctionTypesVoid4(aName, size_t, size_t, size_t, size_t)
+
+#define RRFunctionVoid5(aName) \
+  RRFunctionTypesVoid5(aName, size_t, size_t, size_t, size_t, size_t)
+
+// The following macros are used for functions that return a signed integer
+// value and record an error if the return value is negative.
+
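+// For illustration: POSIX-style calls such as read(2) return ssize_t and
+// signal failure with a negative value, so a redirection of that shape
+// could plausibly be declared as:
+//
+//   RRFunctionNegError3(read)
+//
+// producing RR_read(size_t a0, size_t a1, size_t a2), which records or
+// replays both the ssize_t result and, via RecordOrReplayHadErrorNegative,
+// whatever error state (e.g. errno) accompanies a negative return.
+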
+#define RRFunctionNegError0(aName)                               \
+  static ssize_t DEFAULTABI                                      \
+  RR_ ##aName ()                                                 \
+  {                                                              \
+    RecordReplayFunction(aName, ssize_t);                        \
+    RecordOrReplayHadErrorNegative(rrf);                         \
+    return rval;                                                 \
+  }
+
+#define RRFunctionNegError1(aName)                               \
+  static ssize_t DEFAULTABI                                      \
+  RR_ ##aName (size_t a0)                                        \
+  {                                                              \
+    RecordReplayFunction(aName, ssize_t, a0);                    \
+    RecordOrReplayHadErrorNegative(rrf);                         \
+    return rval;                                                 \
+  }
+
+#define RRFunctionNegError2(aName)                               \
+  static ssize_t DEFAULTABI                                      \
+  RR_ ##aName (size_t a0, size_t a1)                             \
+  {                                                              \
+    RecordReplayFunction(aName, ssize_t, a0, a1);                \
+    RecordOrReplayHadErrorNegative(rrf);                         \
+    return rval;                                                 \
+  }
+
+#define RRFunctionNegError3(aName)                               \
+  static ssize_t DEFAULTABI                                      \
+  RR_ ##aName (size_t a0, size_t a1, size_t a2)                  \
+  {                                                              \
+    RecordReplayFunction(aName, ssize_t, a0, a1, a2);            \
+    RecordOrReplayHadErrorNegative(rrf);                         \
+    return rval;                                                 \
+  }
+
+#define RRFunctionNegError4(aName)                               \
+  static ssize_t DEFAULTABI                                      \
+  RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3)       \
+  {                                                              \
+    RecordReplayFunction(aName, ssize_t, a0, a1, a2, a3);        \
+    RecordOrReplayHadErrorNegative(rrf);                         \
+    return rval;                                                 \
+  }
+
+#define RRFunctionNegError5(aName)                               \
+  static ssize_t DEFAULTABI                                      \
+  RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3,       \
+               size_t a4)                                        \
+  {                                                              \
+    RecordReplayFunction(aName, ssize_t, a0, a1, a2, a3, a4);    \
+    RecordOrReplayHadErrorNegative(rrf);                         \
+    return rval;                                                 \
+  }
+
+#define RRFunctionNegError6(aName)                               \
+  static ssize_t DEFAULTABI                                      \
+  RR_ ##aName (size_t a0, size_t a1, size_t a2, size_t a3,       \
+               size_t a4, size_t a5)                             \
+  {                                                              \
+    RecordReplayFunction(aName, ssize_t, a0, a1, a2, a3, a4, a5); \
+    RecordOrReplayHadErrorNegative(rrf);                         \
+    return rval;                                                 \
+  }
+
+// The following macros are used