Bug 1470795 Part 3 - Trivial renaming due to API changes, r=froydnj.
author Brian Hackett <bhackett1024@gmail.com>
Sun, 22 Jul 2018 11:57:38 +0000
changeset 469495 669eb891fa02815229756050399c754d0c2a611e
parent 469494 2500fc654564002eea1ba8f974e6f560a55e7835
child 469496 301403057435baf88a758f9176a3c7cf4844ea48
push id unknown
push user unknown
push date unknown
reviewers froydnj
bugs 1470795
milestone 63.0a1
toolkit/recordreplay/HashTable.cpp
toolkit/recordreplay/MemorySnapshot.cpp
toolkit/recordreplay/MemorySnapshot.h
toolkit/recordreplay/ProcessRecordReplay.cpp
toolkit/recordreplay/ProcessRedirectDarwin.cpp
toolkit/recordreplay/ProcessRewind.cpp
toolkit/recordreplay/ThreadSnapshot.cpp
toolkit/recordreplay/ThreadSnapshot.h
toolkit/recordreplay/ipc/Channel.h
toolkit/recordreplay/ipc/ChildProcess.cpp
toolkit/recordreplay/ipc/ParentIPC.cpp
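
The renaming consolidates the old AllocatedMemoryKind type and the TrackedMemoryKind / UntrackedMemoryKind::* constants into a single scoped MemoryKind enumeration, which is why call sites in the hunks below now cast with (size_t) before indexing arrays such as mMemoryBalance. A minimal sketch of the enumeration as those call sites assume it; the real definition is not part of this diff and presumably lives in ProcessRecordReplay.h, possibly with additional kinds:

// Hypothetical sketch (assumption): the consolidated memory-kind enumeration,
// listing only the enumerators referenced by this patch, in no particular order.
enum class MemoryKind {
  Tracked,             // memory included in heap snapshots and restored on rewind
  Generic,             // untracked allocations with no more specific kind
  FreeRegions,
  TrackedRegions,
  SortedDirtyPageSet,
  DirtyPageSet,
  PageCopy,
  ThreadSnapshot,
  Count
};

// Allocation entry points as used below, now taking the scoped enum directly.
void* AllocateMemory(size_t aSize, MemoryKind aKind);
void DeallocateMemory(void* aAddress, size_t aSize, MemoryKind aKind);
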
--- a/toolkit/recordreplay/HashTable.cpp
+++ b/toolkit/recordreplay/HashTable.cpp
@@ -105,22 +105,22 @@ public:
   StableHashTableInfo()
     : mMagic(MagicNumber)
     , mLastKey(nullptr)
     , mLastNewHash(0)
     , mHashGenerator(0)
     , mCallbackStorage(nullptr)
   {
     // Use AllocateMemory, as the result will have RWX permissions.
-    mCallbackStorage = (uint8_t*) AllocateMemory(CallbackStorageCapacity, TrackedMemoryKind);
+    mCallbackStorage = (uint8_t*) AllocateMemory(CallbackStorageCapacity, MemoryKind::Tracked);
   }
 
   ~StableHashTableInfo() {
     MOZ_ASSERT(mHashToKey.empty());
-    DeallocateMemory(mCallbackStorage, CallbackStorageCapacity, TrackedMemoryKind);
+    DeallocateMemory(mCallbackStorage, CallbackStorageCapacity, MemoryKind::Tracked);
   }
 
   bool AppearsValid() {
     return mMagic == MagicNumber;
   }
 
   void AddKey(HashNumber aOriginalHash, const void* aKey, HashNumber aNewHash) {
     HashToKeyMap::iterator iter = mHashToKey.find(aOriginalHash);
--- a/toolkit/recordreplay/MemorySnapshot.cpp
+++ b/toolkit/recordreplay/MemorySnapshot.cpp
@@ -1,18 +1,17 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "MemorySnapshot.h"
 
-#include "ipc/ChildIPC.h"
-#include "js/ReplayHooks.h"
+#include "ipc/ChildInternal.h"
 #include "mozilla/Maybe.h"
 #include "DirtyMemoryHandler.h"
 #include "InfallibleVector.h"
 #include "ProcessRecordReplay.h"
 #include "ProcessRewind.h"
 #include "SpinLock.h"
 #include "SplayTree.h"
 #include "Thread.h"
@@ -156,45 +155,45 @@ struct DirtyPage {
     static ssize_t compare(uint8_t* aBase, const DirtyPage& aPage) {
       return aBase - aPage.mBase;
     }
   };
 };
 
 // A set of dirty pages that can be searched quickly.
 typedef SplayTree<DirtyPage, DirtyPage::AddressSort,
-                  AllocPolicy<UntrackedMemoryKind::SortedDirtyPageSet>, 4> SortedDirtyPageSet;
+                  AllocPolicy<MemoryKind::SortedDirtyPageSet>, 4> SortedDirtyPageSet;
 
 // A set of dirty pages associated with some checkpoint.
 struct DirtyPageSet {
   // Checkpoint associated with this set.
   CheckpointId mCheckpoint;
 
   // All dirty pages in the set. Pages may be added or destroyed by the main
   // thread when all other threads are idle, by the dirty memory handler when
   // it is active and this is the active page set, and by the snapshot thread
   // which owns this set.
-  InfallibleVector<DirtyPage, 256, AllocPolicy<UntrackedMemoryKind::DirtyPageSet>> mPages;
+  InfallibleVector<DirtyPage, 256, AllocPolicy<MemoryKind::DirtyPageSet>> mPages;
 
   explicit DirtyPageSet(const CheckpointId& aCheckpoint)
     : mCheckpoint(aCheckpoint)
   {}
 };
 
 // Worklist used by each snapshot thread.
 struct SnapshotThreadWorklist {
   // Index into gMemoryInfo->mSnapshotWorklists of the thread.
   size_t mThreadIndex;
 
   // Record/replay ID of the thread.
   size_t mThreadId;
 
   // Sets of pages in the thread's worklist. Each set is for a different diff,
   // with the oldest checkpoints first.
-  InfallibleVector<DirtyPageSet, 256, AllocPolicy<UntrackedMemoryKind::Generic>> mSets;
+  InfallibleVector<DirtyPageSet, 256, AllocPolicy<MemoryKind::Generic>> mSets;
 };
 
 // Structure used to coordinate activity between the main thread and all
 // snapshot threads. The workflow with this structure is as follows:
 //
 // 1. The main thread calls ActivateBegin(), marking the condition as active
 //    and notifying each snapshot thread. The main thread blocks in this call.
 //
@@ -227,17 +226,17 @@ public:
 
 static const size_t NumSnapshotThreads = 8;
 
 // A set of free regions in the process. There are two of these, for the
 // free regions in tracked and untracked memory.
 class FreeRegionSet {
   // Kind of memory being managed. This also describes the memory used by the
   // set itself.
-  AllocatedMemoryKind mKind;
+  MemoryKind mKind;
 
   // Lock protecting contents of the structure.
   SpinLock mLock;
 
   // To avoid reentrancy issues when growing the set, a chunk of pages for
   // the splay tree is preallocated for use the next time the tree needs to
   // expand its size.
   static const size_t ChunkPages = 4;
@@ -272,22 +271,22 @@ class FreeRegionSet {
                     AllocatedMemoryRegion::SizeReverseSort,
                     MyAllocPolicy, ChunkPages> Tree;
   Tree mRegions;
 
   void InsertLockHeld(void* aAddress, size_t aSize, AutoSpinLock& aLockHeld);
   void* ExtractLockHeld(size_t aSize, AutoSpinLock& aLockHeld);
 
 public:
-  explicit FreeRegionSet(AllocatedMemoryKind aKind)
+  explicit FreeRegionSet(MemoryKind aKind)
     : mKind(aKind), mRegions(MyAllocPolicy(*this))
   {}
 
   // Get the single region set for a given memory kind.
-  static FreeRegionSet& Get(AllocatedMemoryKind aKind);
+  static FreeRegionSet& Get(MemoryKind aKind);
 
   // Add a free region to the set.
   void Insert(void* aAddress, size_t aSize);
 
   // Remove a free region of the specified size. If aAddress is specified then
   // this address will be prioritized, but a different pointer may be returned.
   // The resulting memory will be zeroed.
   void* Extract(void* aAddress, size_t aSize);
@@ -307,19 +306,19 @@ struct MemoryInfo {
   // issues.
   static const size_t MaxInitialUntrackedRegions = 256;
   AllocatedMemoryRegion mInitialUntrackedRegions[MaxInitialUntrackedRegions];
   SpinLock mInitialUntrackedRegionsLock;
 
   // All tracked memory in the process. This may be updated by any thread while
   // holding mTrackedRegionsLock.
   SplayTree<AllocatedMemoryRegion, AllocatedMemoryRegion::AddressSort,
-            AllocPolicy<UntrackedMemoryKind::TrackedRegions>, 4>
+            AllocPolicy<MemoryKind::TrackedRegions>, 4>
     mTrackedRegions;
-  InfallibleVector<AllocatedMemoryRegion, 512, AllocPolicy<UntrackedMemoryKind::TrackedRegions>>
+  InfallibleVector<AllocatedMemoryRegion, 512, AllocPolicy<MemoryKind::TrackedRegions>>
     mTrackedRegionsByAllocationOrder;
   SpinLock mTrackedRegionsLock;
 
   // Pages from |trackedRegions| modified since the last saved checkpoint.
   // Accessed by any thread (usually the dirty memory handler) when memory
   // changes are allowed, and by the main thread when memory changes are not
   // allowed.
   SortedDirtyPageSet mActiveDirty;
@@ -342,30 +341,30 @@ struct MemoryInfo {
   Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> mCountdown;
 
   // Information for timers.
   double mStartTime;
   uint32_t mTimeHits[(size_t) TimerKind::Count];
   double mTimeTotals[(size_t) TimerKind::Count];
 
   // Information for memory allocation.
-  Atomic<ssize_t, Relaxed, Behavior::DontPreserve> mMemoryBalance[UntrackedMemoryKind::Count];
+  Atomic<ssize_t, Relaxed, Behavior::DontPreserve> mMemoryBalance[(size_t) MemoryKind::Count];
 
   // Recent dirty memory faults.
   void* mDirtyMemoryFaults[50];
 
   // Whether RecordReplayDirective may crash this process.
   bool mIntentionalCrashesAllowed;
 
   // Whether the CrashSoon directive has been given to this process.
   bool mCrashSoon;
 
   MemoryInfo()
     : mMemoryChangesAllowed(true)
-    , mFreeUntrackedRegions(UntrackedMemoryKind::FreeRegions)
+    , mFreeUntrackedRegions(MemoryKind::FreeRegions)
     , mStartTime(CurrentTime())
     , mIntentionalCrashesAllowed(true)
   {
     // The singleton MemoryInfo is allocated with zeroed memory, so other
     // fields do not need explicit initialization.
   }
 };
 
@@ -478,17 +477,17 @@ RecordReplayInterface_InternalRecordRepl
   case Directive::MaybeCrash:
     if (gMemoryInfo->mIntentionalCrashesAllowed && gMemoryInfo->mCrashSoon) {
       PrintSpew("Intentionally Crashing!\n");
       MOZ_CRASH("RecordReplayDirective intentional crash");
     }
     gMemoryInfo->mCrashSoon = false;
     break;
   case Directive::AlwaysSaveTemporaryCheckpoints:
-    JS::replay::hooks.alwaysSaveTemporaryCheckpoints();
+    navigation::AlwaysSaveTemporaryCheckpoints();
     break;
   case Directive::AlwaysMarkMajorCheckpoints:
     child::NotifyAlwaysMarkMajorCheckpoints();
     break;
   default:
     MOZ_CRASH("Unknown directive");
   }
 }
@@ -553,24 +552,24 @@ SnapshotThreadCondition::WaitUntilNoLong
 ///////////////////////////////////////////////////////////////////////////////
 // Snapshot Page Allocation
 ///////////////////////////////////////////////////////////////////////////////
 
 // Get a page in untracked memory that can be used as a copy of a tracked page.
 static uint8_t*
 AllocatePageCopy()
 {
-  return (uint8_t*) AllocateMemory(PageSize, UntrackedMemoryKind::PageCopy);
+  return (uint8_t*) AllocateMemory(PageSize, MemoryKind::PageCopy);
 }
 
 // Free a page allocated by AllocatePageCopy.
 static void
 FreePageCopy(uint8_t* aPage)
 {
-  DeallocateMemory(aPage, PageSize, UntrackedMemoryKind::PageCopy);
+  DeallocateMemory(aPage, PageSize, MemoryKind::PageCopy);
 }
 
 ///////////////////////////////////////////////////////////////////////////////
 // Page Fault Handling
 ///////////////////////////////////////////////////////////////////////////////
 
 void
 MemoryMove(void* aDst, const void* aSrc, size_t aSize)
@@ -870,17 +869,17 @@ ProcessAllInitialMemoryRegions()
 }
 
 ///////////////////////////////////////////////////////////////////////////////
 // Free Region Management
 ///////////////////////////////////////////////////////////////////////////////
 
 // All memory in gMemoryInfo->mTrackedRegions that is not in use at the current
 // point in execution.
-static FreeRegionSet gFreeRegions(TrackedMemoryKind);
+static FreeRegionSet gFreeRegions(MemoryKind::Tracked);
 
 // The size of gMemoryInfo->mTrackedRegionsByAllocationOrder we expect to see
 // at the point of the last saved checkpoint.
 static size_t gNumTrackedRegions;
 
 static void
 UpdateNumTrackedRegionsForSnapshot()
 {
@@ -897,19 +896,19 @@ FixupFreeRegionsAfterRewind()
   for (size_t i = gNumTrackedRegions; i < newTrackedRegions; i++) {
     const AllocatedMemoryRegion& region = gMemoryInfo->mTrackedRegionsByAllocationOrder[i];
     gFreeRegions.Insert(region.mBase, region.mSize);
   }
   gNumTrackedRegions = newTrackedRegions;
 }
 
 /* static */ FreeRegionSet&
-FreeRegionSet::Get(AllocatedMemoryKind aKind)
+FreeRegionSet::Get(MemoryKind aKind)
 {
-  return (aKind == TrackedMemoryKind) ? gFreeRegions : gMemoryInfo->mFreeUntrackedRegions;
+  return (aKind == MemoryKind::Tracked) ? gFreeRegions : gMemoryInfo->mFreeUntrackedRegions;
 }
 
 void*
 FreeRegionSet::TakeNextChunk()
 {
   MOZ_RELEASE_ASSERT(mNextChunk);
   void* res = mNextChunk;
   mNextChunk = nullptr;
@@ -926,17 +925,17 @@ void
 FreeRegionSet::MaybeRefillNextChunk(AutoSpinLock& aLockHeld)
 {
   if (mNextChunk) {
     return;
   }
 
   // Look for a free region we can take the next chunk from.
   size_t size = ChunkPages * PageSize;
-  gMemoryInfo->mMemoryBalance[mKind] += size;
+  gMemoryInfo->mMemoryBalance[(size_t) mKind] += size;
 
   mNextChunk = ExtractLockHeld(size, aLockHeld);
 
   if (!mNextChunk) {
     // Allocate memory from the system.
     mNextChunk = DirectAllocateMemory(nullptr, size);
     RegisterAllocatedMemory(mNextChunk, size, mKind);
   }
@@ -1021,24 +1020,24 @@ FreeRegionSet::Intersects(void* aAddress
   return false;
 }
 
 ///////////////////////////////////////////////////////////////////////////////
 // Memory Management
 ///////////////////////////////////////////////////////////////////////////////
 
 void
-RegisterAllocatedMemory(void* aBaseAddress, size_t aSize, AllocatedMemoryKind aKind)
+RegisterAllocatedMemory(void* aBaseAddress, size_t aSize, MemoryKind aKind)
 {
   MOZ_RELEASE_ASSERT(aBaseAddress == PageBase(aBaseAddress));
   MOZ_RELEASE_ASSERT(aSize == RoundupSizeToPageBoundary(aSize));
 
   uint8_t* aAddress = reinterpret_cast<uint8_t*>(aBaseAddress);
 
-  if (aKind != TrackedMemoryKind) {
+  if (aKind != MemoryKind::Tracked) {
     if (!HasSavedCheckpoint()) {
       AddInitialUntrackedMemoryRegion(aAddress, aSize);
     }
   } else if (HasSavedCheckpoint()) {
     EnsureMemoryChangesAllowed();
     DirectWriteProtectMemory(aAddress, aSize, true);
     AddTrackedRegion(aAddress, aSize, true);
   }
@@ -1091,89 +1090,85 @@ RestoreWritableFixedMemory(void* aAddres
     uint8_t* page = (uint8_t*)aAddress + offset;
     if (gMemoryInfo->mActiveDirty.maybeLookup(page)) {
       DirectUnprotectMemory(page, PageSize, true);
     }
   }
 }
 
 void*
-AllocateMemoryTryAddress(void* aAddress, size_t aSize, AllocatedMemoryKind aKind)
+AllocateMemoryTryAddress(void* aAddress, size_t aSize, MemoryKind aKind)
 {
   MOZ_RELEASE_ASSERT(aAddress == PageBase(aAddress));
   aSize = RoundupSizeToPageBoundary(aSize);
 
   if (gMemoryInfo) {
-    gMemoryInfo->mMemoryBalance[aKind] += aSize;
+    gMemoryInfo->mMemoryBalance[(size_t) aKind] += aSize;
   }
 
   if (HasSavedCheckpoint()) {
     if (void* res = FreeRegionSet::Get(aKind).Extract(aAddress, aSize)) {
       return res;
     }
   }
 
   void* res = DirectAllocateMemory(aAddress, aSize);
   RegisterAllocatedMemory(res, aSize, aKind);
   return res;
 }
 
-extern "C" {
-
-MOZ_EXPORT void*
-RecordReplayInterface_AllocateMemory(size_t aSize, AllocatedMemoryKind aKind)
+void*
+AllocateMemory(size_t aSize, MemoryKind aKind)
 {
   if (!IsReplaying()) {
     return DirectAllocateMemory(nullptr, aSize);
   }
   return AllocateMemoryTryAddress(nullptr, aSize, aKind);
 }
 
-MOZ_EXPORT void
-RecordReplayInterface_DeallocateMemory(void* aAddress, size_t aSize, AllocatedMemoryKind aKind)
+void
+DeallocateMemory(void* aAddress, size_t aSize, MemoryKind aKind)
 {
   // Round the supplied region to the containing page boundaries.
   aSize += (uint8_t*) aAddress - PageBase(aAddress);
   aAddress = PageBase(aAddress);
   aSize = RoundupSizeToPageBoundary(aSize);
 
   if (!aAddress || !aSize) {
     return;
   }
 
   if (gMemoryInfo) {
-    gMemoryInfo->mMemoryBalance[aKind] -= aSize;
+    gMemoryInfo->mMemoryBalance[(size_t) aKind] -= aSize;
   }
 
   // Memory is returned to the system before saving the first checkpoint.
   if (!HasSavedCheckpoint()) {
-    if (IsReplaying() && aKind != TrackedMemoryKind) {
+    if (IsReplaying() && aKind != MemoryKind::Tracked) {
       RemoveInitialUntrackedRegion((uint8_t*) aAddress, aSize);
     }
     DirectDeallocateMemory(aAddress, aSize);
     return;
   }
 
-  if (aKind == TrackedMemoryKind) {
+  if (aKind == MemoryKind::Tracked) {
     // For simplicity, all free regions must be executable, so ignore deallocated
     // memory in regions that are not executable.
     bool executable;
     if (!IsTrackedAddress(aAddress, &executable) || !executable) {
       return;
     }
   }
 
   // Mark this region as free, but do not unmap it. It will become usable for
   // later allocations, but will not need to be remapped if we end up
   // rewinding to a point where this memory was in use.
   FreeRegionSet::Get(aKind).Insert(aAddress, aSize);
 }
 
-} // extern "C"
-
 ///////////////////////////////////////////////////////////////////////////////
 // Snapshot Threads
 ///////////////////////////////////////////////////////////////////////////////
 
 // While on a snapshot thread, restore the contents of all pages belonging to
 // this thread which were modified since the last recorded diff snapshot.
 static void
 SnapshotThreadRestoreLastDiffSnapshot(SnapshotThreadWorklist* aWorklist)
@@ -1268,17 +1263,17 @@ AddDirtyPageToWorklist(uint8_t* aAddress
 ///////////////////////////////////////////////////////////////////////////////
 // Snapshot Interface
 ///////////////////////////////////////////////////////////////////////////////
 
 void
 InitializeMemorySnapshots()
 {
   MOZ_RELEASE_ASSERT(gMemoryInfo == nullptr);
-  void* memory = AllocateMemory(sizeof(MemoryInfo), UntrackedMemoryKind::Generic);
+  void* memory = AllocateMemory(sizeof(MemoryInfo), MemoryKind::Generic);
   gMemoryInfo = new(memory) MemoryInfo();
 
   // Mark gMemoryInfo as untracked. See AddInitialUntrackedMemoryRegion.
   AddInitialUntrackedMemoryRegion(reinterpret_cast<uint8_t*>(memory), sizeof(MemoryInfo));
 }
 
 void
 InitializeCountdownThread()
--- a/toolkit/recordreplay/MemorySnapshot.h
+++ b/toolkit/recordreplay/MemorySnapshot.h
@@ -35,21 +35,21 @@ namespace recordreplay {
 void CheckFixedMemory(void* aAddress, size_t aSize);
 
 // After marking a block of memory in a fixed allocation as non-writable,
 // restore writability to any dirty pages in the range.
 void RestoreWritableFixedMemory(void* aAddress, size_t aSize);
 
 // Allocate memory, trying to use a specific address if provided but only if
 // it is free.
-void* AllocateMemoryTryAddress(void* aAddress, size_t aSize, AllocatedMemoryKind aKind);
+void* AllocateMemoryTryAddress(void* aAddress, size_t aSize, MemoryKind aKind);
 
 // Note a range of memory that was just allocated from the system, and the
 // kind of memory allocation that was performed.
-void RegisterAllocatedMemory(void* aBaseAddress, size_t aSize, AllocatedMemoryKind aKind);
+void RegisterAllocatedMemory(void* aBaseAddress, size_t aSize, MemoryKind aKind);
 
 // Initialize the memory snapshots system.
 void InitializeMemorySnapshots();
 
 // Take the first heap memory snapshot.
 void TakeFirstMemorySnapshot();
 
 // Take a differential heap memory snapshot compared to the last one,
--- a/toolkit/recordreplay/ProcessRecordReplay.cpp
+++ b/toolkit/recordreplay/ProcessRecordReplay.cpp
@@ -1,17 +1,17 @@
 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "ProcessRecordReplay.h"
 
-#include "ipc/ChildIPC.h"
+#include "ipc/ChildInternal.h"
 #include "mozilla/Compression.h"
 #include "mozilla/Maybe.h"
 #include "mozilla/Sprintf.h"
 #include "mozilla/StackWalk.h"
 #include "mozilla/StaticMutex.h"
 #include "DirtyMemoryHandler.h"
 #include "Lock.h"
 #include "MemorySnapshot.h"
@@ -216,17 +216,17 @@ static size_t gNumEndpoints;
 
 void
 FlushRecording()
 {
   MOZ_RELEASE_ASSERT(IsRecording());
   MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
 
   // Save the endpoint of the recording.
-  JS::replay::ExecutionPoint endpoint = JS::replay::hooks.getRecordingEndpoint();
+  js::ExecutionPoint endpoint = navigation::GetRecordingEndpoint();
   Stream* endpointStream = gRecordingFile->OpenStream(StreamName::Main, 0);
   endpointStream->WriteScalar(++gNumEndpoints);
   endpointStream->WriteBytes(&endpoint, sizeof(endpoint));
 
   gRecordingFile->PreventStreamWrites();
 
   gRecordingFile->Flush();
 
@@ -269,20 +269,20 @@ HitRecordingEndpoint()
   // The debugger will call this method in a loop, so we don't have to do
   // anything fancy to try to get the most up to date endpoint. As long as we
   // can make some progress in attempting to find a later endpoint, we can
   // return control to the debugger.
 
   // Check if there is a new endpoint in the endpoint data stream.
   Stream* endpointStream = gRecordingFile->OpenStream(StreamName::Main, 0);
   if (!endpointStream->AtEnd()) {
-    JS::replay::ExecutionPoint endpoint;
+    js::ExecutionPoint endpoint;
     size_t index = endpointStream->ReadScalar();
     endpointStream->ReadBytes(&endpoint, sizeof(endpoint));
-    JS::replay::hooks.setRecordingEndpoint(index, endpoint);
+    navigation::SetRecordingEndpoint(index, endpoint);
     return true;
   }
 
   // Check if there is more data in the recording.
   if (LoadNextRecordingIndex()) {
     return true;
   }
 
@@ -303,36 +303,32 @@ HitEndOfRecording()
     MOZ_RELEASE_ASSERT(found);
   } else {
     // Non-main threads may wait until more recording data is loaded by the
     // main thread.
     Thread::Wait();
   }
 }
 
-extern "C" {
-
-MOZ_EXPORT bool
-RecordReplayInterface_SpewEnabled()
+bool
+SpewEnabled()
 {
   return gSpewEnabled;
 }
 
-MOZ_EXPORT void
-RecordReplayInterface_InternalPrint(const char* aFormat, va_list aArgs)
+void
+InternalPrint(const char* aFormat, va_list aArgs)
 {
   char buf1[2048];
   VsprintfLiteral(buf1, aFormat, aArgs);
   char buf2[2048];
   SprintfLiteral(buf2, "Spew[%d]: %s", gPid, buf1);
   DirectPrint(buf2);
 }
 
-} // extern "C"
-
 ///////////////////////////////////////////////////////////////////////////////
 // Record/Replay Assertions
 ///////////////////////////////////////////////////////////////////////////////
 
 struct StackWalkData
 {
   char* mBuf;
   size_t mSize;
--- a/toolkit/recordreplay/ProcessRedirectDarwin.cpp
+++ b/toolkit/recordreplay/ProcessRedirectDarwin.cpp
@@ -633,31 +633,31 @@ RR_mmap(void* aAddress, size_t aSize, in
       if (!HasSavedCheckpoint()) {
         // Make sure this memory region is writable.
         OriginalCall(mprotect, int, aAddress, aSize, PROT_READ | PROT_WRITE | PROT_EXEC);
       }
       memset(aAddress, 0, aSize);
       memory = aAddress;
     } else {
       memory = AllocateMemoryTryAddress(aAddress, RoundupSizeToPageBoundary(aSize),
-                                        TrackedMemoryKind);
+                                        MemoryKind::Tracked);
     }
   } else {
     // We have to call mmap itself, which can change memory protection flags
     // for memory that is already allocated. If we haven't saved a checkpoint
     // then this is no problem, but after saving a checkpoint we have to make
     // sure that protection flags are what we expect them to be.
     int newProt = HasSavedCheckpoint() ? (PROT_READ | PROT_EXEC) : aProt;
     memory = OriginalCall(mmap, void*, aAddress, aSize, newProt, aFlags, aFd, aOffset);
 
     if (aFlags & MAP_FIXED) {
       MOZ_RELEASE_ASSERT(memory == aAddress);
       RestoreWritableFixedMemory(memory, RoundupSizeToPageBoundary(aSize));
     } else if (memory && memory != (void*)-1) {
-      RegisterAllocatedMemory(memory, RoundupSizeToPageBoundary(aSize), TrackedMemoryKind);
+      RegisterAllocatedMemory(memory, RoundupSizeToPageBoundary(aSize), MemoryKind::Tracked);
     }
   }
 
   if (!(aFlags & MAP_ANON) && !AreThreadEventsPassedThrough()) {
     // Include the data just mapped in the recording.
     MOZ_RELEASE_ASSERT(memory && memory != (void*)-1);
     RecordReplayAssert("mmap");
     MOZ_RELEASE_ASSERT(aSize == RecordReplayValue(aSize));
@@ -665,17 +665,17 @@ RR_mmap(void* aAddress, size_t aSize, in
   }
 
   return memory;
 }
 
 static ssize_t
 RR_munmap(void* aAddress, size_t aSize)
 {
-  DeallocateMemory(aAddress, aSize, TrackedMemoryKind);
+  DeallocateMemory(aAddress, aSize, MemoryKind::Tracked);
   return 0;
 }
 
 static ssize_t
 RR_read(int aFd, void* aBuf, size_t aCount)
 {
   RecordReplayFunction(read, ssize_t, aFd, aBuf, aCount);
   if (RecordOrReplayHadErrorNegative(rrf)) {
@@ -1500,24 +1500,24 @@ RR_sandbox_init_with_parameters(const ch
   return OriginalCall(sandbox_init_with_parameters, ssize_t,
                       aProfile, aFlags, aParameters, aErrorBuf);
 }
 
 static kern_return_t
 RR_mach_vm_allocate(vm_map_t aTarget, mach_vm_address_t* aAddress,
                     mach_vm_size_t aSize, int aFlags)
 {
-  *aAddress = (mach_vm_address_t) AllocateMemory(aSize, TrackedMemoryKind);
+  *aAddress = (mach_vm_address_t) AllocateMemory(aSize, MemoryKind::Tracked);
   return KERN_SUCCESS;
 }
 
 static kern_return_t
 RR_mach_vm_deallocate(vm_map_t aTarget, mach_vm_address_t aAddress, mach_vm_size_t aSize)
 {
-  DeallocateMemory((void*) aAddress, aSize, TrackedMemoryKind);
+  DeallocateMemory((void*) aAddress, aSize, MemoryKind::Tracked);
   return KERN_SUCCESS;
 }
 
 static kern_return_t
 RR_mach_vm_protect(vm_map_t aTarget, mach_vm_address_t aAddress, mach_vm_size_t aSize,
                    boolean_t aSetMaximum, vm_prot_t aNewProtection)
 {
   if (!HasSavedCheckpoint()) {
--- a/toolkit/recordreplay/ProcessRewind.cpp
+++ b/toolkit/recordreplay/ProcessRewind.cpp
@@ -2,27 +2,24 @@
 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #include "ProcessRewind.h"
 
 #include "nsString.h"
+#include "ipc/ChildInternal.h"
 #include "mozilla/dom/ScriptSettings.h"
 #include "mozilla/StaticMutex.h"
 #include "InfallibleVector.h"
 #include "MemorySnapshot.h"
 #include "Monitor.h"
 #include "ProcessRecordReplay.h"
 #include "ThreadSnapshot.h"
-#include "prcvar.h"
-
-#include <setjmp.h>
-#include <sys/time.h>
 
 namespace mozilla {
 namespace recordreplay {
 
 // Information about the current rewinding state. The contents of this structure
 // are in untracked memory.
 struct RewindInfo
 {
@@ -30,52 +27,50 @@ struct RewindInfo
   CheckpointId mLastCheckpoint;
 
   // Whether this is the active child process. See the comment under
   // 'Child Roles' in ParentIPC.cpp.
   bool mIsActiveChild;
 
   // Checkpoints which have been saved. This includes only entries from
   // mShouldSaveCheckpoints, plus all temporary checkpoints.
-  InfallibleVector<SavedCheckpoint, 1024, AllocPolicy<UntrackedMemoryKind::Generic>> mSavedCheckpoints;
+  InfallibleVector<SavedCheckpoint, 1024, AllocPolicy<MemoryKind::Generic>> mSavedCheckpoints;
 
   // Unsorted list of checkpoints which the middleman has instructed us to
   // save. All those equal to or prior to mLastCheckpoint will have been saved.
-  InfallibleVector<size_t, 1024, AllocPolicy<UntrackedMemoryKind::Generic>> mShouldSaveCheckpoints;
+  InfallibleVector<size_t, 1024, AllocPolicy<MemoryKind::Generic>> mShouldSaveCheckpoints;
 };
 
 static RewindInfo* gRewindInfo;
 
 // Lock for managing pending main thread callbacks.
 static Monitor* gMainThreadCallbackMonitor;
 
 // Callbacks to execute on the main thread, in FIFO order. Protected by
 // gMainThreadCallbackMonitor.
 static StaticInfallibleVector<std::function<void()>> gMainThreadCallbacks;
 
 void
 InitializeRewindState()
 {
   MOZ_RELEASE_ASSERT(gRewindInfo == nullptr);
-  void* memory = AllocateMemory(sizeof(RewindInfo), UntrackedMemoryKind::Generic);
+  void* memory = AllocateMemory(sizeof(RewindInfo), MemoryKind::Generic);
   gRewindInfo = new(memory) RewindInfo();
 
   gMainThreadCallbackMonitor = new Monitor();
 }
 
 static bool
 CheckpointPrecedes(const CheckpointId& aFirst, const CheckpointId& aSecond)
 {
   return aFirst.mNormal < aSecond.mNormal || aFirst.mTemporary < aSecond.mTemporary;
 }
 
-extern "C" {
-
-MOZ_EXPORT void
-RecordReplayInterface_RestoreCheckpointAndResume(const CheckpointId& aCheckpoint)
+void
+RestoreCheckpointAndResume(const CheckpointId& aCheckpoint)
 {
   MOZ_RELEASE_ASSERT(IsReplaying());
   MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
   MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
   MOZ_RELEASE_ASSERT(aCheckpoint == gRewindInfo->mLastCheckpoint ||
                      CheckpointPrecedes(aCheckpoint, gRewindInfo->mLastCheckpoint));
 
   // Make sure we don't lose pending main thread callbacks due to rewinding.
@@ -110,56 +105,34 @@ RecordReplayInterface_RestoreCheckpointA
             (end - start) / 1000000.0);
 
   // Finally, let threads restore themselves to their stacks at the checkpoint
   // we are rewinding to.
   RestoreAllThreads(gRewindInfo->mSavedCheckpoints.back());
   Unreachable();
 }
 
-static BeforeCheckpointHook gBeforeCheckpointHook;
-static AfterCheckpointHook gAfterCheckpointHook;
-
-MOZ_EXPORT void
-RecordReplayInterface_SetCheckpointHooks(BeforeCheckpointHook aBeforeCheckpoint,
-                                         AfterCheckpointHook aAfterCheckpoint)
-{
-  gBeforeCheckpointHook = aBeforeCheckpoint;
-  gAfterCheckpointHook = aAfterCheckpoint;
-}
-
-} // extern "C"
-
 void
 SetSaveCheckpoint(size_t aCheckpoint, bool aSave)
 {
   MOZ_RELEASE_ASSERT(aCheckpoint > gRewindInfo->mLastCheckpoint.mNormal);
   VectorAddOrRemoveEntry(gRewindInfo->mShouldSaveCheckpoints, aCheckpoint, aSave);
 }
 
-extern "C" {
-
-// Mark a checkpoint, which we might or might not save.
-MOZ_EXPORT bool
-RecordReplayInterface_NewCheckpoint(bool aTemporary)
+bool
+NewCheckpoint(bool aTemporary)
 {
   MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
   MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
   MOZ_RELEASE_ASSERT(IsReplaying() || !aTemporary);
 
-  gBeforeCheckpointHook();
+  navigation::BeforeCheckpoint();
 
   // Get the ID of the new checkpoint.
-  CheckpointId checkpoint = gRewindInfo->mLastCheckpoint;
-  if (aTemporary) {
-    checkpoint.mTemporary++;
-  } else {
-    checkpoint.mNormal++;
-    checkpoint.mTemporary = 0;
-  }
+  CheckpointId checkpoint = gRewindInfo->mLastCheckpoint.NextCheckpoint(aTemporary);
 
   // Save all checkpoints the middleman tells us to, and temporary checkpoints
   // (which the middleman never knows about).
   bool save = aTemporary
            || VectorContains(gRewindInfo->mShouldSaveCheckpoints, checkpoint.mNormal);
   bool reachedCheckpoint = true;
 
   if (save) {
@@ -197,58 +170,50 @@ RecordReplayInterface_NewCheckpoint(bool
       WaitForIdleThreadsToRestoreTheirStacks();
     }
 
     Thread::ResumeIdleThreads();
   }
 
   gRewindInfo->mLastCheckpoint = checkpoint;
 
-  AutoDisallowThreadEvents disallow;
-
-  dom::AutoJSAPI jsapi;
-  jsapi.Init();
-  gAfterCheckpointHook(checkpoint);
+  navigation::AfterCheckpoint(checkpoint);
 
   return reachedCheckpoint;
 }
 
 static bool gRecordingDiverged;
 static bool gUnhandledDivergeAllowed;
 
-MOZ_EXPORT void
-RecordReplayInterface_DivergeFromRecording()
+void
+DivergeFromRecording()
 {
   MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
   MOZ_RELEASE_ASSERT(IsReplaying());
   gRecordingDiverged = true;
   gUnhandledDivergeAllowed = true;
 }
 
+extern "C" {
+
 MOZ_EXPORT bool
 RecordReplayInterface_InternalHasDivergedFromRecording()
 {
   return Thread::CurrentIsMainThread() && gRecordingDiverged;
 }
 
-MOZ_EXPORT void
-RecordReplayInterface_DisallowUnhandledDivergeFromRecording()
+} // extern "C"
+
+void
+DisallowUnhandledDivergeFromRecording()
 {
   MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
   gUnhandledDivergeAllowed = false;
 }
 
-MFBT_API void
-RecordReplayInterface_SaveTemporaryCheckpoint()
-{
-  NewCheckpoint(/* aTemporary = */ true);
-}
-
-} // extern "C"
-
 void
 EnsureNotDivergedFromRecording()
 {
   MOZ_RELEASE_ASSERT(!AreThreadEventsPassedThrough());
   if (HasDivergedFromRecording()) {
     MOZ_RELEASE_ASSERT(gUnhandledDivergeAllowed);
     PrintSpew("Unhandled recording divergence, restoring checkpoint...\n");
     RestoreCheckpointAndResume(gRewindInfo->mSavedCheckpoints.back().mCheckpoint);
@@ -330,28 +295,24 @@ PauseMainThreadAndInvokeCallback(const s
     gMainThreadCallbackMonitor->Notify();
   }
 
   if (Thread::CurrentIsMainThread()) {
     PauseMainThreadAndServiceCallbacks();
   }
 }
 
-extern "C" {
-
-MOZ_EXPORT void
-RecordReplayInterface_ResumeExecution()
+void
+ResumeExecution()
 {
   MonitorAutoLock lock(*gMainThreadCallbackMonitor);
   gMainThreadShouldPause = false;
   gMainThreadCallbackMonitor->Notify();
 }
 
-} // extern "C"
-
 void
 SetIsActiveChild(bool aActive)
 {
   gRewindInfo->mIsActiveChild = aActive;
 }
 
 bool
 IsActiveChild()
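
The explicit temporary/normal checkpoint bookkeeping removed from NewCheckpoint above is delegated to CheckpointId::NextCheckpoint. A sketch of that method as the call site assumes it, mirroring the inline logic this hunk removes; the actual declaration lives in a header (presumably ProcessRewind.h) outside this diff:

// Hypothetical sketch (assumption): advance a CheckpointId, matching the logic
// previously inlined in NewCheckpoint. Temporary checkpoints bump mTemporary;
// normal checkpoints bump mNormal and reset the temporary counter.
CheckpointId
CheckpointId::NextCheckpoint(bool aTemporary) const
{
  CheckpointId next = *this;
  if (aTemporary) {
    next.mTemporary++;
  } else {
    next.mNormal++;
    next.mTemporary = 0;
  }
  return next;
}
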
--- a/toolkit/recordreplay/ThreadSnapshot.cpp
+++ b/toolkit/recordreplay/ThreadSnapshot.cpp
@@ -52,30 +52,30 @@ struct ThreadState {
 // state when it is no longer idle. This also stores restore info for the
 // main thread, which immediately updates its state when restoring checkpoints.
 static ThreadState* gThreadState;
 
 void
 InitializeThreadSnapshots(size_t aNumThreads)
 {
   gThreadState = (ThreadState*) AllocateMemory(aNumThreads * sizeof(ThreadState),
-                                               UntrackedMemoryKind::ThreadSnapshot);
+                                               MemoryKind::ThreadSnapshot);
 
   jmp_buf buf;
   if (setjmp(buf) == 0) {
     longjmp(buf, 1);
   }
   ThreadYield();
 }
 
 static void
 ClearThreadState(ThreadState* aInfo)
 {
   MOZ_RELEASE_ASSERT(aInfo->mShouldRestore);
-  DeallocateMemory(aInfo->mStackContents, aInfo->mStackBytes, UntrackedMemoryKind::ThreadSnapshot);
+  DeallocateMemory(aInfo->mStackContents, aInfo->mStackBytes, MemoryKind::ThreadSnapshot);
   aInfo->mShouldRestore = false;
   aInfo->mStackContents = nullptr;
   aInfo->mStackBytes = 0;
 }
 
 extern "C" {
 
 extern int
@@ -216,17 +216,17 @@ SaveThreadStack(SavedThreadStack& aStack
 
   uint8_t* stackPointer = (uint8_t*) info.mStackPointer;
   uint8_t* stackTop = thread->StackBase() + thread->StackSize();
   MOZ_RELEASE_ASSERT(stackTop >= stackPointer);
   size_t stackBytes = stackTop - stackPointer;
 
   MOZ_RELEASE_ASSERT(stackBytes >= info.mStackTopBytes);
 
-  aStack.mStack = (uint8_t*) AllocateMemory(stackBytes, UntrackedMemoryKind::ThreadSnapshot);
+  aStack.mStack = (uint8_t*) AllocateMemory(stackBytes, MemoryKind::ThreadSnapshot);
   aStack.mStackBytes = stackBytes;
 
   MemoryMove(aStack.mStack, info.mStackTop, info.mStackTopBytes);
   MemoryMove(aStack.mStack + info.mStackTopBytes,
              stackPointer + info.mStackTopBytes, stackBytes - info.mStackTopBytes);
 }
 
 static void
@@ -236,17 +236,17 @@ RestoreStackForLoadingByThread(const Sav
   MOZ_RELEASE_ASSERT(!info.mShouldRestore);
 
   info.mStackPointer = aStack.mStackPointer;
   MemoryMove(info.mRegisters, aStack.mRegisters, sizeof(jmp_buf));
 
   info.mStackBytes = aStack.mStackBytes;
 
   uint8_t* stackContents =
-    (uint8_t*) AllocateMemory(info.mStackBytes, UntrackedMemoryKind::ThreadSnapshot);
+    (uint8_t*) AllocateMemory(info.mStackBytes, MemoryKind::ThreadSnapshot);
   MemoryMove(stackContents, aStack.mStack, aStack.mStackBytes);
   info.mStackContents = stackContents;
   info.mShouldRestore = true;
 }
 
 bool
 ShouldRestoreThreadStack(size_t aId)
 {
--- a/toolkit/recordreplay/ThreadSnapshot.h
+++ b/toolkit/recordreplay/ThreadSnapshot.h
@@ -3,16 +3,17 @@
 /* This Source Code Form is subject to the terms of the Mozilla Public
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef mozilla_recordreplay_ThreadSnapshot_h
 #define mozilla_recordreplay_ThreadSnapshot_h
 
 #include "File.h"
+#include "ProcessRewind.h"
 #include "Thread.h"
 
 namespace mozilla {
 namespace recordreplay {
 
 // Thread Snapshots Overview.
 //
 // The functions below are used when a thread saves or restores its stack and
@@ -65,17 +66,17 @@ struct SavedThreadStack
 
   SavedThreadStack()
   {
     PodZero(this);
   }
 
   void ReleaseContents() {
     if (mStackBytes) {
-      DeallocateMemory(mStack, mStackBytes, UntrackedMemoryKind::ThreadSnapshot);
+      DeallocateMemory(mStack, mStackBytes, MemoryKind::ThreadSnapshot);
     }
   }
 };
 
 struct SavedCheckpoint
 {
   CheckpointId mCheckpoint;
   SavedThreadStack mStacks[MaxRecordedThreadId];
--- a/toolkit/recordreplay/ipc/Channel.h
+++ b/toolkit/recordreplay/ipc/Channel.h
@@ -4,21 +4,21 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef mozilla_recordreplay_Channel_h
 #define mozilla_recordreplay_Channel_h
 
 #include "base/process.h"
 
-#include "js/ReplayHooks.h"
 #include "mozilla/gfx/Types.h"
 #include "mozilla/Maybe.h"
 
 #include "File.h"
+#include "JSControl.h"
 #include "Monitor.h"
 
 namespace mozilla {
 namespace recordreplay {
 
 // This file has definitions for creating and communicating on a special
 // bidirectional channel between a middleman process and a recording or
 // replaying process. This communication is not included in the recording, and
@@ -255,19 +255,19 @@ typedef JSONMessage<MessageType::Debugge
 
 struct SetBreakpointMessage : public Message
 {
   // ID of the breakpoint to change.
   size_t mId;
 
   // New position of the breakpoint. If this is invalid then the breakpoint is
   // being cleared.
-  JS::replay::ExecutionPosition mPosition;
+  js::BreakpointPosition mPosition;
 
-  SetBreakpointMessage(size_t aId, const JS::replay::ExecutionPosition& aPosition)
+  SetBreakpointMessage(size_t aId, const js::BreakpointPosition& aPosition)
     : Message(MessageType::SetBreakpoint, sizeof(*this))
     , mId(aId)
     , mPosition(aPosition)
   {}
 };
 
 struct ResumeMessage : public Message
 {
@@ -420,17 +420,17 @@ private:
 
   // Descriptor used to communicate with the other side.
   int mFd;
 
   // For synchronizing initialization of the channel.
   Monitor mMonitor;
 
   // Buffer for message data received from the other side of the channel.
-  InfallibleVector<char, 0, AllocPolicy<UntrackedMemoryKind::Generic>> mMessageBuffer;
+  InfallibleVector<char, 0, AllocPolicy<MemoryKind::Generic>> mMessageBuffer;
 
   // The number of bytes of data already in the message buffer.
   size_t mMessageBytes;
 
   // If spew is enabled, print a message and associated info to stderr.
   void PrintMessage(const char* aPrefix, const Message& aMsg);
 
   // Block until a complete message is received from the other side of the
--- a/toolkit/recordreplay/ipc/ChildProcess.cpp
+++ b/toolkit/recordreplay/ipc/ChildProcess.cpp
@@ -128,19 +128,18 @@ ChildProcessInfo::IsPausedAtMatchingBrea
     for (Message* msg : mMessages) {
       if (msg->mType == MessageType::SetBreakpoint) {
         SetBreakpointMessage* nmsg = static_cast<SetBreakpointMessage*>(msg);
         if (nmsg->mId == breakpointId) {
           lastSet = nmsg;
         }
       }
     }
-    MOZ_RELEASE_ASSERT(lastSet &&
-                       lastSet->mPosition.kind != JS::replay::ExecutionPosition::Invalid);
-    if (aFilter(lastSet->mPosition.kind)) {
+    MOZ_RELEASE_ASSERT(lastSet && lastSet->mPosition.IsValid());
+    if (aFilter(lastSet->mPosition.mKind)) {
       return true;
     }
   }
 
   return false;
 }
 
 void
@@ -304,17 +303,17 @@ ChildProcessInfo::Recover(bool aPaused, 
 
   size_t mostRecentCheckpoint = MostRecentCheckpoint();
   bool pausedAtCheckpoint = IsPausedAtCheckpoint();
 
   // Clear out all messages that have been sent to this process.
   for (Message* msg : mMessages) {
     if (msg->mType == MessageType::SetBreakpoint) {
       SetBreakpointMessage* nmsg = static_cast<SetBreakpointMessage*>(msg);
-      SendMessageRaw(SetBreakpointMessage(nmsg->mId, JS::replay::ExecutionPosition()));
+      SendMessageRaw(SetBreakpointMessage(nmsg->mId, js::BreakpointPosition()));
     }
     free(msg);
   }
   mMessages.clear();
 
   mPaused = aPaused;
   mPausedMessage = aPausedMessage;
   mLastCheckpoint = aLastCheckpoint;
--- a/toolkit/recordreplay/ipc/ParentIPC.cpp
+++ b/toolkit/recordreplay/ipc/ParentIPC.cpp
@@ -756,20 +756,20 @@ HasSavedCheckpointsInRange(ChildProcessI
   return true;
 }
 
 // Return whether a child is paused at a breakpoint set by the user or by
 // stepping around, at which point the debugger will send requests to the
 // child to inspect its state. This excludes breakpoints set for things
 // internal to the debugger.
 static bool
-IsUserBreakpoint(JS::replay::ExecutionPosition::Kind aKind)
+IsUserBreakpoint(js::BreakpointPosition::Kind aKind)
 {
-  MOZ_RELEASE_ASSERT(aKind != JS::replay::ExecutionPosition::Invalid);
-  return aKind != JS::replay::ExecutionPosition::NewScript;
+  MOZ_RELEASE_ASSERT(aKind != js::BreakpointPosition::Invalid);
+  return aKind != js::BreakpointPosition::NewScript;
 }
 
 static void
 MarkActiveChildExplicitPause()
 {
   MOZ_RELEASE_ASSERT(gActiveChild->IsPaused());
   size_t targetCheckpoint = gActiveChild->RewindTargetCheckpoint();
 
@@ -824,19 +824,16 @@ ActiveChildIsPausedOrRewinding()
 static MessageLoop* gMainThreadMessageLoop;
 
 MessageLoop*
 MainThreadMessageLoop()
 {
   return gMainThreadMessageLoop;
 }
 
-// Initialize hooks used by the debugger.
-static void InitDebuggerHooks();
-
 // Contents of the prefs shmem block that is sent to the child on startup.
 static char* gShmemPrefs;
 static size_t gShmemPrefsLen;
 
 void
 NotePrefsShmemContents(char* aPrefs, size_t aPrefsLen)
 {
   MOZ_RELEASE_ASSERT(!gShmemPrefs);
@@ -854,17 +851,16 @@ InitializeMiddleman(int aArgc, char* aAr
   // Construct the message that will be sent to each child when starting up.
   IntroductionMessage* msg =
     IntroductionMessage::New(aParentPid, gShmemPrefs, gShmemPrefsLen, aArgc, aArgv);
   ChildProcessInfo::SetIntroductionMessage(msg);
 
   MOZ_RELEASE_ASSERT(gProcessKind == ProcessKind::MiddlemanRecording ||
                      gProcessKind == ProcessKind::MiddlemanReplaying);
 
-  InitDebuggerHooks();
   InitializeGraphicsMemory();
 
   gMonitor = new Monitor();
 
   gMainThreadMessageLoop = MessageLoop::current();
 
   if (gProcessKind == ProcessKind::MiddlemanRecording) {
     SpawnRecordingChild();
@@ -877,29 +873,27 @@ InitializeMiddleman(int aArgc, char* aAr
   gRecordingFd = DirectOpenFile(gRecordingFilename, false);
 }
 
 ///////////////////////////////////////////////////////////////////////////////
 // Debugger Messages
 ///////////////////////////////////////////////////////////////////////////////
 
 // Buffer for receiving the next debugger response.
-static JS::replay::CharBuffer* gResponseBuffer;
+static js::CharBuffer* gResponseBuffer;
 
 static void
 RecvDebuggerResponse(const DebuggerResponseMessage& aMsg)
 {
   MOZ_RELEASE_ASSERT(gResponseBuffer && gResponseBuffer->empty());
-  if (!gResponseBuffer->append(aMsg.Buffer(), aMsg.BufferSize())) {
-    MOZ_CRASH("RecvDebuggerResponse");
-  }
+  gResponseBuffer->append(aMsg.Buffer(), aMsg.BufferSize());
 }
 
-static void
-HookDebuggerRequest(const JS::replay::CharBuffer& aBuffer, JS::replay::CharBuffer* aResponse)
+void
+SendRequest(const js::CharBuffer& aBuffer, js::CharBuffer* aResponse)
 {
   MaybeCreateCheckpointInRecordingChild();
   gActiveChild->WaitUntilPaused();
 
   MOZ_RELEASE_ASSERT(!gResponseBuffer);
   gResponseBuffer = aResponse;
 
   DebuggerRequestMessage* msg = DebuggerRequestMessage::New(aBuffer.begin(), aBuffer.length());
@@ -908,18 +902,18 @@ HookDebuggerRequest(const JS::replay::Ch
 
   // Wait for the child to respond to the query.
   gActiveChild->WaitUntilPaused();
   MOZ_RELEASE_ASSERT(gResponseBuffer == aResponse);
   MOZ_RELEASE_ASSERT(gResponseBuffer->length() != 0);
   gResponseBuffer = nullptr;
 }
 
-static void
-HookSetBreakpoint(size_t aId, const JS::replay::ExecutionPosition& aPosition)
+void
+SetBreakpoint(size_t aId, const js::BreakpointPosition& aPosition)
 {
   MaybeCreateCheckpointInRecordingChild();
   gActiveChild->WaitUntilPaused();
 
   gActiveChild->SendMessage(SetBreakpointMessage(aId, aPosition));
 
   // Also set breakpoints in any recording child that is not currently active.
   // We can't recover recording processes so need to keep their breakpoints up
@@ -933,18 +927,18 @@ HookSetBreakpoint(size_t aId, const JS::
 // according to the last direction we were explicitly given.
 static bool gChildExecuteForward = true;
 static bool gChildExecuteBackward = false;
 
 // Whether there is a ResumeForwardOrBackward task which should execute on the
 // main thread. This will continue execution in the preferred direction.
 static bool gResumeForwardOrBackward = false;
 
-static void
-HookResume(bool aForward)
+void
+Resume(bool aForward)
 {
   gActiveChild->WaitUntilPaused();
 
   // Set the preferred direction of travel.
   gResumeForwardOrBackward = false;
   gChildExecuteForward = aForward;
   gChildExecuteBackward = !aForward;
 
@@ -995,18 +989,18 @@ HookResume(bool aForward)
 
     // Idle children might change their behavior as we run forward.
     PokeChildren();
   }
 
   gActiveChild->SendMessage(ResumeMessage(aForward));
 }
 
-static void
-HookPause()
+void
+Pause()
 {
   MaybeCreateCheckpointInRecordingChild();
   gActiveChild->WaitUntilPaused();
 
   // If the debugger has explicitly paused then there is no preferred direction
   // of travel.
   gChildExecuteForward = false;
   gChildExecuteBackward = false;
@@ -1015,31 +1009,31 @@ HookPause()
 }
 
 static void
 ResumeForwardOrBackward()
 {
   MOZ_RELEASE_ASSERT(!gChildExecuteForward || !gChildExecuteBackward);
 
   if (gResumeForwardOrBackward && (gChildExecuteForward || gChildExecuteBackward)) {
-    HookResume(gChildExecuteForward);
+    Resume(gChildExecuteForward);
   }
 }
 
 static void
 RecvHitCheckpoint(const HitCheckpointMessage& aMsg)
 {
   UpdateCheckpointTimes(aMsg);
 
   // Resume either forwards or backwards. Break the resume off into a separate
   // runnable, to avoid starving any code already on the stack and waiting for
   // the process to pause. Immediately resume if the main thread is blocked.
   if (MainThreadIsWaitingForIPDLReply()) {
     MOZ_RELEASE_ASSERT(gChildExecuteForward);
-    HookResume(true);
+    Resume(true);
   } else if (!gResumeForwardOrBackward) {
     gResumeForwardOrBackward = true;
     gMainThreadMessageLoop->PostTask(NewRunnableFunction("ResumeForwardOrBackward",
                                                          ResumeForwardOrBackward));
   }
 }
 
 static void
@@ -1049,17 +1043,17 @@ HitBreakpoint(uint32_t* aBreakpoints, si
 
   MOZ_RELEASE_ASSERT(!gResumeForwardOrBackward);
   gResumeForwardOrBackward = true;
 
   // Call breakpoint handlers until one of them explicitly resumes forward or
   // backward travel.
   for (size_t i = 0; i < aNumBreakpoints && gResumeForwardOrBackward; i++) {
     AutoSafeJSContext cx;
-    if (!JS::replay::hooks.hitBreakpointMiddleman(cx, aBreakpoints[i])) {
+    if (!js::HitBreakpoint(cx, aBreakpoints[i])) {
       Print("Warning: hitBreakpoint hook threw an exception.\n");
     }
   }
 
   // If the child was not explicitly resumed by any breakpoint handler, resume
   // travel in whichever direction it was going previously.
   if (gResumeForwardOrBackward) {
     ResumeForwardOrBackward();
@@ -1072,21 +1066,11 @@ static void
 RecvHitBreakpoint(const HitBreakpointMessage& aMsg)
 {
   uint32_t* breakpoints = new uint32_t[aMsg.NumBreakpoints()];
   PodCopy(breakpoints, aMsg.Breakpoints(), aMsg.NumBreakpoints());
   gMainThreadMessageLoop->PostTask(NewRunnableFunction("HitBreakpoint", HitBreakpoint,
                                                        breakpoints, aMsg.NumBreakpoints()));
 }
 
-static void
-InitDebuggerHooks()
-{
-  JS::replay::hooks.debugRequestMiddleman = HookDebuggerRequest;
-  JS::replay::hooks.setBreakpointMiddleman = HookSetBreakpoint;
-  JS::replay::hooks.resumeMiddleman = HookResume;
-  JS::replay::hooks.pauseMiddleman = HookPause;
-  JS::replay::hooks.canRewindMiddleman = CanRewind;
-}
-
 } // namespace parent
 } // namespace recordreplay
 } // namespace mozilla
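
Beyond the memory-kind renaming, the patch drops the JS::replay::hooks function-pointer table (see the removed SetCheckpointHooks and InitDebuggerHooks): callers now invoke functions in the record/replay navigation:: and js:: namespaces directly, and former MOZ_EXPORT RecordReplayInterface_* entry points such as AllocateMemory, NewCheckpoint and ResumeExecution become plain internal functions. A rough sketch of the declarations those call sites assume; the real ones presumably live in ipc/ChildInternal.h and JSControl.h, neither of which appears in this diff, and the exact parameter types are guesses:

#include <cstddef>
#include <cstdint>

struct JSContext;                            // SpiderMonkey context (fwd decl)

namespace mozilla {
namespace recordreplay {

struct CheckpointId;                         // defined in ProcessRewind.h

namespace js {
struct ExecutionPoint;                       // recording endpoint descriptor (assumption)
bool HitBreakpoint(JSContext* aCx, uint32_t aId);
} // namespace js

namespace navigation {
void BeforeCheckpoint();
void AfterCheckpoint(const CheckpointId& aCheckpoint);
void AlwaysSaveTemporaryCheckpoints();
js::ExecutionPoint GetRecordingEndpoint();
void SetRecordingEndpoint(size_t aIndex, const js::ExecutionPoint& aEndpoint);
} // namespace navigation

} // namespace recordreplay
} // namespace mozilla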