Bug 1273462 - Part 2: Add infrastructure to mark all fully used pages of AssemblerBuffer's vector as read-only (disabled by default). r=jandem
authorEmanuel Hoogeveen <emanuel.hoogeveen@gmail.com>
Fri, 27 May 2016 14:18:00 +0200
changeset 340573 9a76ad279eaa7387317d2dbdfde222f7cabcc7b1
parent 340572 dbd9ceba3964f756765cbd96d3beea7b3de0a933
child 340574 26bd1c34e1f8bc2861045e314407130a6426c711
push id1183
push userraliiev@mozilla.com
push dateMon, 05 Sep 2016 20:01:49 +0000
treeherdermozilla-release@3148731bed45 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersjandem
bugs1273462
milestone49.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1273462 - Part 2: Add infrastructure to mark all fully used pages of AssemblerBuffer's vector as read-only (disabled by default). r=jandem
js/src/ds/PageProtectingVector.h
js/src/gc/Memory.cpp
js/src/gc/Memory.h
js/src/jit/x86-shared/Assembler-x86-shared.h
js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
js/src/jit/x86-shared/BaseAssembler-x86-shared.h
new file mode 100644
--- /dev/null
+++ b/js/src/ds/PageProtectingVector.h
@@ -0,0 +1,240 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef ds_PageProtectingVector_h
+#define ds_PageProtectingVector_h
+
+#include "mozilla/Vector.h"
+
+#include "gc/Memory.h"
+
+namespace js {
+
+/*
+ * PageProtectingVector is a vector that can only grow or be cleared, and marks
+ * all of its fully used memory pages as read-only. It can be used to detect
+ * heap corruption in important buffers, since anything that tries to write
+ * into its protected pages will crash.
+ *
+ * PageProtectingVector's protection is limited to full pages. If the front
+ * of its buffer is not aligned on a page boundary, bytes preceding the first
+ * page boundary will not be protected. Similarly, the end of the buffer will
+ * not be fully protected unless it is aligned on a page boundary. Altogether,
+ * up to two pages of memory may not be protected.
+ */
+template<typename T,
+         size_t MinInlineCapacity = 0,
+         class AllocPolicy = mozilla::MallocAllocPolicy>
+class PageProtectingVector final
+{
+    mozilla::Vector<T, MinInlineCapacity, AllocPolicy> vector;
+
+    /* Cached system page size and the corresponding low-bits mask. */
+    size_t pageSize;
+    size_t pageMask;
+
+    /*
+     * The number of bytes between the start of the buffer being used by
+     * |vector| and the first page we can protect. With jemalloc, this number
+     * should always be 0 for vectors with a buffer larger than |pageSize / 2|
+     * bytes, but with other allocators large buffers may not be page-aligned.
+     */
+    size_t offsetToPage;
+
+    /* The number of currently protected bytes (a multiple of pageSize). */
+    size_t protectedBytes;
+
+    /*
+     * The number of bytes that are currently unprotected, but could be.
+     * This number starts at |-offsetToPage|, since any bytes before
+     * |vector.begin() + offsetToPage| can never be protected (as we do not own
+     * the whole page). As a result, if |unprotectedBytes >= pageSize|, we know
+     * we can protect at least one more page, and |unprotectedBytes & ~pageMask|
+     * is always the number of additional bytes we can protect. Put another way,
+     * |offsetToPage + protectedBytes + unprotectedBytes == vector.length()|
+     * always holds, and if |protectedBytes != 0| then |unprotectedBytes >= 0|.
+     */
+    intptr_t unprotectedBytes;
+
+    /* Whether enableProtection() has been called (and not yet disabled). */
+    bool protectionEnabled;
+    /* Whether an unprotectRegion()/reprotectRegion() pair is in flight. */
+    bool regionUnprotected;
+
+    /*
+     * Recompute |offsetToPage| from the current buffer address (the buffer
+     * may have moved after a reallocation). The add/subtract around the
+     * recomputation preserves the invariant |offsetToPage + protectedBytes +
+     * unprotectedBytes == vector.length()| described above.
+     */
+    void updateOffsetToPage() {
+        unprotectedBytes += offsetToPage;
+        offsetToPage = (pageSize - (uintptr_t(vector.begin()) & pageMask)) & pageMask;
+        unprotectedBytes -= offsetToPage;
+    }
+
+    /* Mark any additional fully used pages as read-only. */
+    void protect() {
+        MOZ_ASSERT(!regionUnprotected);
+        if (protectionEnabled && unprotectedBytes >= intptr_t(pageSize)) {
+            /* Round down to whole pages; a partial trailing page stays writable. */
+            size_t toProtect = size_t(unprotectedBytes) & ~pageMask;
+            uintptr_t addr = uintptr_t(vector.begin()) + offsetToPage + protectedBytes;
+            gc::MakePagesReadOnly(reinterpret_cast<void*>(addr), toProtect);
+            unprotectedBytes -= toProtect;
+            protectedBytes += toProtect;
+        }
+    }
+
+    /* Remove protection from all currently protected pages. */
+    void unprotect() {
+        MOZ_ASSERT(!regionUnprotected);
+        MOZ_ASSERT_IF(!protectionEnabled, !protectedBytes);
+        if (protectedBytes) {
+            uintptr_t addr = uintptr_t(vector.begin()) + offsetToPage;
+            gc::UnprotectPages(reinterpret_cast<void*>(addr), protectedBytes);
+            unprotectedBytes += protectedBytes;
+            protectedBytes = 0;
+        }
+    }
+
+    /* Called when the buffer may have moved: re-derive alignment, reprotect. */
+    void protectNewBuffer() {
+        updateOffsetToPage();
+        protect();
+    }
+
+    /* Do byte offsets [first, last] overlap the currently protected bytes? */
+    bool anyProtected(size_t first, size_t last) {
+        return last >= offsetToPage && first < offsetToPage + protectedBytes;
+    }
+
+    /*
+     * Compute the smallest page-aligned region covering the protected bytes
+     * in [first, last]: |*addr| receives the first page's address and |*size|
+     * the region length. Callers ensure |protectedBytes != 0| and that
+     * anyProtected(first, last) holds before calling this.
+     */
+    void setContainingRegion(size_t first, size_t last, uintptr_t* addr, size_t* size) {
+        if (first < offsetToPage)
+            first = offsetToPage;
+        if (last > offsetToPage + protectedBytes - 1)
+            last = offsetToPage + protectedBytes - 1;
+        uintptr_t firstAddr = uintptr_t(vector.begin());
+        uintptr_t firstPage = (firstAddr + first) & ~pageMask;
+        uintptr_t lastPage = (firstAddr + last) & ~pageMask;
+        /* One page, plus one more for each page boundary spanned. */
+        *size = pageSize + (lastPage - firstPage);
+        *addr = firstPage;
+    }
+
+    /* Account for |used| newly appended elements; protect newly filled pages. */
+    void increaseElemsUsed(size_t used) {
+        unprotectedBytes += used * sizeof(T);
+        protect();
+    }
+
+    /* A helper class to simplify unprotecting and reprotecting when needed. */
+    class AutoUnprotect
+    {
+        PageProtectingVector* vector;
+
+      public:
+        /* A default-constructed guard does nothing until emplace() is called. */
+        AutoUnprotect() : vector(nullptr) {};
+
+        void emplace(PageProtectingVector* holder) {
+            vector = holder;
+            vector->unprotect();
+        }
+
+        explicit AutoUnprotect(PageProtectingVector* holder) {
+            emplace(holder);
+        }
+
+        /* Reprotect on scope exit; protectNewBuffer() also handles the case
+         * where the underlying buffer moved while unprotected. */
+        ~AutoUnprotect() {
+            if (vector)
+                vector->protectNewBuffer();
+        }
+    };
+
+  public:
+    explicit PageProtectingVector(AllocPolicy policy = AllocPolicy())
+      : vector(policy),
+        pageSize(gc::SystemPageSize()),
+        pageMask(pageSize - 1),
+        offsetToPage(0),
+        protectedBytes(0),
+        unprotectedBytes(0),
+        protectionEnabled(false),
+        regionUnprotected(false) { updateOffsetToPage(); }
+
+    ~PageProtectingVector() { unprotect(); }
+
+    /* Enable protection for the entire buffer. */
+    void enableProtection() {
+        MOZ_ASSERT(!protectionEnabled);
+        protectionEnabled = true;
+        protectNewBuffer();
+    }
+
+    /* Disable protection for the entire buffer. */
+    void disableProtection() {
+        MOZ_ASSERT(protectionEnabled);
+        unprotect();
+        protectionEnabled = false;
+    }
+
+    /*
+     * Disable protection on the smallest number of pages containing
+     * both |firstByteOffset| and |lastByteOffset|.
+     */
+    void unprotectRegion(size_t firstByteOffset, size_t lastByteOffset) {
+        MOZ_ASSERT(!regionUnprotected);
+        /* Set the flag even when nothing ends up unprotected, so the matching
+         * reprotectRegion() call (which asserts it) remains symmetric. */
+        regionUnprotected = true;
+        if (!protectedBytes || !anyProtected(firstByteOffset, lastByteOffset))
+            return;
+        size_t size;
+        uintptr_t addr;
+        setContainingRegion(firstByteOffset, lastByteOffset, &addr, &size);
+        gc::UnprotectPages(reinterpret_cast<void*>(addr), size);
+    }
+
+    /*
+     * Re-enable protection on the region containing
+     * |firstByteOffset| and |lastByteOffset|.
+     */
+    void reprotectRegion(size_t firstByteOffset, size_t lastByteOffset) {
+        MOZ_ASSERT(regionUnprotected);
+        regionUnprotected = false;
+        if (!protectedBytes || !anyProtected(firstByteOffset, lastByteOffset))
+            return;
+        size_t size;
+        uintptr_t addr;
+        setContainingRegion(firstByteOffset, lastByteOffset, &addr, &size);
+        gc::MakePagesReadOnly(reinterpret_cast<void*>(addr), size);
+    }
+
+    size_t length() const { return vector.length(); }
+
+    T* begin() { return vector.begin(); }
+    const T* begin() const { return vector.begin(); }
+
+    void clear() {
+        AutoUnprotect guard(this);
+        vector.clear();
+        /* Nothing is used any more; the guard's destructor recomputes
+         * |offsetToPage| via protectNewBuffer(). */
+        offsetToPage = 0;
+        unprotectedBytes = 0;
+    }
+
+    /* Unprotect only when this reserve() may reallocate the buffer. */
+    MOZ_MUST_USE bool reserve(size_t size) {
+        AutoUnprotect guard;
+        if (size > vector.capacity())
+            guard.emplace(this);
+        return vector.reserve(size);
+    }
+
+    template<typename U>
+    MOZ_ALWAYS_INLINE void infallibleAppend(const U* values, size_t size) {
+        /* Infallible appends cannot reallocate, and they write past the
+         * protected prefix (only fully used pages are protected), so no
+         * unprotection is needed before writing. */
+        vector.infallibleAppend(values, size);
+        increaseElemsUsed(size);
+    }
+
+    template<typename U>
+    MOZ_MUST_USE bool append(const U* values, size_t size) {
+        bool ret;
+        {
+            /* Unprotect only if this append can trigger a reallocation. */
+            AutoUnprotect guard;
+            if (MOZ_UNLIKELY(vector.length() + size > vector.capacity()))
+                guard.emplace(this);
+            ret = vector.append(values, size);
+        }
+        if (ret)
+            increaseElemsUsed(size);
+        return ret;
+    }
+};
+
+} /* namespace js */
+
+#endif /* ds_PageProtectingVector_h */
--- a/js/src/gc/Memory.cpp
+++ b/js/src/gc/Memory.cpp
@@ -758,24 +758,39 @@ ProtectPages(void* p, size_t size)
     MOZ_ASSERT(oldProtect == PAGE_READWRITE);
 #else  // assume Unix
     if (mprotect(p, size, PROT_NONE))
         MOZ_CRASH("mprotect(PROT_NONE) failed");
 #endif
 }
 
+/*
+ * Mark the pages in [p, p + size) as read-only. |size| must be a multiple of
+ * the system page size. Crashes (rather than returning an error) on failure.
+ */
 void
+MakePagesReadOnly(void* p, size_t size)
+{
+    MOZ_ASSERT(size % pageSize == 0);
+#if defined(XP_WIN)
+    DWORD oldProtect;
+    if (!VirtualProtect(p, size, PAGE_READONLY, &oldProtect))
+        MOZ_CRASH("VirtualProtect(PAGE_READONLY) failed");
+    MOZ_ASSERT(oldProtect == PAGE_READWRITE);
+#else  // assume Unix
+    if (mprotect(p, size, PROT_READ))
+        MOZ_CRASH("mprotect(PROT_READ) failed");
+#endif
+}
+
+void
 UnprotectPages(void* p, size_t size)
 {
     MOZ_ASSERT(size % pageSize == 0);
 #if defined(XP_WIN)
     DWORD oldProtect;
     if (!VirtualProtect(p, size, PAGE_READWRITE, &oldProtect))
         MOZ_CRASH("VirtualProtect(PAGE_READWRITE) failed");
-    MOZ_ASSERT(oldProtect == PAGE_NOACCESS);
+    MOZ_ASSERT(oldProtect == PAGE_NOACCESS || oldProtect == PAGE_READONLY);
 #else  // assume Unix
     if (mprotect(p, size, PROT_READ | PROT_WRITE))
         MOZ_CRASH("mprotect(PROT_READ | PROT_WRITE) failed");
 #endif
 }
 
 } // namespace gc
 } // namespace js
--- a/js/src/gc/Memory.h
+++ b/js/src/gc/Memory.h
@@ -39,14 +39,15 @@ size_t GetPageFaultCount();
 void* AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment);
 
 // Deallocate memory mapped content.
 void DeallocateMappedContent(void* p, size_t length);
 
 void* TestMapAlignedPagesLastDitch(size_t size, size_t alignment);
 
 void ProtectPages(void* p, size_t size);
+void MakePagesReadOnly(void* p, size_t size);
 void UnprotectPages(void* p, size_t size);
 
 } // namespace gc
 } // namespace js
 
 #endif /* gc_Memory_h */
--- a/js/src/jit/x86-shared/Assembler-x86-shared.h
+++ b/js/src/jit/x86-shared/Assembler-x86-shared.h
@@ -1050,23 +1050,25 @@ class AssemblerX86Shared : public Assemb
         }
     }
 
     CodeOffset callWithPatch() {
         return CodeOffset(masm.call().offset());
     }
     void patchCall(uint32_t callerOffset, uint32_t calleeOffset) {
         unsigned char* code = masm.data();
+        X86Encoding::AutoUnprotectAssemblerBufferRegion unprotect(masm, callerOffset - 4, 4);
         X86Encoding::SetRel32(code + callerOffset, code + calleeOffset);
     }
     CodeOffset thunkWithPatch() {
         return CodeOffset(masm.jmp().offset());
     }
     void patchThunk(uint32_t thunkOffset, uint32_t targetOffset) {
         unsigned char* code = masm.data();
+        X86Encoding::AutoUnprotectAssemblerBufferRegion unprotect(masm, thunkOffset - 4, 4);
         X86Encoding::SetRel32(code + thunkOffset, code + targetOffset);
     }
     static void repatchThunk(uint8_t* code, uint32_t thunkOffset, uint32_t targetOffset) {
         X86Encoding::SetRel32(code + thunkOffset, code + targetOffset);
     }
 
     void breakpoint() {
         masm.int3();
--- a/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
+++ b/js/src/jit/x86-shared/AssemblerBuffer-x86-shared.h
@@ -28,16 +28,17 @@
  * ***** END LICENSE BLOCK ***** */
 
 #ifndef jit_x86_shared_AssemblerBuffer_x86_shared_h
 #define jit_x86_shared_AssemblerBuffer_x86_shared_h
 
 #include <stdarg.h>
 #include <string.h>
 
+#include "ds/PageProtectingVector.h"
 #include "jit/ExecutableAllocator.h"
 #include "jit/JitSpewer.h"
 
 // Spew formatting helpers.
 #define PRETTYHEX(x)                       (((x)<0)?"-":""),(((x)<0)?-(x):(x))
 
 #define MEM_o     "%s0x%x"
 #define MEM_os    MEM_o   "(,%s,%d)"
@@ -133,16 +134,26 @@ namespace jit {
             return m_oom;
         }
 
         const unsigned char* buffer() const {
             MOZ_ASSERT(!m_oom);
             return m_buffer.begin();
         }
 
+        /* Forward page-protection control to the PageProtectingVector that
+         * backs this buffer; offsets are byte offsets into the buffer. */
+        void enableBufferProtection() { m_buffer.enableProtection(); }
+        void disableBufferProtection() { m_buffer.disableProtection(); }
+
+        void unprotectDataRegion(size_t firstByteOffset, size_t lastByteOffset) {
+            m_buffer.unprotectRegion(firstByteOffset, lastByteOffset);
+        }
+        void reprotectDataRegion(size_t firstByteOffset, size_t lastByteOffset) {
+            m_buffer.reprotectRegion(firstByteOffset, lastByteOffset);
+        }
+
     protected:
         /*
          * OOM handling: This class can OOM in the ensureSpace() method trying
          * to allocate a new buffer. In response to an OOM, we need to avoid
          * crashing and report the error. We also want to make it so that
          * users of this class need to check for OOM only at certain points
          * and not after every operation.
          *
@@ -153,17 +164,17 @@ namespace jit {
          *
          * See also the |buffer| method.
          */
         void oomDetected() {
             m_oom = true;
             m_buffer.clear();
         }
 
-        mozilla::Vector<unsigned char, 256, SystemAllocPolicy> m_buffer;
+        PageProtectingVector<unsigned char, 256, SystemAllocPolicy> m_buffer;
         bool m_oom;
     };
 
     class GenericAssembler
     {
         Sprinter* printer;
 
       public:
--- a/js/src/jit/x86-shared/BaseAssembler-x86-shared.h
+++ b/js/src/jit/x86-shared/BaseAssembler-x86-shared.h
@@ -38,16 +38,29 @@
 
 extern volatile uintptr_t* blackbox;
 
 namespace js {
 namespace jit {
 
 namespace X86Encoding {
 
+class BaseAssembler;
+
+/*
+ * RAII helper: the constructor lifts write protection from the assembler
+ * buffer pages containing bytes [offset, offset + size), and the destructor
+ * restores protection over the same range. Member definitions follow the
+ * BaseAssembler class below, since they call into it.
+ */
+class AutoUnprotectAssemblerBufferRegion
+{
+    BaseAssembler* assembler;
+    size_t firstByteOffset;
+    size_t lastByteOffset;
+
+  public:
+    AutoUnprotectAssemblerBufferRegion(BaseAssembler& holder, int32_t offset, size_t size);
+    ~AutoUnprotectAssemblerBufferRegion();
+};
+
 class BaseAssembler : public GenericAssembler {
 public:
     BaseAssembler()
       : useVEX_(true)
     { }
 
     void disableVEX() { useVEX_ = false; }
 
@@ -3467,16 +3480,17 @@ threeByteOpImmSimd("vblendps", VEX_PD, O
         // its internal buffer and thus our links could be garbage.
         if (oom())
             return;
 
         assertValidJmpSrc(from);
         MOZ_RELEASE_ASSERT(to.offset() == -1 || size_t(to.offset()) <= size());
 
         unsigned char* code = m_formatter.data();
+        AutoUnprotectAssemblerBufferRegion unprotect(*this, from.offset() - 4, 4);
         SetInt32(code + from.offset(), to.offset());
     }
 
     void linkJump(JmpSrc from, JmpDst to)
     {
         MOZ_ASSERT(from.offset() != -1);
         MOZ_ASSERT(to.offset() != -1);
 
@@ -3485,28 +3499,39 @@ threeByteOpImmSimd("vblendps", VEX_PD, O
         if (oom())
             return;
 
         assertValidJmpSrc(from);
         MOZ_RELEASE_ASSERT(size_t(to.offset()) <= size());
 
         spew(".set .Lfrom%d, .Llabel%d", from.offset(), to.offset());
         unsigned char* code = m_formatter.data();
+        AutoUnprotectAssemblerBufferRegion unprotect(*this, from.offset() - 4, 4);
         SetRel32(code + from.offset(), code + to.offset());
     }
 
     void executableCopy(void* buffer)
     {
         memcpy(buffer, m_formatter.buffer(), size());
     }
     MOZ_MUST_USE bool appendBuffer(const BaseAssembler& other)
     {
         return m_formatter.append(other.m_formatter.buffer(), other.size());
     }
 
+    /* Forward page-protection control to the formatter's assembler buffer. */
+    void enableBufferProtection() { m_formatter.enableBufferProtection(); }
+    void disableBufferProtection() { m_formatter.disableBufferProtection(); }
+
+    void unprotectDataRegion(size_t firstByteOffset, size_t lastByteOffset) {
+        m_formatter.unprotectDataRegion(firstByteOffset, lastByteOffset);
+    }
+    void reprotectDataRegion(size_t firstByteOffset, size_t lastByteOffset) {
+        m_formatter.reprotectDataRegion(firstByteOffset, lastByteOffset);
+    }
+
   protected:
     static bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(int8_t)value; }
     static bool CAN_SIGN_EXTEND_16_32(int32_t value) { return value == (int32_t)(int16_t)value; }
     static bool CAN_ZERO_EXTEND_8_32(int32_t value) { return value == (int32_t)(uint8_t)value; }
     static bool CAN_ZERO_EXTEND_8H_32(int32_t value) { return value == (value & 0xff00); }
     static bool CAN_ZERO_EXTEND_16_32(int32_t value) { return value == (int32_t)(uint16_t)value; }
     static bool CAN_ZERO_EXTEND_32_64(int32_t value) { return value >= 0; }
 
@@ -4637,68 +4662,68 @@ threeByteOpImmSimd("vblendps", VEX_PD, O
 
         // Immediates:
         //
         // An immediate should be appended where appropriate after an op has
         // been emitted.  The writes are unchecked since the opcode formatters
         // above will have ensured space.
 
         // A signed 8-bit immediate.
-        void immediate8s(int32_t imm)
+        MOZ_ALWAYS_INLINE void immediate8s(int32_t imm)
         {
             MOZ_ASSERT(CAN_SIGN_EXTEND_8_32(imm));
             m_buffer.putByteUnchecked(imm);
         }
 
         // An unsigned 8-bit immediate.
-        void immediate8u(uint32_t imm)
+        MOZ_ALWAYS_INLINE void immediate8u(uint32_t imm)
         {
             MOZ_ASSERT(CAN_ZERO_EXTEND_8_32(imm));
             m_buffer.putByteUnchecked(int32_t(imm));
         }
 
         // An 8-bit immediate which is either signed or unsigned, for use in
         // instructions which actually only operate on 8 bits.
-        void immediate8(int32_t imm)
+        MOZ_ALWAYS_INLINE void immediate8(int32_t imm)
         {
             m_buffer.putByteUnchecked(imm);
         }
 
         // A signed 16-bit immediate.
-        void immediate16s(int32_t imm)
+        MOZ_ALWAYS_INLINE void immediate16s(int32_t imm)
         {
             MOZ_ASSERT(CAN_SIGN_EXTEND_16_32(imm));
             m_buffer.putShortUnchecked(imm);
         }
 
         // An unsigned 16-bit immediate.
-        void immediate16u(int32_t imm)
+        MOZ_ALWAYS_INLINE void immediate16u(int32_t imm)
         {
             MOZ_ASSERT(CAN_ZERO_EXTEND_16_32(imm));
             m_buffer.putShortUnchecked(imm);
         }
 
         // A 16-bit immediate which is either signed or unsigned, for use in
         // instructions which actually only operate on 16 bits.
-        void immediate16(int32_t imm)
+        MOZ_ALWAYS_INLINE void immediate16(int32_t imm)
         {
             m_buffer.putShortUnchecked(imm);
         }
 
-        void immediate32(int32_t imm)
+        MOZ_ALWAYS_INLINE void immediate32(int32_t imm)
         {
             m_buffer.putIntUnchecked(imm);
         }
 
-        void immediate64(int64_t imm)
+        MOZ_ALWAYS_INLINE void immediate64(int64_t imm)
         {
             m_buffer.putInt64Unchecked(imm);
         }
 
-        MOZ_MUST_USE JmpSrc
+        MOZ_ALWAYS_INLINE MOZ_MUST_USE JmpSrc
         immediateRel32()
         {
             m_buffer.putIntUnchecked(0);
             return JmpSrc(m_buffer.size());
         }
 
         // Data:
 
@@ -4752,16 +4777,26 @@ threeByteOpImmSimd("vblendps", VEX_PD, O
         bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
         unsigned char* data() { return m_buffer.data(); }
 
         MOZ_MUST_USE bool append(const unsigned char* values, size_t size)
         {
             return m_buffer.append(values, size);
         }
 
+        /* Forward page-protection control to the underlying AssemblerBuffer. */
+        void enableBufferProtection() { m_buffer.enableBufferProtection(); }
+        void disableBufferProtection() { m_buffer.disableBufferProtection(); }
+
+        void unprotectDataRegion(size_t firstByteOffset, size_t lastByteOffset) {
+            m_buffer.unprotectDataRegion(firstByteOffset, lastByteOffset);
+        }
+        void reprotectDataRegion(size_t firstByteOffset, size_t lastByteOffset) {
+            m_buffer.reprotectDataRegion(firstByteOffset, lastByteOffset);
+        }
+
     private:
 
         // Internals; ModRm and REX formatters.
 
         // Byte operand register spl & above require a REX prefix, which precludes
         // use of the h registers in the same instruction.
         static bool byteRegRequiresRex(RegisterID reg)
         {
@@ -4984,14 +5019,31 @@ threeByteOpImmSimd("vblendps", VEX_PD, O
         }
 
         AssemblerBuffer m_buffer;
     } m_formatter;
 
     bool useVEX_;
 };
 
+/*
+ * Unprotect the buffer region [offset, offset + size) for the lifetime of
+ * this object. |offset| must be non-negative and |size| non-zero, since the
+ * last byte offset is computed as |offset + size - 1|.
+ */
+MOZ_ALWAYS_INLINE
+AutoUnprotectAssemblerBufferRegion::AutoUnprotectAssemblerBufferRegion(BaseAssembler& holder,
+                                                                       int32_t offset, size_t size)
+{
+    assembler = &holder;
+    MOZ_ASSERT(offset >= 0);
+    MOZ_ASSERT(size > 0);  // |size - 1| below would underflow for an empty region.
+    firstByteOffset = size_t(offset);
+    lastByteOffset = firstByteOffset + (size - 1);
+    assembler->unprotectDataRegion(firstByteOffset, lastByteOffset);
+}
+
+/* Restore protection over the region unprotected by the constructor. */
+MOZ_ALWAYS_INLINE
+AutoUnprotectAssemblerBufferRegion::~AutoUnprotectAssemblerBufferRegion()
+{
+    assembler->reprotectDataRegion(firstByteOffset, lastByteOffset);
+}
+
 } // namespace X86Encoding
 
 } // namespace jit
 } // namespace js
 
 #endif /* jit_x86_shared_BaseAssembler_x86_shared_h */