Bug 1592783 - Change bulk-memory instructions to trap before writing. r=lth
author: Ryan Hunt <rhunt@eqrion.net>
Tue, 05 Nov 2019 15:27:07 +0000
changeset 500613 801e6ae4efdeb516250c6800e31f969d337b8f40
parent 500612 a465ca5c85f272e8db9088cad2d9217adabcb6e3
child 500614 70bd926c6ca95e796a9187a437993688ffae3c3d
push id: 36768
push user: shindli@mozilla.com
push date: Tue, 05 Nov 2019 22:07:34 +0000
treeherder: mozilla-central@e96c1ca93d25 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: lth
bugs: 1592783
milestone: 72.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1592783 - Change bulk-memory instructions to trap before writing. r=lth

This commit changes all bulk-memory instructions to perform up-front bounds
checks and trap if any access would be out-of-bounds, before writing anything.

This affects:
* memory.init, memory.copy, memory.fill
* table.init, table.copy, table.fill
* data segment instantiation (reduces to memory.init)
* elem segment instantiation (reduces to table.init)

Spec issue: https://github.com/WebAssembly/bulk-memory-operations/issues/111

Differential Revision: https://phabricator.services.mozilla.com/D51755
js/src/wasm/WasmInstance.cpp
js/src/wasm/WasmModule.cpp
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -436,65 +436,34 @@ static int32_t PerformWait(Instance* ins
 
   return int32_t(woken);
 }
 
 template <typename T, typename F>
 inline int32_t WasmMemoryCopy(T memBase, uint32_t memLen,
                               uint32_t dstByteOffset, uint32_t srcByteOffset,
                               uint32_t len, F memMove) {
-  if (len == 0) {
-    // Zero length copies that are out-of-bounds do not trap.
-    return 0;
+  // Bounds check and deal with arithmetic overflow.
+  uint64_t dstOffsetLimit = uint64_t(dstByteOffset) + uint64_t(len);
+  uint64_t srcOffsetLimit = uint64_t(srcByteOffset) + uint64_t(len);
+
+  if (dstOffsetLimit > memLen || srcOffsetLimit > memLen) {
+    if (len == 0) {
+      // Zero length copies that are out-of-bounds do not trap.
+      return 0;
+    }
+
+    JSContext* cx = TlsContext.get();
+    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+                              JSMSG_WASM_OUT_OF_BOUNDS);
+    return -1;
   }
 
-  // Here, we know that |len - 1| cannot underflow.
-  bool mustTrap = false;
-
-  // As we're supposed to write data until we trap we have to deal with
-  // arithmetic overflow in the limit calculation.
-  uint64_t highestDstOffset = uint64_t(dstByteOffset) + uint64_t(len - 1);
-  uint64_t highestSrcOffset = uint64_t(srcByteOffset) + uint64_t(len - 1);
-
-  bool copyDown = srcByteOffset < dstByteOffset;
-
-  if (highestDstOffset >= memLen || highestSrcOffset >= memLen) {
-    // We would read past the end of the source or write past the end of the
-    // target.
-    if (copyDown) {
-      // We would trap on the first read or write, so don't read or write
-      // anything.
-      len = 0;
-    } else {
-      // Compute what we have space for in target and what's available in the
-      // source and pick the lowest value as the new len.
-      uint64_t srcAvail = memLen < srcByteOffset ? 0 : memLen - srcByteOffset;
-      uint64_t dstAvail = memLen < dstByteOffset ? 0 : memLen - dstByteOffset;
-      MOZ_ASSERT(len > Min(srcAvail, dstAvail));
-      len = uint32_t(Min(srcAvail, dstAvail));
-    }
-    mustTrap = true;
-  }
-
-  if (len > 0) {
-    // The required write direction is indicated by `copyDown`, but apart from
-    // the trap that may happen without writing anything, the direction is not
-    // currently observable as there are no fences nor any read/write protect
-    // operation.  So memmove is good enough to handle overlaps.
-    memMove(memBase + dstByteOffset, memBase + srcByteOffset, size_t(len));
-  }
-
-  if (!mustTrap) {
-    return 0;
-  }
-
-  JSContext* cx = TlsContext.get();
-  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
-                            JSMSG_WASM_OUT_OF_BOUNDS);
-  return -1;
+  memMove(memBase + dstByteOffset, memBase + srcByteOffset, size_t(len));
+  return 0;
 }
 
 /* static */ int32_t Instance::memCopy(Instance* instance,
                                        uint32_t dstByteOffset,
                                        uint32_t srcByteOffset, uint32_t len,
                                        uint8_t* memBase) {
   MOZ_ASSERT(SASigMemCopy.failureMode == FailureMode::FailOnNegI32);
 
@@ -540,51 +509,35 @@ inline int32_t WasmMemoryCopy(T memBase,
   // Drop this instance's reference to the DataSegment so it can be released.
   segRefPtr = nullptr;
   return 0;
 }
 
 template <typename T, typename F>
 inline int32_t WasmMemoryFill(T memBase, uint32_t memLen, uint32_t byteOffset,
                               uint32_t value, uint32_t len, F memSet) {
-  if (len == 0) {
-    // Zero length fills that are out-of-bounds do not trap.
-    return 0;
+  // Bounds check and deal with arithmetic overflow.
+  uint64_t offsetLimit = uint64_t(byteOffset) + uint64_t(len);
+
+  if (offsetLimit > memLen) {
+    if (len == 0) {
+      // Zero length fills that are out-of-bounds do not trap.
+      return 0;
+    }
+
+    JSContext* cx = TlsContext.get();
+    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+                              JSMSG_WASM_OUT_OF_BOUNDS);
+    return -1;
   }
 
-  // Here, we know that |len - 1| cannot underflow.
-
-  bool mustTrap = false;
-
-  // We must write data until we trap, so we have to deal with arithmetic
-  // overflow in the limit calculation.
-  uint64_t highestOffset = uint64_t(byteOffset) + uint64_t(len - 1);
-  if (highestOffset >= memLen) {
-    // We would write past the end.  Compute what we have space for in the
-    // target and make that the new len.
-    uint64_t avail = memLen < byteOffset ? 0 : memLen - byteOffset;
-    MOZ_ASSERT(len > avail);
-    len = uint32_t(avail);
-    mustTrap = true;
-  }
-
-  if (len > 0) {
-    // The required write direction is upward, but that is not currently
-    // observable as there are no fences nor any read/write protect operation.
-    memSet(memBase + byteOffset, int(value), size_t(len));
-  }
-
-  if (!mustTrap) {
-    return 0;
-  }
-
-  JSContext* cx = TlsContext.get();
-  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
-                            JSMSG_WASM_OUT_OF_BOUNDS);
-  return -1;
+  // The required write direction is upward, but that is not currently
+  // observable as there are no fences nor any read/write protect operation.
+  memSet(memBase + byteOffset, int(value), size_t(len));
+  return 0;
 }
 
 /* static */ int32_t Instance::memFill(Instance* instance, uint32_t byteOffset,
                                        uint32_t value, uint32_t len,
                                        uint8_t* memBase) {
   MOZ_ASSERT(SASigMemFill.failureMode == FailureMode::FailOnNegI32);
 
   const WasmArrayRawBuffer* rawBuf = WasmArrayRawBuffer::fromDataPtr(memBase);
@@ -631,145 +584,96 @@ inline int32_t WasmMemoryFill(T memBase,
   const uint32_t memLen = mem->volatileMemoryLength();
 
   // We are proposing to copy
   //
   //   seg.bytes.begin()[ srcOffset .. srcOffset + len - 1 ]
   // to
   //   memoryBase[ dstOffset .. dstOffset + len - 1 ]
 
-  if (len == 0) {
+  // Bounds check and deal with arithmetic overflow.
+  uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
+  uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
+
+  if (dstOffsetLimit > memLen || srcOffsetLimit > segLen) {
     // Zero length inits that are out-of-bounds do not trap.
-    return 0;
+    if (len == 0) {
+      return 0;
+    }
+
+    JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
+                              JSMSG_WASM_OUT_OF_BOUNDS);
+    return -1;
   }
 
-  // Here, we know that |len - 1| cannot underflow.
-
-  bool mustTrap = false;
-
-  // As we're supposed to write data until we trap we have to deal with
-  // arithmetic overflow in the limit calculation.
-  uint64_t highestDstOffset = uint64_t(dstOffset) + uint64_t(len - 1);
-  uint64_t highestSrcOffset = uint64_t(srcOffset) + uint64_t(len - 1);
-
-  if (highestDstOffset >= memLen || highestSrcOffset >= segLen) {
-    // We would read past the end of the source or write past the end of the
-    // target.  Compute what we have space for in target and what's available
-    // in the source and pick the lowest value as the new len.
-    uint64_t srcAvail = segLen < srcOffset ? 0 : segLen - srcOffset;
-    uint64_t dstAvail = memLen < dstOffset ? 0 : memLen - dstOffset;
-    MOZ_ASSERT(len > Min(srcAvail, dstAvail));
-    len = uint32_t(Min(srcAvail, dstAvail));
-    mustTrap = true;
+  // The required read/write direction is upward, but that is not currently
+  // observable as there are no fences nor any read/write protect operation.
+  SharedMem<uint8_t*> dataPtr = mem->buffer().dataPointerEither();
+  if (mem->isShared()) {
+    AtomicOperations::memcpySafeWhenRacy(
+        dataPtr + dstOffset, (uint8_t*)seg.bytes.begin() + srcOffset, len);
+  } else {
+    uint8_t* rawBuf = dataPtr.unwrap(/*Unshared*/);
+    memcpy(rawBuf + dstOffset, (const char*)seg.bytes.begin() + srcOffset, len);
   }
-
-  if (len > 0) {
-    // The required read/write direction is upward, but that is not currently
-    // observable as there are no fences nor any read/write protect operation.
-    SharedMem<uint8_t*> dataPtr = mem->buffer().dataPointerEither();
-    if (mem->isShared()) {
-      AtomicOperations::memcpySafeWhenRacy(
-          dataPtr + dstOffset, (uint8_t*)seg.bytes.begin() + srcOffset, len);
-    } else {
-      uint8_t* rawBuf = dataPtr.unwrap(/*Unshared*/);
-      memcpy(rawBuf + dstOffset, (const char*)seg.bytes.begin() + srcOffset,
-             len);
-    }
-  }
-
-  if (!mustTrap) {
-    return 0;
-  }
-
-  JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
-                            JSMSG_WASM_OUT_OF_BOUNDS);
-  return -1;
+  return 0;
 }
 
 /* static */ int32_t Instance::tableCopy(Instance* instance, uint32_t dstOffset,
                                          uint32_t srcOffset, uint32_t len,
                                          uint32_t dstTableIndex,
                                          uint32_t srcTableIndex) {
   MOZ_ASSERT(SASigMemCopy.failureMode == FailureMode::FailOnNegI32);
 
   const SharedTable& srcTable = instance->tables()[srcTableIndex];
   uint32_t srcTableLen = srcTable->length();
 
   const SharedTable& dstTable = instance->tables()[dstTableIndex];
   uint32_t dstTableLen = dstTable->length();
 
-  if (len == 0) {
-    // Zero length copies that are out-of-bounds do not trap.
-    return 0;
-  }
-
-  // Here, we know that |len - 1| cannot underflow.
-  bool isOOB = false;
-
-  // As we're supposed to write data until we trap we have to deal with
-  // arithmetic overflow in the limit calculation.
-  uint64_t highestDstOffset = uint64_t(dstOffset) + (len - 1);
-  uint64_t highestSrcOffset = uint64_t(srcOffset) + (len - 1);
-
-  bool copyDown = srcOffset < dstOffset;
+  // Bounds check and deal with arithmetic overflow.
+  uint64_t dstOffsetLimit = uint64_t(dstOffset) + len;
+  uint64_t srcOffsetLimit = uint64_t(srcOffset) + len;
 
-  if (highestDstOffset >= dstTableLen || highestSrcOffset >= srcTableLen) {
-    // We would read past the end of the source or write past the end of the
-    // target.
-    if (copyDown) {
-      // We would trap on the first read or write, so don't read or write
-      // anything.
-      len = 0;
-    } else {
-      // Compute what we have space for in target and what's available in the
-      // source and pick the lowest value as the new len.
-      uint64_t srcAvail = srcTableLen < srcOffset ? 0 : srcTableLen - srcOffset;
-      uint64_t dstAvail = dstTableLen < dstOffset ? 0 : dstTableLen - dstOffset;
-      MOZ_ASSERT(len > Min(srcAvail, dstAvail));
-      len = uint32_t(Min(srcAvail, dstAvail));
+  if (dstOffsetLimit > dstTableLen || srcOffsetLimit > srcTableLen) {
+    // Zero length copies that are out-of-bounds do not trap.
+    if (len == 0) {
+      return 0;
     }
-    isOOB = true;
+
+    JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
+                              JSMSG_WASM_OUT_OF_BOUNDS);
+    return -1;
   }
 
   bool isOOM = false;
 
-  if (len > 0) {
-    // The required write direction is indicated by `copyDown`, but apart from
-    // the trap that may happen without writing anything, the direction is not
-    // currently observable as there are no fences nor any read/write protect
-    // operation.  So Table::copy is good enough, so long as we handle
-    // overlaps.
-    if (&srcTable == &dstTable && dstOffset > srcOffset) {
-      for (uint32_t i = len; i > 0; i--) {
-        if (!dstTable->copy(*srcTable, dstOffset + (i - 1),
-                            srcOffset + (i - 1))) {
-          isOOM = true;
-          break;
-        }
+  if (&srcTable == &dstTable && dstOffset > srcOffset) {
+    for (uint32_t i = len; i > 0; i--) {
+      if (!dstTable->copy(*srcTable, dstOffset + (i - 1),
+                          srcOffset + (i - 1))) {
+        isOOM = true;
+        break;
       }
-    } else if (&srcTable == &dstTable && dstOffset == srcOffset) {
-      // No-op
-    } else {
-      for (uint32_t i = 0; i < len; i++) {
-        if (!dstTable->copy(*srcTable, dstOffset + i, srcOffset + i)) {
-          isOOM = true;
-          break;
-        }
+    }
+  } else if (&srcTable == &dstTable && dstOffset == srcOffset) {
+    // No-op
+  } else {
+    for (uint32_t i = 0; i < len; i++) {
+      if (!dstTable->copy(*srcTable, dstOffset + i, srcOffset + i)) {
+        isOOM = true;
+        break;
       }
     }
   }
 
-  if (!isOOB && !isOOM) {
-    return 0;
+  if (isOOM) {
+    return -1;
   }
-  if (isOOB && !isOOM) {
-    JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
-                              JSMSG_WASM_OUT_OF_BOUNDS);
-  }
-  return -1;
+  return 0;
 }
 
 /* static */ int32_t Instance::elemDrop(Instance* instance, uint32_t segIndex) {
   MOZ_ASSERT(SASigDataDrop.failureMode == FailureMode::FailOnNegI32);
 
   MOZ_RELEASE_ASSERT(size_t(segIndex) < instance->passiveElemSegments_.length(),
                      "ensured by validation");
 
@@ -868,104 +772,74 @@ bool Instance::initElems(uint32_t tableI
   const uint32_t tableLen = table.length();
 
   // We are proposing to copy
   //
   //   seg[ srcOffset .. srcOffset + len - 1 ]
   // to
   //   tableBase[ dstOffset .. dstOffset + len - 1 ]
 
-  if (len == 0) {
+  // Bounds check and deal with arithmetic overflow.
+  uint64_t dstOffsetLimit = uint64_t(dstOffset) + uint64_t(len);
+  uint64_t srcOffsetLimit = uint64_t(srcOffset) + uint64_t(len);
+
+  if (dstOffsetLimit > tableLen || srcOffsetLimit > segLen) {
     // Zero length inits that are out-of-bounds do not trap.
-    return 0;
+    if (len == 0) {
+      return 0;
+    }
+
+    JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
+                              JSMSG_WASM_OUT_OF_BOUNDS);
+    return -1;
   }
 
-  // Here, we know that |len - 1| cannot underflow.
-  bool mustTrap = false;
-
-  // As we're supposed to write data until we trap we have to deal with
-  // arithmetic overflow in the limit calculation.
-  uint64_t highestDstOffset = uint64_t(dstOffset) + uint64_t(len - 1);
-  uint64_t highestSrcOffset = uint64_t(srcOffset) + uint64_t(len - 1);
-
-  if (highestDstOffset >= tableLen || highestSrcOffset >= segLen) {
-    // We would read past the end of the source or write past the end of the
-    // target.  Compute what we have space for in target and what's available
-    // in the source and pick the lowest value as the new len.
-    uint64_t srcAvail = segLen < srcOffset ? 0 : segLen - srcOffset;
-    uint64_t dstAvail = tableLen < dstOffset ? 0 : tableLen - dstOffset;
-    MOZ_ASSERT(len > Min(srcAvail, dstAvail));
-    len = uint32_t(Min(srcAvail, dstAvail));
-    mustTrap = true;
+  if (!instance->initElems(tableIndex, seg, dstOffset, srcOffset, len)) {
+    return -1;  // OOM, which has already been reported.
   }
 
-  if (len > 0) {
-    if (!instance->initElems(tableIndex, seg, dstOffset, srcOffset, len)) {
-      return -1;  // OOM, which has already been reported.
-    }
-  }
-
-  if (!mustTrap) {
-    return 0;
-  }
-
-  JS_ReportErrorNumberASCII(TlsContext.get(), GetErrorMessage, nullptr,
-                            JSMSG_WASM_OUT_OF_BOUNDS);
-  return -1;
+  return 0;
 }
 
 /* static */ int32_t Instance::tableFill(Instance* instance, uint32_t start,
                                          void* value, uint32_t len,
                                          uint32_t tableIndex) {
   MOZ_ASSERT(SASigTableFill.failureMode == FailureMode::FailOnNegI32);
 
   JSContext* cx = TlsContext.get();
   Table& table = *instance->tables()[tableIndex];
 
-  if (len == 0) {
-    // Zero length fills that are out-of-bounds do not trap.
-    return 0;
-  }
-
-  // Here, we know that |len - 1| cannot underflow.
-
-  bool mustTrap = false;
+  // Bounds check and deal with arithmetic overflow.
+  uint64_t offsetLimit = uint64_t(start) + uint64_t(len);
 
-  // We must write the table until we trap, so we have to deal with
-  // arithmetic overflow in the limit calculation.
-  uint64_t highestOffset = uint64_t(start) + uint64_t(len - 1);
-  if (highestOffset >= table.length()) {
-    // We would write past the end.  Compute what we have space for in the
-    // target and make that the new len.
-    uint64_t avail = table.length() < start ? 0 : table.length() - start;
-    MOZ_ASSERT(len > avail);
-    len = uint32_t(avail);
-    mustTrap = true;
+  if (offsetLimit > table.length()) {
+    // Zero length fills that are out-of-bounds do not trap.
+    if (len == 0) {
+      return 0;
+    }
+
+    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+                              JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
+    return -1;
   }
 
   AnyRef ref = AnyRef::fromCompiledCode(value);
 
   switch (table.kind()) {
     case TableKind::AnyRef:
       table.fillAnyRef(start, len, ref);
       break;
     case TableKind::FuncRef:
       table.fillFuncRef(start, len, ref, cx);
       break;
     case TableKind::AsmJS:
       MOZ_CRASH("not asm.js");
   }
 
-  if (!mustTrap) {
-    return 0;
-  }
-
-  JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
-                            JSMSG_WASM_TABLE_OUT_OF_BOUNDS);
-  return -1;
+  return 0;
 }
 
 /* static */ void* Instance::tableGet(Instance* instance, uint32_t index,
                                       uint32_t tableIndex) {
   MOZ_ASSERT(SASigTableGet.failureMode == FailureMode::FailOnInvalidRef);
 
   const Table& table = *instance->tables()[tableIndex];
   if (index >= table.length()) {
--- a/js/src/wasm/WasmModule.cpp
+++ b/js/src/wasm/WasmModule.cpp
@@ -512,17 +512,18 @@ static bool AllSegmentsArePassive(const 
 bool Module::initSegments(JSContext* cx, HandleWasmInstanceObject instanceObj,
                           HandleWasmMemoryObject memoryObj,
                           const ValVector& globalImportValues) const {
   MOZ_ASSERT_IF(!memoryObj, AllSegmentsArePassive(dataSegments_));
 
   Instance& instance = instanceObj->instance();
   const SharedTableVector& tables = instance.tables();
 
-  // Bulk memory changes the error checking behavior: we may write partial data.
+  // Bulk memory changes the error checking behavior: we apply segments
+  // in-order and terminate if one has an out-of-bounds range.
   // We enable bulk memory semantics if shared memory is enabled.
 #ifdef ENABLE_WASM_BULKMEM_OPS
   const bool eagerBoundsCheck = false;
 #else
   // Bulk memory must be available if shared memory is enabled.
   const bool eagerBoundsCheck =
       !cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled();
 #endif
@@ -575,36 +576,27 @@ bool Module::initSegments(JSContext* cx,
       // Allow zero-sized initializations even if they are out-of-bounds. This
       // behavior technically only applies when bulk-memory-operations are
       // enabled, but we will fail with an error during eager bounds checking
       // above in that case.
       if (count == 0) {
         continue;
       }
 
-      bool fail = false;
       if (!eagerBoundsCheck) {
         uint32_t tableLength = tables[seg->tableIndex]->length();
-        if (offset > tableLength) {
-          fail = true;
-          count = 0;
-        } else if (tableLength - offset < count) {
-          fail = true;
-          count = tableLength - offset;
+        if (offset > tableLength || tableLength - offset < count) {
+          JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+                                   JSMSG_WASM_BAD_FIT, "elem", "table");
+          return false;
         }
       }
-      if (count) {
-        if (!instance.initElems(seg->tableIndex, *seg, offset, 0, count)) {
-          return false;  // OOM
-        }
-      }
-      if (fail) {
-        JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
-                                 JSMSG_WASM_BAD_FIT, "elem", "table");
-        return false;
+
+      if (!instance.initElems(seg->tableIndex, *seg, offset, 0, count)) {
+        return false;  // OOM
       }
     }
   }
 
   if (memoryObj) {
     uint32_t memoryLength = memoryObj->volatileMemoryLength();
     uint8_t* memoryBase =
         memoryObj->buffer().dataPointerEither().unwrap(/* memcpy */);
@@ -620,34 +612,24 @@ bool Module::initSegments(JSContext* cx,
       // Allow zero-sized initializations even if they are out-of-bounds. This
       // behavior technically only applies when bulk-memory-operations are
       // enabled, but we will fail with an error during eager bounds checking
       // above in that case.
       if (count == 0) {
         continue;
       }
 
-      bool fail = false;
       if (!eagerBoundsCheck) {
-        if (offset > memoryLength) {
-          fail = true;
-          count = 0;
-        } else if (memoryLength - offset < count) {
-          fail = true;
-          count = memoryLength - offset;
+        if (offset > memoryLength || memoryLength - offset < count) {
+          JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+                                   JSMSG_WASM_BAD_FIT, "data", "memory");
+          return false;
         }
       }
-      if (count) {
-        memcpy(memoryBase + offset, seg->bytes.begin(), count);
-      }
-      if (fail) {
-        JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
-                                 JSMSG_WASM_BAD_FIT, "data", "memory");
-        return false;
-      }
+      memcpy(memoryBase + offset, seg->bytes.begin(), count);
     }
   }
 
   return true;
 }
 
 static const Import& FindImportForFuncImport(const ImportVector& imports,
                                              uint32_t funcImportIndex) {