Bug 1229396: Propagate OOM when pushing elements to the uses array; r=jandem
author Benjamin Bouvier <benj@benj.me>
Tue, 01 Dec 2015 18:28:51 +0100
changeset 309680 1b964e554709aedb32e894ca044edcfdca8f1298
parent 309679 ebadedcd40109681a4ce6748edab19313c9db363
child 309681 b7a7fcf24c4904f2c687eebfba067685a0e3699a
push id 5513
push user raliiev@mozilla.com
push date Mon, 25 Jan 2016 13:55:34 +0000
treeherder mozilla-beta@5ee97dd05b5c [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers jandem
bugs 1229396
milestone 45.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1229396: Propagate OOM when pushing elements to the uses array; r=jandem
js/src/jit/x64/MacroAssembler-x64.cpp
js/src/jit/x86/MacroAssembler-x86.cpp
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -27,58 +27,58 @@ MacroAssemblerX64::loadConstantDouble(do
     if (!dbl)
         return;
     // The constants will be stored in a pool appended to the text (see
     // finish()), so they will always be a fixed distance from the
     // instructions which reference them. This allows the instructions to use
     // PC-relative addressing. Use "jump" label support code, because we need
     // the same PC-relative address patching that jumps use.
     JmpSrc j = masm.vmovsd_ripr(dest.encoding());
-    dbl->uses.append(CodeOffset(j.offset()));
+    propagateOOM(dbl->uses.append(CodeOffset(j.offset())));
 }
 
 void
 MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
 {
     if (maybeInlineFloat(f, dest))
         return;
     Float* flt = getFloat(f);
     if (!flt)
         return;
     // See comment in loadConstantDouble
     JmpSrc j = masm.vmovss_ripr(dest.encoding());
-    flt->uses.append(CodeOffset(j.offset()));
+    propagateOOM(flt->uses.append(CodeOffset(j.offset())));
 }
 
 void
 MacroAssemblerX64::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest)
 {
     MOZ_ASSERT(v.type() == SimdConstant::Int32x4);
     if (maybeInlineInt32x4(v, dest))
         return;
     SimdData* val = getSimdData(v);
     if (!val)
         return;
     MOZ_ASSERT(val->type() == SimdConstant::Int32x4);
     JmpSrc j = masm.vmovdqa_ripr(dest.encoding());
-    val->uses.append(CodeOffset(j.offset()));
+    propagateOOM(val->uses.append(CodeOffset(j.offset())));
 }
 
 void
 MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant&v, FloatRegister dest)
 {
     MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
     if (maybeInlineFloat32x4(v, dest))
         return;
     SimdData* val = getSimdData(v);
     if (!val)
         return;
     MOZ_ASSERT(val->type() == SimdConstant::Float32x4);
     JmpSrc j = masm.vmovaps_ripr(dest.encoding());
-    val->uses.append(CodeOffset(j.offset()));
+    propagateOOM(val->uses.append(CodeOffset(j.offset())));
 }
 
 void
 MacroAssemblerX64::bindOffsets(const MacroAssemblerX86Shared::UsesVector& uses)
 {
     for (CodeOffset use : uses) {
         JmpDst dst(currentOffset());
         JmpSrc src(use.offset());
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -94,77 +94,77 @@ void
 MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest)
 {
     if (maybeInlineDouble(d, dest))
         return;
     Double* dbl = getDouble(d);
     if (!dbl)
         return;
     masm.vmovsd_mr(nullptr, dest.encoding());
-    dbl->uses.append(CodeOffset(masm.size()));
+    propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
 }
 
 void
 MacroAssemblerX86::addConstantDouble(double d, FloatRegister dest)
 {
     Double* dbl = getDouble(d);
     if (!dbl)
         return;
     masm.vaddsd_mr(nullptr, dest.encoding(), dest.encoding());
-    dbl->uses.append(CodeOffset(masm.size()));
+    propagateOOM(dbl->uses.append(CodeOffset(masm.size())));
 }
 
 void
 MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest)
 {
     if (maybeInlineFloat(f, dest))
         return;
     Float* flt = getFloat(f);
     if (!flt)
         return;
     masm.vmovss_mr(nullptr, dest.encoding());
-    flt->uses.append(CodeOffset(masm.size()));
+    propagateOOM(flt->uses.append(CodeOffset(masm.size())));
 }
 
 void
 MacroAssemblerX86::addConstantFloat32(float f, FloatRegister dest)
 {
     Float* flt = getFloat(f);
     if (!flt)
         return;
     masm.vaddss_mr(nullptr, dest.encoding(), dest.encoding());
-    flt->uses.append(CodeOffset(masm.size()));
+    propagateOOM(flt->uses.append(CodeOffset(masm.size())));
 }
 
 void
 MacroAssemblerX86::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest)
 {
     MOZ_ASSERT(v.type() == SimdConstant::Int32x4);
     if (maybeInlineInt32x4(v, dest))
         return;
     SimdData* i4 = getSimdData(v);
     if (!i4)
         return;
     MOZ_ASSERT(i4->type() == SimdConstant::Int32x4);
     masm.vmovdqa_mr(nullptr, dest.encoding());
-    i4->uses.append(CodeOffset(masm.size()));
+    propagateOOM(i4->uses.append(CodeOffset(masm.size())));
 }
 
 void
 MacroAssemblerX86::loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest)
 {
     MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
     if (maybeInlineFloat32x4(v, dest))
         return;
     SimdData* f4 = getSimdData(v);
     if (!f4)
         return;
     MOZ_ASSERT(f4->type() == SimdConstant::Float32x4);
     masm.vmovaps_mr(nullptr, dest.encoding());
-    f4->uses.append(CodeOffset(masm.size()));
+    propagateOOM(f4->uses.append(CodeOffset(masm.size())));
 }
 
 void
 MacroAssemblerX86::finish()
 {
     if (!doubles_.empty())
         masm.haltingAlign(sizeof(double));
     for (const Double& d : doubles_) {