Bug 1445272: Add (entry/exit) stubs support for anyref; r=luke
author: Benjamin Bouvier <benj@benj.me>
Fri, 30 Mar 2018 16:31:40 +0200
changeset 466792 f8a4c128ffd4989884dac54155430cffb04b947a
parent 466791 670d462e97cbd93bf7856ee6793ecd530394c928
child 466793 7be1a707d56dd497e2125fec6b52f86265a2dc73
push id: 9165
push user: asasaki@mozilla.com
push date: Thu, 26 Apr 2018 21:04:54 +0000
treeherder: mozilla-beta@064c3804de2e [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: luke
bugs: 1445272
milestone: 61.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1445272: Add (entry/exit) stubs support for anyref; r=luke
js/src/vm/JSFunction.h
js/src/wasm/WasmBuiltins.cpp
js/src/wasm/WasmCode.cpp
js/src/wasm/WasmFrameIter.cpp
js/src/wasm/WasmInstance.cpp
js/src/wasm/WasmInstance.h
js/src/wasm/WasmJS.cpp
js/src/wasm/WasmStubs.cpp
js/src/wasm/WasmTypes.h
--- a/js/src/vm/JSFunction.h
+++ b/js/src/vm/JSFunction.h
@@ -90,16 +90,17 @@ class JSFunction : public js::NativeObje
         SETTER_KIND = Setter << FUNCTION_KIND_SHIFT,
 
         /* Derived Flags values for convenience: */
         NATIVE_FUN = 0,
         NATIVE_CTOR = NATIVE_FUN | CONSTRUCTOR,
         NATIVE_CLASS_CTOR = NATIVE_FUN | CONSTRUCTOR | CLASSCONSTRUCTOR_KIND,
         ASMJS_CTOR = ASMJS_KIND | NATIVE_CTOR,
         ASMJS_LAMBDA_CTOR = ASMJS_KIND | NATIVE_CTOR | LAMBDA,
+        ASMJS_NATIVE = ASMJS_KIND | NATIVE_FUN,
         WASM_FUN = NATIVE_FUN | WASM_OPTIMIZED,
         INTERPRETED_METHOD = INTERPRETED | METHOD_KIND,
         INTERPRETED_METHOD_GENERATOR_OR_ASYNC = INTERPRETED | METHOD_KIND,
         INTERPRETED_CLASS_CONSTRUCTOR = INTERPRETED | CLASSCONSTRUCTOR_KIND | CONSTRUCTOR,
         INTERPRETED_GETTER = INTERPRETED | GETTER_KIND,
         INTERPRETED_SETTER = INTERPRETED | SETTER_KIND,
         INTERPRETED_LAMBDA = INTERPRETED | LAMBDA | CONSTRUCTOR,
         INTERPRETED_LAMBDA_ARROW = INTERPRETED | LAMBDA | ARROW_KIND,
--- a/js/src/wasm/WasmBuiltins.cpp
+++ b/js/src/wasm/WasmBuiltins.cpp
@@ -531,16 +531,19 @@ AddressOf(SymbolicAddress imm, ABIFuncti
         *abiType = Args_General4;
         return FuncCast(Instance::callImport_i32, *abiType);
       case SymbolicAddress::CallImport_I64:
         *abiType = Args_General4;
         return FuncCast(Instance::callImport_i64, *abiType);
       case SymbolicAddress::CallImport_F64:
         *abiType = Args_General4;
         return FuncCast(Instance::callImport_f64, *abiType);
+      case SymbolicAddress::CallImport_Ref:
+        *abiType = Args_General4;
+        return FuncCast(Instance::callImport_ref, *abiType);
       case SymbolicAddress::CoerceInPlace_ToInt32:
         *abiType = Args_General1;
         return FuncCast(CoerceInPlace_ToInt32, *abiType);
       case SymbolicAddress::CoerceInPlace_ToNumber:
         *abiType = Args_General1;
         return FuncCast(CoerceInPlace_ToNumber, *abiType);
       case SymbolicAddress::CoerceInPlace_JitEntry:
         *abiType = Args_General3;
@@ -685,16 +688,17 @@ wasm::NeedsBuiltinThunk(SymbolicAddress 
       case SymbolicAddress::HandleThrow:              // GenerateThrowStub
       case SymbolicAddress::HandleTrap:               // GenerateTrapExit
       case SymbolicAddress::ReportOutOfBounds:        // GenerateOutOfBoundsExit
       case SymbolicAddress::ReportUnalignedAccess:    // GenerateUnalignedExit
       case SymbolicAddress::CallImport_Void:          // GenerateImportInterpExit
       case SymbolicAddress::CallImport_I32:
       case SymbolicAddress::CallImport_I64:
       case SymbolicAddress::CallImport_F64:
+      case SymbolicAddress::CallImport_Ref:
       case SymbolicAddress::CoerceInPlace_ToInt32:    // GenerateImportJitExit
       case SymbolicAddress::CoerceInPlace_ToNumber:
 #if defined(JS_CODEGEN_MIPS32)
       case SymbolicAddress::js_jit_gAtomic64Lock:
 #endif
         return false;
       case SymbolicAddress::ToInt32:
       case SymbolicAddress::DivI64:
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -604,32 +604,35 @@ LazyStubSegment::addStubs(size_t codeLen
     usedBytes_ += codeLength;
 
     *indexFirstInsertedCodeRange = codeRanges_.length();
 
     if (!codeRanges_.reserve(codeRanges_.length() + 2 * codeRanges.length()))
         return false;
 
     size_t i = 0;
-    for (DebugOnly<uint32_t> funcExportIndex : funcExportIndices) {
+    for (uint32_t funcExportIndex : funcExportIndices) {
         const CodeRange& interpRange = codeRanges[i];
         MOZ_ASSERT(interpRange.isInterpEntry());
         MOZ_ASSERT(interpRange.funcIndex() == funcExports[funcExportIndex].funcIndex());
 
         codeRanges_.infallibleAppend(interpRange);
         codeRanges_.back().offsetBy(offsetInSegment);
+        i++;
 
-        const CodeRange& jitRange = codeRanges[i + 1];
+        if (funcExports[funcExportIndex].sig().temporarilyUnsupportedAnyRef())
+            continue;
+
+        const CodeRange& jitRange = codeRanges[i];
         MOZ_ASSERT(jitRange.isJitEntry());
         MOZ_ASSERT(jitRange.funcIndex() == interpRange.funcIndex());
 
         codeRanges_.infallibleAppend(jitRange);
         codeRanges_.back().offsetBy(offsetInSegment);
-
-        i += 2;
+        i++;
     }
 
     return true;
 }
 
 const CodeRange*
 LazyStubSegment::lookupRange(const void* pc) const
 {
@@ -668,26 +671,28 @@ LazyStubTier::createMany(const Uint32Vec
     JitContext jitContext(&alloc);
     WasmMacroAssembler masm(alloc);
 
     const CodeRangeVector& moduleRanges = codeTier.metadata().codeRanges;
     const FuncExportVector& funcExports = codeTier.metadata().funcExports;
     uint8_t* moduleSegmentBase = codeTier.segment().base();
 
     CodeRangeVector codeRanges;
+    DebugOnly<uint32_t> numExpectedRanges = 0;
     for (uint32_t funcExportIndex : funcExportIndices) {
         const FuncExport& fe = funcExports[funcExportIndex];
+        numExpectedRanges += fe.sig().temporarilyUnsupportedAnyRef() ? 1 : 2;
         void* calleePtr = moduleSegmentBase +
                           moduleRanges[fe.interpCodeRangeIndex()].funcNormalEntry();
         Maybe<ImmPtr> callee;
         callee.emplace(calleePtr, ImmPtr::NoCheckToken());
         if (!GenerateEntryStubs(masm, funcExportIndex, fe, callee, /* asmjs*/ false, &codeRanges))
             return false;
     }
-    MOZ_ASSERT(codeRanges.length() == 2 * funcExportIndices.length(), "two entries per function");
+    MOZ_ASSERT(codeRanges.length() == numExpectedRanges, "incorrect number of entries per function");
 
     masm.finish();
 
     MOZ_ASSERT(!masm.numCodeLabels());
     MOZ_ASSERT(masm.callSites().empty());
     MOZ_ASSERT(masm.callSiteTargets().empty());
     MOZ_ASSERT(masm.callFarJumps().empty());
     MOZ_ASSERT(masm.trapSites().empty());
@@ -738,17 +743,19 @@ LazyStubTier::createMany(const Uint32Vec
 
         LazyFuncExport lazyExport(fe.funcIndex(), *stubSegmentIndex, interpRangeIndex);
 
         size_t exportIndex;
         MOZ_ALWAYS_FALSE(BinarySearch(ProjectLazyFuncIndex(exports_), 0, exports_.length(),
                                       fe.funcIndex(), &exportIndex));
         MOZ_ALWAYS_TRUE(exports_.insert(exports_.begin() + exportIndex, Move(lazyExport)));
 
-        interpRangeIndex += 2;
+        // Functions with anyref in their sig have only one entry (interp).
+        // All other functions get an extra jit entry.
+        interpRangeIndex += fe.sig().temporarilyUnsupportedAnyRef() ? 1 : 2;
     }
 
     return true;
 }
 
 bool
 LazyStubTier::createOne(uint32_t funcExportIndex, const CodeTier& codeTier)
 {
@@ -758,16 +765,23 @@ LazyStubTier::createOne(uint32_t funcExp
 
     size_t stubSegmentIndex;
     if (!createMany(funcExportIndexes, codeTier, &stubSegmentIndex))
         return false;
 
     const UniqueLazyStubSegment& segment = stubSegments_[stubSegmentIndex];
     const CodeRangeVector& codeRanges = segment->codeRanges();
 
+    // Functions that have anyref in their sig don't get a jit entry.
+    if (codeTier.metadata().funcExports[funcExportIndex].sig().temporarilyUnsupportedAnyRef()) {
+        MOZ_ASSERT(codeRanges.length() >= 1);
+        MOZ_ASSERT(codeRanges.back().isInterpEntry());
+        return true;
+    }
+
     MOZ_ASSERT(codeRanges.length() >= 2);
     MOZ_ASSERT(codeRanges[codeRanges.length() - 2].isInterpEntry());
 
     const CodeRange& cr = codeRanges[codeRanges.length() - 1];
     MOZ_ASSERT(cr.isJitEntry());
 
     codeTier.code().setJitEntry(cr.funcIndex(), segment->base() + cr.begin());
     return true;
--- a/js/src/wasm/WasmFrameIter.cpp
+++ b/js/src/wasm/WasmFrameIter.cpp
@@ -1134,16 +1134,17 @@ ThunkedNativeToDescription(SymbolicAddre
       case SymbolicAddress::HandleThrow:
       case SymbolicAddress::HandleTrap:
       case SymbolicAddress::ReportOutOfBounds:
       case SymbolicAddress::ReportUnalignedAccess:
       case SymbolicAddress::CallImport_Void:
       case SymbolicAddress::CallImport_I32:
       case SymbolicAddress::CallImport_I64:
       case SymbolicAddress::CallImport_F64:
+      case SymbolicAddress::CallImport_Ref:
       case SymbolicAddress::CoerceInPlace_ToInt32:
       case SymbolicAddress::CoerceInPlace_ToNumber:
         MOZ_ASSERT(!NeedsBuiltinThunk(func), "not in sync with NeedsBuiltinThunk");
         break;
       case SymbolicAddress::ToInt32:
         return "call to asm.js native ToInt32 coercion (in wasm)";
       case SymbolicAddress::DivI64:
         return "call to native i64.div_s (in wasm)";
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -128,16 +128,20 @@ Instance::callImport(JSContext* cx, uint
             args[i].set(Int32Value(*(int32_t*)&argv[i]));
             break;
           case ValType::F32:
             args[i].set(JS::CanonicalizedDoubleValue(*(float*)&argv[i]));
             break;
           case ValType::F64:
             args[i].set(JS::CanonicalizedDoubleValue(*(double*)&argv[i]));
             break;
+          case ValType::AnyRef: {
+            args[i].set(ObjectOrNullValue(*(JSObject**)&argv[i]));
+            break;
+          }
           case ValType::I64:
           case ValType::I8x16:
           case ValType::I16x8:
           case ValType::I32x4:
           case ValType::F32x4:
           case ValType::B8x16:
           case ValType::B16x8:
           case ValType::B32x4:
@@ -183,33 +187,38 @@ Instance::callImport(JSContext* cx, uint
     //
     // Note that the TypeScript is never discarded while the script has a
     // BaselineScript, so if those checks hold now they must hold at least until
     // the BaselineScript is discarded and when that happens the import is
     // patched back.
     if (!TypeScript::ThisTypes(script)->hasType(TypeSet::UndefinedType()))
         return true;
 
+    // Functions with anyref in signature don't have a jit exit at the moment.
+    if (fi.sig().temporarilyUnsupportedAnyRef())
+        return true;
+
     const ValTypeVector& importArgs = fi.sig().args();
 
     size_t numKnownArgs = Min(importArgs.length(), importFun->nargs());
     for (uint32_t i = 0; i < numKnownArgs; i++) {
         TypeSet::Type type = TypeSet::UnknownType();
         switch (importArgs[i]) {
-          case ValType::I32:   type = TypeSet::Int32Type(); break;
-          case ValType::F32:   type = TypeSet::DoubleType(); break;
-          case ValType::F64:   type = TypeSet::DoubleType(); break;
-          case ValType::I64:   MOZ_CRASH("NYI");
-          case ValType::I8x16: MOZ_CRASH("NYI");
-          case ValType::I16x8: MOZ_CRASH("NYI");
-          case ValType::I32x4: MOZ_CRASH("NYI");
-          case ValType::F32x4: MOZ_CRASH("NYI");
-          case ValType::B8x16: MOZ_CRASH("NYI");
-          case ValType::B16x8: MOZ_CRASH("NYI");
-          case ValType::B32x4: MOZ_CRASH("NYI");
+          case ValType::I32:    type = TypeSet::Int32Type(); break;
+          case ValType::F32:    type = TypeSet::DoubleType(); break;
+          case ValType::F64:    type = TypeSet::DoubleType(); break;
+          case ValType::AnyRef: MOZ_CRASH("case guarded above");
+          case ValType::I64:    MOZ_CRASH("NYI");
+          case ValType::I8x16:  MOZ_CRASH("NYI");
+          case ValType::I16x8:  MOZ_CRASH("NYI");
+          case ValType::I32x4:  MOZ_CRASH("NYI");
+          case ValType::F32x4:  MOZ_CRASH("NYI");
+          case ValType::B8x16:  MOZ_CRASH("NYI");
+          case ValType::B16x8:  MOZ_CRASH("NYI");
+          case ValType::B32x4:  MOZ_CRASH("NYI");
         }
         if (!TypeScript::ArgTypes(script, i)->hasType(type))
             return true;
     }
 
     // These arguments will be filled with undefined at runtime by the
     // arguments rectifier: check that the imported function can handle
     // undefined there.
@@ -260,16 +269,41 @@ Instance::callImport_f64(Instance* insta
     JSContext* cx = TlsContext.get();
     RootedValue rval(cx);
     if (!instance->callImport(cx, funcImportIndex, argc, argv, &rval))
         return false;
 
     return ToNumber(cx, rval, (double*)argv);
 }
 
+static bool
+ToRef(JSContext* cx, HandleValue val, void* addr)
+{
+    if (val.isNull()) {
+        *(JSObject**)addr = nullptr;
+        return true;
+    }
+
+    JSObject* obj = ToObject(cx, val);
+    if (!obj)
+        return false;
+    *(JSObject**)addr = obj;
+    return true;
+}
+
+/* static */ int32_t
+Instance::callImport_ref(Instance* instance, int32_t funcImportIndex, int32_t argc, uint64_t* argv)
+{
+    JSContext* cx = TlsContext.get();
+    RootedValue rval(cx);
+    if (!instance->callImport(cx, funcImportIndex, argc, argv, &rval))
+        return false;
+    return ToRef(cx, rval, argv);
+}
+
 /* static */ uint32_t
 Instance::growMemory_i32(Instance* instance, uint32_t delta)
 {
     MOZ_ASSERT(!instance->isAsmJS());
 
     JSContext* cx = TlsContext.get();
     RootedWasmMemoryObject memory(cx, instance->memory_);
 
@@ -667,16 +701,21 @@ Instance::callExport(JSContext* cx, uint
           case ValType::F32:
             if (!RoundFloat32(cx, v, (float*)&exportArgs[i]))
                 return false;
             break;
           case ValType::F64:
             if (!ToNumber(cx, v, (double*)&exportArgs[i]))
                 return false;
             break;
+          case ValType::AnyRef: {
+            if (!ToRef(cx, v, &exportArgs[i]))
+                return false;
+            break;
+          }
           case ValType::I8x16: {
             SimdConstant simd;
             if (!ToSimdConstant<Int8x16>(cx, v, &simd))
                 return false;
             memcpy(&exportArgs[i], simd.asInt8x16(), Simd128DataSize);
             break;
           }
           case ValType::I16x8: {
@@ -750,32 +789,38 @@ Instance::callExport(JSContext* cx, uint
         PlainObject* obj = NewBuiltinClassInstance<PlainObject>(cx);
         if (!obj)
             return false;
         args.rval().set(ObjectValue(*obj));
         return true;
     }
 
     void* retAddr = &exportArgs[0];
+
+    bool expectsObject = false;
     JSObject* retObj = nullptr;
     switch (func.sig().ret()) {
       case ExprType::Void:
         args.rval().set(UndefinedValue());
         break;
       case ExprType::I32:
         args.rval().set(Int32Value(*(int32_t*)retAddr));
         break;
       case ExprType::I64:
         MOZ_CRASH("unexpected i64 flowing from callExport");
       case ExprType::F32:
         args.rval().set(NumberValue(*(float*)retAddr));
         break;
       case ExprType::F64:
         args.rval().set(NumberValue(*(double*)retAddr));
         break;
+      case ExprType::AnyRef:
+        retObj = *(JSObject**)retAddr;
+        expectsObject = true;
+        break;
       case ExprType::I8x16:
         retObj = CreateSimd<Int8x16>(cx, (int8_t*)retAddr);
         if (!retObj)
             return false;
         break;
       case ExprType::I16x8:
         retObj = CreateSimd<Int16x8>(cx, (int16_t*)retAddr);
         if (!retObj)
@@ -805,17 +850,19 @@ Instance::callExport(JSContext* cx, uint
         retObj = CreateSimd<Bool32x4>(cx, (int32_t*)retAddr);
         if (!retObj)
             return false;
         break;
       case ExprType::Limit:
         MOZ_CRASH("Limit");
     }
 
-    if (retObj)
+    if (expectsObject)
+        args.rval().set(ObjectOrNullValue(retObj));
+    else if (retObj)
         args.rval().set(ObjectValue(*retObj));
 
     return true;
 }
 
 bool
 Instance::getFuncName(uint32_t funcIndex, UTF8Bytes* name) const
 {
--- a/js/src/wasm/WasmInstance.h
+++ b/js/src/wasm/WasmInstance.h
@@ -159,16 +159,17 @@ class Instance
                        size_t* data) const;
 
   public:
     // Functions to be called directly from wasm code.
     static int32_t callImport_void(Instance*, int32_t, int32_t, uint64_t*);
     static int32_t callImport_i32(Instance*, int32_t, int32_t, uint64_t*);
     static int32_t callImport_i64(Instance*, int32_t, int32_t, uint64_t*);
     static int32_t callImport_f64(Instance*, int32_t, int32_t, uint64_t*);
+    static int32_t callImport_ref(Instance*, int32_t, int32_t, uint64_t*);
     static uint32_t growMemory_i32(Instance* instance, uint32_t delta);
     static uint32_t currentMemory_i32(Instance* instance);
     static int32_t wait_i32(Instance* instance, uint32_t byteOffset, int32_t value, int64_t timeout);
     static int32_t wait_i64(Instance* instance, uint32_t byteOffset, int64_t value, int64_t timeout);
     static int32_t wake(Instance* instance, uint32_t byteOffset, int32_t count);
 };
 
 typedef UniquePtr<Instance> UniqueInstance;
--- a/js/src/wasm/WasmJS.cpp
+++ b/js/src/wasm/WasmJS.cpp
@@ -1286,21 +1286,33 @@ WasmInstanceObject::getExportedFunction(
                                      SingletonObject, JSFunction::ASMJS_CTOR));
         if (!fun)
             return false;
         fun->setAsmJSIndex(funcIndex);
     } else {
         RootedAtom name(cx, NumberToAtom(cx, funcIndex));
         if (!name)
             return false;
+
+        // Functions with anyref don't have jit entries yet, so they should
+        // mostly behave like asm.js functions. Pretend it's the case, until
+        // jit entries are implemented.
+        JSFunction::Flags flags = sig.temporarilyUnsupportedAnyRef()
+                                ? JSFunction::ASMJS_NATIVE
+                                : JSFunction::WASM_FUN;
+
         fun.set(NewNativeFunction(cx, WasmCall, numArgs, name, gc::AllocKind::FUNCTION_EXTENDED,
-                                  SingletonObject, JSFunction::WASM_FUN));
+                                  SingletonObject, flags));
         if (!fun)
             return false;
-        fun->setWasmJitEntry(instance.code().getAddressOfJitEntry(funcIndex));
+
+        if (sig.temporarilyUnsupportedAnyRef())
+            fun->setAsmJSIndex(funcIndex);
+        else
+            fun->setWasmJitEntry(instance.code().getAddressOfJitEntry(funcIndex));
     }
 
     fun->setExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT, ObjectValue(*instanceObj));
 
     void* tlsData = instanceObj->instance().tlsData();
     fun->setExtendedSlot(FunctionExtended::WASM_TLSDATA_SLOT, PrivateValue(tlsData));
 
     if (!instanceObj->exports().putNew(funcIndex, fun)) {
--- a/js/src/wasm/WasmStubs.cpp
+++ b/js/src/wasm/WasmStubs.cpp
@@ -87,16 +87,20 @@ SetupABIArguments(MacroAssembler& masm, 
         Address src(argv, argOffset);
         MIRType type = iter.mirType();
         switch (iter->kind()) {
           case ABIArg::GPR:
             if (type == MIRType::Int32)
                 masm.load32(src, iter->gpr());
             else if (type == MIRType::Int64)
                 masm.load64(src, iter->gpr64());
+            else if (type == MIRType::Pointer)
+                masm.loadPtr(src, iter->gpr());
+            else
+                MOZ_CRASH("unknown GPR type");
             break;
 #ifdef JS_CODEGEN_REGISTER_PAIR
           case ABIArg::GPR_PAIR:
             if (type == MIRType::Int64)
                 masm.load64(src, iter->gpr64());
             else
                 MOZ_CRASH("wasm uses hardfp for function calls.");
             break;
@@ -143,16 +147,20 @@ SetupABIArguments(MacroAssembler& masm, 
                 masm.store32(scratch, HighWord(Address(sp, iter->offsetFromArgBase())));
 #else
                 Register64 scratch64(scratch);
                 masm.load64(src, scratch64);
                 masm.store64(scratch64, Address(sp, iter->offsetFromArgBase()));
 #endif
                 break;
               }
+              case MIRType::Pointer:
+                masm.loadPtr(src, scratch);
+                masm.storePtr(scratch, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
+                break;
               case MIRType::Double:
                 masm.loadDouble(src, ScratchDoubleReg);
                 masm.storeDouble(ScratchDoubleReg,
                                  Address(masm.getStackPointer(), iter->offsetFromArgBase()));
                 break;
               case MIRType::Float32:
                 masm.loadFloat32(src, ScratchFloat32Reg);
                 masm.storeFloat32(ScratchFloat32Reg,
@@ -199,16 +207,19 @@ StoreABIReturn(MacroAssembler& masm, con
       case ExprType::F32:
         masm.canonicalizeFloat(ReturnFloat32Reg);
         masm.storeFloat32(ReturnFloat32Reg, Address(argv, 0));
         break;
       case ExprType::F64:
         masm.canonicalizeDouble(ReturnDoubleReg);
         masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
         break;
+      case ExprType::AnyRef:
+        masm.storePtr(ReturnReg, Address(argv, 0));
+        break;
       case ExprType::I8x16:
       case ExprType::I16x8:
       case ExprType::I32x4:
       case ExprType::B8x16:
       case ExprType::B16x8:
       case ExprType::B32x4:
         // We don't have control on argv alignment, do an unaligned access.
         masm.storeUnalignedSimd128Int(ReturnSimd128Reg, Address(argv, 0));
@@ -753,16 +764,19 @@ GenerateJitEntry(MacroAssembler& masm, s
         masm.canonicalizeFloat(ReturnFloat32Reg);
         masm.convertFloat32ToDouble(ReturnFloat32Reg, ReturnDoubleReg);
         masm.boxDouble(ReturnDoubleReg, JSReturnOperand, ScratchDoubleReg);
         break;
       case ExprType::F64:
         masm.canonicalizeDouble(ReturnDoubleReg);
         masm.boxDouble(ReturnDoubleReg, JSReturnOperand, ScratchDoubleReg);
         break;
+      case ExprType::AnyRef:
+        MOZ_CRASH("return anyref in jitentry NYI");
+        break;
       case ExprType::I64:
       case ExprType::I8x16:
       case ExprType::I16x8:
       case ExprType::I32x4:
       case ExprType::B8x16:
       case ExprType::B16x8:
       case ExprType::B32x4:
       case ExprType::F32x4:
@@ -840,16 +854,19 @@ StackCopy(MacroAssembler& masm, MIRType 
         masm.store32(scratch, LowWord(dst));
         masm.load32(HighWord(src), scratch);
         masm.store32(scratch, HighWord(dst));
 #else
         Register64 scratch64(scratch);
         masm.load64(src, scratch64);
         masm.store64(scratch64, dst);
 #endif
+    } else if (type == MIRType::Pointer) {
+        masm.loadPtr(src, scratch);
+        masm.storePtr(scratch, dst);
     } else if (type == MIRType::Float32) {
         masm.loadFloat32(src, ScratchFloat32Reg);
         masm.storeFloat32(ScratchFloat32Reg, dst);
     } else {
         MOZ_ASSERT(type == MIRType::Double);
         masm.loadDouble(src, ScratchDoubleReg);
         masm.storeDouble(ScratchDoubleReg, dst);
     }
@@ -873,18 +890,20 @@ FillArgumentArray(MacroAssembler& masm, 
                 else
                     masm.store32(i->gpr(), dst);
             } else if (type == MIRType::Int64) {
                 // We can't box int64 into Values (yet).
                 if (toValue)
                     masm.breakpoint();
                 else
                     masm.store64(i->gpr64(), dst);
-            } else {
-                MOZ_CRASH("unexpected input type?");
+            } else if (type == MIRType::Pointer) {
+                if (toValue)
+                    MOZ_CRASH("generating a jit exit for anyref NYI");
+                masm.storePtr(i->gpr(), dst);
             }
             break;
 #ifdef JS_CODEGEN_REGISTER_PAIR
           case ABIArg::GPR_PAIR:
             if (type == MIRType::Int64)
                 masm.store64(i->gpr64(), dst);
             else
                 MOZ_CRASH("wasm uses hardfp for function calls.");
@@ -921,16 +940,18 @@ FillArgumentArray(MacroAssembler& masm, 
             Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
             if (toValue) {
                 if (type == MIRType::Int32) {
                     masm.load32(src, scratch);
                     masm.storeValue(JSVAL_TYPE_INT32, scratch, dst);
                 } else if (type == MIRType::Int64) {
                     // We can't box int64 into Values (yet).
                     masm.breakpoint();
+                } else if (type == MIRType::Pointer) {
+                    MOZ_CRASH("generating a jit exit for anyref NYI");
                 } else {
                     MOZ_ASSERT(IsFloatingPointType(type));
                     if (type == MIRType::Float32) {
                         masm.loadFloat32(src, ScratchFloat32Reg);
                         masm.convertFloat32ToDouble(ScratchFloat32Reg, ScratchDoubleReg);
                     } else {
                         masm.loadDouble(src, ScratchDoubleReg);
                     }
@@ -1117,16 +1138,21 @@ GenerateImportInterpExit(MacroAssembler&
         masm.loadDouble(argv, ReturnDoubleReg);
         masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
         break;
       case ExprType::F64:
         masm.call(SymbolicAddress::CallImport_F64);
         masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
         masm.loadDouble(argv, ReturnDoubleReg);
         break;
+      case ExprType::AnyRef:
+        masm.call(SymbolicAddress::CallImport_Ref);
+        masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+        masm.loadPtr(argv, ReturnReg);
+        break;
       case ExprType::I8x16:
       case ExprType::I16x8:
       case ExprType::I32x4:
       case ExprType::F32x4:
       case ExprType::B8x16:
       case ExprType::B16x8:
       case ExprType::B32x4:
         MOZ_CRASH("SIMD types shouldn't be returned from a FFI");
@@ -1292,16 +1318,19 @@ GenerateImportJitExit(MacroAssembler& ma
         masm.breakpoint();
         break;
       case ExprType::F32:
         masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg, &oolConvert);
         break;
       case ExprType::F64:
         masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
         break;
+      case ExprType::AnyRef:
+        MOZ_CRASH("anyref returned by import (jit exit) NYI");
+        break;
       case ExprType::I8x16:
       case ExprType::I16x8:
       case ExprType::I32x4:
       case ExprType::F32x4:
       case ExprType::B8x16:
       case ExprType::B16x8:
       case ExprType::B32x4:
         MOZ_CRASH("SIMD types shouldn't be returned from an import");
@@ -1707,17 +1736,17 @@ wasm::GenerateEntryStubs(MacroAssembler&
     MOZ_ASSERT_IF(isAsmJS, fe.hasEagerStubs());
 
     Offsets offsets;
     if (!GenerateInterpEntry(masm, fe, callee, &offsets))
         return false;
     if (!codeRanges->emplaceBack(CodeRange::InterpEntry, fe.funcIndex(), offsets))
         return false;
 
-    if (isAsmJS)
+    if (isAsmJS || fe.sig().temporarilyUnsupportedAnyRef())
         return true;
 
     if (!GenerateJitEntry(masm, funcExportIndex, fe, callee, &offsets))
         return false;
     if (!codeRanges->emplaceBack(CodeRange::JitEntry, fe.funcIndex(), offsets))
         return false;
 
     return true;
@@ -1743,16 +1772,19 @@ wasm::GenerateStubs(const ModuleEnvironm
         const FuncImport& fi = imports[funcIndex];
 
         CallableOffsets interpOffsets;
         if (!GenerateImportInterpExit(masm, fi, funcIndex, &throwLabel, &interpOffsets))
             return false;
         if (!code->codeRanges.emplaceBack(CodeRange::ImportInterpExit, funcIndex, interpOffsets))
             return false;
 
+        if (fi.sig().temporarilyUnsupportedAnyRef())
+            continue;
+
         JitExitOffsets jitOffsets;
         if (!GenerateImportJitExit(masm, fi, &throwLabel, &jitOffsets))
             return false;
         if (!code->codeRanges.emplaceBack(funcIndex, jitOffsets))
             return false;
     }
 
     JitSpew(JitSpew_Codegen, "# Emitting wasm export stubs");
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -586,18 +586,27 @@ class Sig
     }
     bool operator!=(const Sig& rhs) const {
         return !(*this == rhs);
     }
 
     bool hasI64ArgOrRet() const {
         if (ret() == ExprType::I64)
             return true;
-        for (ValType a : args()) {
-            if (a == ValType::I64)
+        for (ValType arg : args()) {
+            if (arg == ValType::I64)
+                return true;
+        }
+        return false;
+    }
+    bool temporarilyUnsupportedAnyRef() const {
+        if (ret() == ExprType::AnyRef)
+            return true;
+        for (ValType arg : args()) {
+            if (arg == ValType::AnyRef)
                 return true;
         }
         return false;
     }
 
     WASM_DECLARE_SERIALIZABLE(Sig)
 };
 
@@ -1454,16 +1463,17 @@ enum class SymbolicAddress
     HandleTrap,
     ReportOutOfBounds,
     ReportUnalignedAccess,
     ReportInt64JSCall,
     CallImport_Void,
     CallImport_I32,
     CallImport_I64,
     CallImport_F64,
+    CallImport_Ref,
     CoerceInPlace_ToInt32,
     CoerceInPlace_ToNumber,
     CoerceInPlace_JitEntry,
     DivI64,
     UDivI64,
     ModI64,
     UModI64,
     TruncateDoubleToInt64,