Bug 1445272: Implement basic anyref support in the baseline compiler; r=lth
author: Benjamin Bouvier <benj@benj.me>
date: Wed, 11 Apr 2018 19:03:04 +0200
changeset 413260 7be1a707d56dd497e2125fec6b52f86265a2dc73
parent 413259 f8a4c128ffd4989884dac54155430cffb04b947a
child 413261 3d46648331c1bd98b2f856f543d315fd87890f91
push id: 33840
push user: apavel@mozilla.com
push date: Fri, 13 Apr 2018 21:56:54 +0000
treeherder: mozilla-central@6547c27303bc [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: lth
bugs: 1445272
milestone: 61.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1445272: Implement basic anyref support in the baseline compiler; r=lth
js/src/jit-test/tests/wasm/gc/anyref.js
js/src/jit-test/tests/wasm/gc/directives.txt
js/src/jit-test/tests/wasm/gc/disabled.js
js/src/wasm/WasmBaselineCompile.cpp
js/src/wasm/WasmBinaryConstants.h
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/anyref.js
@@ -0,0 +1,383 @@
+// Ensure that if gc types aren't enabled, test cases properly fail.
+
+if (!wasmGcEnabled()) {
+    quit(0);
+}
+
+// Dummy constructor.
+function Baguette(calories) {
+    this.calories = calories;
+}
+
+// Type checking.
+
+const { validate, CompileError } = WebAssembly;
+
+assertErrorMessage(() => wasmEvalText(`(module
+    (func (result anyref)
+        i32.const 42
+    )
+)`), CompileError, mismatchError('i32', 'anyref'));
+
+assertErrorMessage(() => wasmEvalText(`(module
+    (func (result anyref)
+        i32.const 0
+        ref.null anyref
+        i32.const 42
+        select
+    )
+)`), CompileError, /select operand types/);
+
+assertErrorMessage(() => wasmEvalText(`(module
+    (func (result i32)
+        ref.null anyref
+        if
+            i32.const 42
+        end
+    )
+)`), CompileError, mismatchError('anyref', 'i32'));
+
+
+// Basic compilation tests.
+
+let simpleTests = [
+    "(module (func (drop (ref.null anyref))))",
+    "(module (func $test (local anyref)))",
+    "(module (func $test (param anyref)))",
+    "(module (func $test (result anyref) (ref.null anyref)))",
+    "(module (func $test (block anyref (unreachable)) unreachable))",
+    "(module (func $test (local anyref) (result i32) (ref.is_null (get_local 0))))",
+    `(module (import "a" "b" (param anyref)))`,
+    `(module (import "a" "b" (result anyref)))`,
+];
+
+for (let src of simpleTests) {
+    wasmEvalText(src, {a:{b(){}}});
+    assertEq(validate(wasmTextToBinary(src)), true);
+}
+
+// Basic behavioral tests.
+
+let { exports } = wasmEvalText(`(module
+    (func (export "is_null") (result i32)
+        ref.null anyref
+        ref.is_null
+    )
+
+    (func $sum (result i32) (param i32)
+        get_local 0
+        i32.const 42
+        i32.add
+    )
+
+    (func (export "is_null_spill") (result i32)
+        ref.null anyref
+        i32.const 58
+        call $sum
+        drop
+        ref.is_null
+    )
+
+    (func (export "is_null_local") (result i32) (local anyref)
+        ref.null anyref
+        set_local 0
+        i32.const 58
+        call $sum
+        drop
+        get_local 0
+        ref.is_null
+    )
+)`);
+
+assertEq(exports.is_null(), 1);
+assertEq(exports.is_null_spill(), 1);
+assertEq(exports.is_null_local(), 1);
+
+// Anyref param and result in wasm functions.
+
+exports = wasmEvalText(`(module
+    (func (export "is_null") (result i32) (param $ref anyref)
+        get_local $ref
+        ref.is_null
+    )
+
+    (func (export "ref_or_null") (result anyref) (param $ref anyref) (param $selector i32)
+        get_local $ref
+        ref.null anyref
+        get_local $selector
+        select
+    )
+
+    (func $recursive (export "nested") (result anyref) (param $ref anyref) (param $i i32)
+        ;; i == 10 => ret $ref
+        get_local $i
+        i32.const 10
+        i32.eq
+        if
+            get_local $ref
+            return
+        end
+
+        get_local $ref
+
+        get_local $i
+        i32.const 1
+        i32.add
+
+        call $recursive
+    )
+)`).exports;
+
+assertErrorMessage(() => exports.is_null(undefined), TypeError, "can't convert undefined to object");
+assertEq(exports.is_null(null), 1);
+assertEq(exports.is_null({}), 0);
+assertEq(exports.is_null("hi"), 0);
+assertEq(exports.is_null(3), 0);
+assertEq(exports.is_null(3.5), 0);
+assertEq(exports.is_null(true), 0);
+assertEq(exports.is_null(Symbol("croissant")), 0);
+assertEq(exports.is_null(new Baguette(100)), 0);
+
+let baguette = new Baguette(42);
+assertEq(exports.ref_or_null(null, 0), null);
+assertEq(exports.ref_or_null(baguette, 0), null);
+
+let ref = exports.ref_or_null(baguette, 1);
+assertEq(ref, baguette);
+assertEq(ref.calories, baguette.calories);
+
+ref = exports.nested(baguette, 0);
+assertEq(ref, baguette);
+assertEq(ref.calories, baguette.calories);
+
+// More interesting use cases about control flow joins.
+
+function assertJoin(body) {
+    let val = { i: -1 };
+    assertEq(wasmEvalText(`(module
+        (func (export "test") (param $ref anyref) (param $i i32) (result anyref)
+            ${body}
+        )
+    )`).exports.test(val), val);
+    assertEq(val.i, -1);
+}
+
+assertJoin("(block anyref get_local $ref)");
+assertJoin("(block $out anyref get_local $ref br $out)");
+assertJoin("(loop anyref get_local $ref)");
+
+assertJoin(`(block $out anyref (loop $top anyref
+    get_local $i
+    i32.const 1
+    i32.add
+    tee_local $i
+    i32.const 10
+    i32.eq
+    if
+        get_local $ref
+        return
+    end
+    br $top))
+`);
+
+assertJoin(`(block $out (loop $top
+    get_local $i
+    i32.const 1
+    i32.add
+    tee_local $i
+    i32.const 10
+    i32.le_s
+    if
+        br $top
+    else
+        get_local $ref
+        return
+    end
+    )) unreachable
+`);
+
+assertJoin(`(block $out anyref (loop $top
+    get_local $ref
+    get_local $i
+    i32.const 1
+    i32.add
+    tee_local $i
+    i32.const 10
+    i32.eq
+    br_if $out
+    br $top
+    ) unreachable)
+`);
+
+assertJoin(`(block $out anyref (block $unreachable anyref (loop $top
+    get_local $ref
+    get_local $i
+    i32.const 1
+    i32.add
+    tee_local $i
+    br_table $unreachable $out
+    ) unreachable))
+`);
+
+let x = { i: 42 }, y = { f: 53 };
+exports = wasmEvalText(`(module
+    (func (export "test") (param $lhs anyref) (param $rhs anyref) (param $i i32) (result anyref)
+        get_local $lhs
+        get_local $rhs
+        get_local $i
+        select
+    )
+)`).exports;
+
+let result = exports.test(x, y, 0);
+assertEq(result, y);
+assertEq(result.i, undefined);
+assertEq(result.f, 53);
+assertEq(x.i, 42);
+
+result = exports.test(x, y, 1);
+assertEq(result, x);
+assertEq(result.i, 42);
+assertEq(result.f, undefined);
+assertEq(y.f, 53);
+
+// Anyref in params/result of imported functions.
+
+let firstBaguette = new Baguette(13),
+    secondBaguette = new Baguette(37);
+
+let imports = {
+    i: 0,
+    myBaguette: null,
+    funcs: {
+        param(x) {
+            if (this.i === 0) {
+                assertEq(x, firstBaguette);
+                assertEq(x.calories, 13);
+                assertEq(secondBaguette !== null, true);
+            } else if (this.i === 1 || this.i === 2) {
+                assertEq(x, secondBaguette);
+                assertEq(x.calories, 37);
+                assertEq(firstBaguette !== null, true);
+            } else if (this.i === 3) {
+                assertEq(x, null);
+            } else {
+                firstBaguette = null;
+                secondBaguette = null;
+                gc(); // evil mode
+            }
+            this.i++;
+        },
+        ret() {
+            return imports.myBaguette;
+        }
+    }
+};
+
+exports = wasmEvalText(`(module
+    (import $ret "funcs" "ret" (result anyref))
+    (import $param "funcs" "param" (param anyref))
+
+    (func (export "param") (param $x anyref) (param $y anyref)
+        get_local $y
+        get_local $x
+        call $param
+        call $param
+    )
+
+    (func (export "ret") (result anyref)
+        call $ret
+    )
+)`, imports).exports;
+
+exports.param(firstBaguette, secondBaguette);
+exports.param(secondBaguette, null);
+exports.param(firstBaguette, secondBaguette);
+
+imports.myBaguette = null;
+assertEq(exports.ret(), null);
+
+imports.myBaguette = new Baguette(1337);
+assertEq(exports.ret(), imports.myBaguette);
+
+// Check lazy stubs generation.
+
+exports = wasmEvalText(`(module
+    (import $mirror "funcs" "mirror" (param anyref) (result anyref))
+    (import $augment "funcs" "augment" (param anyref) (result anyref))
+
+    (global $count_f (mut i32) (i32.const 0))
+    (global $count_g (mut i32) (i32.const 0))
+
+    (func $f (param $param anyref) (result anyref)
+        i32.const 1
+        get_global $count_f
+        i32.add
+        set_global $count_f
+
+        get_local $param
+        call $augment
+    )
+
+    (func $g (param $param anyref) (result anyref)
+        i32.const 1
+        get_global $count_g
+        i32.add
+        set_global $count_g
+
+        get_local $param
+        call $mirror
+    )
+
+    (table (export "table") 10 anyfunc)
+    (elem (i32.const 0) $f $g $mirror $augment)
+    (type $table_type (func (param anyref) (result anyref)))
+
+    (func (export "call_indirect") (param $i i32) (param $ref anyref) (result anyref)
+        get_local $ref
+        get_local $i
+        call_indirect $table_type
+    )
+
+    (func (export "count_f") (result i32) get_global $count_f)
+    (func (export "count_g") (result i32) get_global $count_g)
+)`, {
+    funcs: {
+        mirror(x) {
+            return x;
+        },
+        augment(x) {
+            x.i++;
+            x.newProp = "hello";
+            return x;
+        }
+    }
+}).exports;
+
+x = { i: 19 };
+assertEq(exports.table.get(0)(x), x);
+assertEq(x.i, 20);
+assertEq(x.newProp, "hello");
+assertEq(exports.count_f(), 1);
+assertEq(exports.count_g(), 0);
+
+x = { i: 21 };
+assertEq(exports.table.get(1)(x), x);
+assertEq(x.i, 21);
+assertEq(typeof x.newProp, "undefined");
+assertEq(exports.count_f(), 1);
+assertEq(exports.count_g(), 1);
+
+x = { i: 22 };
+assertEq(exports.table.get(2)(x), x);
+assertEq(x.i, 22);
+assertEq(typeof x.newProp, "undefined");
+assertEq(exports.count_f(), 1);
+assertEq(exports.count_g(), 1);
+
+x = { i: 23 };
+assertEq(exports.table.get(3)(x), x);
+assertEq(x.i, 24);
+assertEq(x.newProp, "hello");
+assertEq(exports.count_f(), 1);
+assertEq(exports.count_g(), 1);
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/directives.txt
@@ -0,0 +1,1 @@
+|jit-test| test-also=--wasm-gc; include:wasm.js
new file mode 100644
--- /dev/null
+++ b/js/src/jit-test/tests/wasm/gc/disabled.js
@@ -0,0 +1,28 @@
+if (wasmGcEnabled()) {
+    quit();
+}
+
+const { CompileError, validate } = WebAssembly;
+
+const UNRECOGNIZED_OPCODE_OR_BAD_TYPE = /(unrecognized opcode|bad type|invalid inline block type)/;
+
+function assertValidateError(text) {
+    assertEq(validate(wasmTextToBinary(text)), false);
+}
+
+let simpleTests = [
+    "(module (func (drop (ref.null anyref))))",
+    "(module (func $test (local anyref)))",
+    "(module (func $test (param anyref)))",
+    "(module (func $test (result anyref) (ref.null anyref)))",
+    "(module (func $test (block anyref (unreachable)) unreachable))",
+    "(module (func $test (local anyref) (result i32) (ref.is_null (get_local 0))))",
+    `(module (import "a" "b" (param anyref)))`,
+    `(module (import "a" "b" (result anyref)))`,
+];
+
+for (let src of simpleTests) {
+    print(src)
+    assertErrorMessage(() => wasmEvalText(src), CompileError, UNRECOGNIZED_OPCODE_OR_BAD_TYPE);
+    assertValidateError(src);
+}
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -291,16 +291,25 @@ struct RegI64 : public Register64
 {
     RegI64() : Register64(Register64::Invalid()) {}
     explicit RegI64(Register64 reg) : Register64(reg) {}
     bool isValid() const { return *this != Invalid(); }
     bool isInvalid() const { return !isValid(); }
     static RegI64 Invalid() { return RegI64(Register64::Invalid()); }
 };
 
+struct RegPtr : public Register
+{
+    RegPtr() : Register(Register::Invalid()) {}
+    explicit RegPtr(Register reg) : Register(reg) {}
+    bool isValid() const { return *this != Invalid(); }
+    bool isInvalid() const { return !isValid(); }
+    static RegPtr Invalid() { return RegPtr(Register::Invalid()); }
+};
+
 struct RegF32 : public FloatRegister
 {
     RegF32() : FloatRegister() {}
     explicit RegF32(FloatRegister reg) : FloatRegister(reg) {}
     bool isValid() const { return *this != Invalid(); }
     bool isInvalid() const { return !isValid(); }
     static RegF32 Invalid() { return RegF32(InvalidFloatReg); }
 };
@@ -311,20 +320,31 @@ struct RegF64 : public FloatRegister
     explicit RegF64(FloatRegister reg) : FloatRegister(reg) {}
     bool isValid() const { return *this != Invalid(); }
     bool isInvalid() const { return !isValid(); }
     static RegF64 Invalid() { return RegF64(InvalidFloatReg); }
 };
 
 struct AnyReg
 {
+    union {
+        RegI32 i32_;
+        RegI64 i64_;
+        RegPtr ref_;
+        RegF32 f32_;
+        RegF64 f64_;
+    };
+
+    enum { I32, I64, REF, F32, F64 } tag;
+
     explicit AnyReg(RegI32 r) { tag = I32; i32_ = r; }
     explicit AnyReg(RegI64 r) { tag = I64; i64_ = r; }
     explicit AnyReg(RegF32 r) { tag = F32; f32_ = r; }
     explicit AnyReg(RegF64 r) { tag = F64; f64_ = r; }
+    explicit AnyReg(RegPtr r) { tag = REF; ref_ = r; }
 
     RegI32 i32() const {
         MOZ_ASSERT(tag == I32);
         return i32_;
     }
     RegI64 i64() const {
         MOZ_ASSERT(tag == I64);
         return i64_;
@@ -332,45 +352,44 @@ struct AnyReg
     RegF32 f32() const {
         MOZ_ASSERT(tag == F32);
         return f32_;
     }
     RegF64 f64() const {
         MOZ_ASSERT(tag == F64);
         return f64_;
     }
+    RegPtr ref() const {
+        MOZ_ASSERT(tag == REF);
+        return ref_;
+    }
+
     AnyRegister any() const {
         switch (tag) {
           case F32: return AnyRegister(f32_);
           case F64: return AnyRegister(f64_);
           case I32: return AnyRegister(i32_);
           case I64:
 #ifdef JS_PUNBOX64
             return AnyRegister(i64_.reg);
 #else
             // The compiler is written so that this is never needed: any() is
             // called on arbitrary registers for asm.js but asm.js does not have
             // 64-bit ints.  For wasm, any() is called on arbitrary registers
             // only on 64-bit platforms.
             MOZ_CRASH("AnyReg::any() on 32-bit platform");
 #endif
+          case REF:
+            MOZ_CRASH("AnyReg::any() not implemented for ref types");
           default:
             MOZ_CRASH();
         }
         // Work around GCC 5 analysis/warning bug.
         MOZ_CRASH("AnyReg::any(): impossible case");
     }
-
-    union {
-        RegI32 i32_;
-        RegI64 i64_;
-        RegF32 f32_;
-        RegF64 f64_;
-    };
-    enum { I32, I64, F32, F64 } tag;
 };
 
 // Platform-specific registers.
 //
 // All platforms must define struct SpecificRegs.  All 32-bit platforms must
 // have an abiReturnRegI64 member in that struct.
 
 #if defined(JS_CODEGEN_X64)
@@ -686,16 +705,20 @@ class BaseRegAlloc
     bool isAvailableI64(RegI64 r) {
 #ifdef JS_PUNBOX64
         return isAvailableGPR(r.reg);
 #else
         return isAvailableGPR(r.low) && isAvailableGPR(r.high);
 #endif
     }
 
+    bool isAvailablePtr(RegPtr r) {
+        return isAvailableGPR(r);
+    }
+
     bool isAvailableF32(RegF32 r) {
         return isAvailableFPU(r);
     }
 
     bool isAvailableF64(RegF64 r) {
         return isAvailableFPU(r);
     }
 
@@ -721,16 +744,28 @@ class BaseRegAlloc
     }
 
     void needI64(RegI64 specific) {
         if (!isAvailableI64(specific))
             bc.sync();
         allocInt64(specific);
     }
 
+    MOZ_MUST_USE RegPtr needPtr() {
+        if (!hasGPR())
+            bc.sync();
+        return RegPtr(allocGPR());
+    }
+
+    void needPtr(RegPtr specific) {
+        if (!isAvailablePtr(specific))
+            bc.sync();
+        allocGPR(specific);
+    }
+
     MOZ_MUST_USE RegF32 needF32() {
         if (!hasFPU<MIRType::Float32>())
             bc.sync();
         return RegF32(allocFPU<MIRType::Float32>());
     }
 
     void needF32(RegF32 specific) {
         if (!isAvailableF32(specific))
@@ -753,16 +788,20 @@ class BaseRegAlloc
     void freeI32(RegI32 r) {
         freeGPR(r);
     }
 
     void freeI64(RegI64 r) {
         freeInt64(r);
     }
 
+    void freePtr(RegPtr r) {
+        freeGPR(r);
+    }
+
     void freeF64(RegF64 r) {
         freeFPU(r);
     }
 
     void freeF32(RegF32 r) {
         freeFPU(r);
     }
 
@@ -812,16 +851,20 @@ class BaseRegAlloc
 
         void addKnownF32(RegF32 r) {
             knownFPU_.add(r);
         }
 
         void addKnownF64(RegF64 r) {
             knownFPU_.add(r);
         }
+
+        void addKnownRef(RegPtr r) {
+            knownGPR_.add(r);
+        }
     };
 #endif
 };
 
 // Scratch register abstractions.
 //
 // We define our own scratch registers when the platform doesn't provide what we
 // need.  A notable use case is that we will need a private scratch register
@@ -883,33 +926,38 @@ class ScratchF32 : public ScratchFloat32
 {
   public:
     explicit ScratchF32(MacroAssembler& m) : ScratchFloat32Scope(m) {}
     operator RegF32() const { return RegF32(FloatRegister(*this)); }
 };
 #endif
 
 #ifdef RABALDR_SCRATCH_I32
-class ScratchI32 : public BaseScratchRegister
+template<class RegType>
+class ScratchGPR : public BaseScratchRegister
 {
   public:
-    explicit ScratchI32(BaseRegAlloc& ra)
+    explicit ScratchGPR(BaseRegAlloc& ra)
       : BaseScratchRegister(ra, BaseRegAlloc::ScratchKind::I32)
     {}
-    operator RegI32() const { return RegI32(RabaldrScratchI32); }
+    operator RegType() const { return RegType(RabaldrScratchI32); }
 };
 #else
-class ScratchI32 : public ScratchRegisterScope
+template<class RegType>
+class ScratchGPR : public ScratchRegisterScope
 {
   public:
-    explicit ScratchI32(MacroAssembler& m) : ScratchRegisterScope(m) {}
-    operator RegI32() const { return RegI32(Register(*this)); }
+    explicit ScratchGPR(MacroAssembler& m) : ScratchRegisterScope(m) {}
+    operator RegType() const { return RegType(Register(*this)); }
 };
 #endif
 
+using ScratchI32 = ScratchGPR<RegI32>;
+using ScratchPtr = ScratchGPR<RegPtr>;
+
 #if defined(JS_CODEGEN_X86)
 // ScratchEBX is a mnemonic device: For some atomic ops we really need EBX,
 // no other register will do.  And we would normally have to allocate that
 // register using ScratchI32 since normally the scratch register is EBX.
 // But the whole point of ScratchI32 is to hide that relationship.  By using
 // the ScratchEBX alias, we document that at that point we require the
 // scratch register to be EBX.
 using ScratchEBX = ScratchI32;
@@ -980,16 +1028,17 @@ BaseLocalIter::settle()
     if (index_ < argsLength_) {
         MOZ_ASSERT(!argsIter_.done());
         mirType_ = argsIter_.mirType();
         switch (mirType_) {
           case MIRType::Int32:
           case MIRType::Int64:
           case MIRType::Double:
           case MIRType::Float32:
+          case MIRType::Pointer:
             if (argsIter_->argInRegister())
                 frameOffset_ = pushLocal(MIRTypeToSize(mirType_));
             else
                 frameOffset_ = -(argsIter_->offsetFromArgBase() + sizeof(Frame));
             break;
           default:
             MOZ_CRASH("Argument type");
         }
@@ -998,16 +1047,17 @@ BaseLocalIter::settle()
 
     MOZ_ASSERT(argsIter_.done());
     if (index_ < locals_.length()) {
         switch (locals_[index_]) {
           case ValType::I32:
           case ValType::I64:
           case ValType::F32:
           case ValType::F64:
+          case ValType::AnyRef:
             mirType_ = ToMIRType(locals_[index_]);
             frameOffset_ = pushLocal(MIRTypeToSize(mirType_));
             break;
           default:
             MOZ_CRASH("Compiler bug: Unexpected local type");
         }
         return;
     }
@@ -1237,17 +1287,17 @@ class BaseStackFrame
         masm.load32(Address(sp_, localOffset(src) + INT64HIGH_OFFSET), dest);
     }
 #endif
 
     void loadLocalI64(const Local& src, RegI64 dest) {
         masm.load64(Address(sp_, localOffset(src)), dest);
     }
 
-    void loadLocalPtr(const Local& src, Register dest) {
+    void loadLocalPtr(const Local& src, RegPtr dest) {
         masm.loadPtr(Address(sp_, localOffset(src)), dest);
     }
 
     void loadLocalF64(const Local& src, RegF64 dest) {
         masm.loadDouble(Address(sp_, localOffset(src)), dest);
     }
 
     void loadLocalF32(const Local& src, RegF32 dest) {
@@ -1546,17 +1596,17 @@ class BaseStackFrame
     void loadStackI64High(int32_t offset, RegI32 dest) {
         masm.load32(Address(sp_, stackOffset(offset - INT64HIGH_OFFSET)), dest);
     }
 #endif
 
     // Disambiguation: this loads a "Ptr" value from the stack, it does not load
     // the "StackPtr".
 
-    void loadStackPtr(int32_t offset, Register dest) {
+    void loadStackPtr(int32_t offset, RegPtr dest) {
         masm.loadPtr(Address(sp_, stackOffset(offset)), dest);
     }
 
     void loadStackF64(int32_t offset, RegF64 dest) {
         masm.loadDouble(Address(sp_, stackOffset(offset)), dest);
     }
 
     void loadStackF32(int32_t offset, RegF32 dest) {
@@ -1857,16 +1907,17 @@ class BaseCompiler final : public BaseCo
     SpecificRegs                specific_;
 
     // The join registers are used to carry values out of blocks.
     // JoinRegI32 and joinRegI64_ must overlap: emitBrIf and
     // emitBrTable assume that.
 
     RegI32 joinRegI32_;
     RegI64 joinRegI64_;
+    RegPtr joinRegPtr_;
     RegF32 joinRegF32_;
     RegF64 joinRegF64_;
 
     // There are more members scattered throughout.
 
   public:
     BaseCompiler(const ModuleEnvironment& env,
                  const FuncCompileInput& input,
@@ -1919,35 +1970,39 @@ class BaseCompiler final : public BaseCo
     }
 
     ////////////////////////////////////////////////////////////
     //
     // High-level register management.
 
     bool isAvailableI32(RegI32 r) { return ra.isAvailableI32(r); }
     bool isAvailableI64(RegI64 r) { return ra.isAvailableI64(r); }
+    bool isAvailableRef(RegPtr r) { return ra.isAvailablePtr(r); }
     bool isAvailableF32(RegF32 r) { return ra.isAvailableF32(r); }
     bool isAvailableF64(RegF64 r) { return ra.isAvailableF64(r); }
 
     MOZ_MUST_USE RegI32 needI32() { return ra.needI32(); }
     MOZ_MUST_USE RegI64 needI64() { return ra.needI64(); }
+    MOZ_MUST_USE RegPtr needRef() { return ra.needPtr(); }
     MOZ_MUST_USE RegF32 needF32() { return ra.needF32(); }
     MOZ_MUST_USE RegF64 needF64() { return ra.needF64(); }
 
     void needI32(RegI32 specific) { ra.needI32(specific); }
     void needI64(RegI64 specific) { ra.needI64(specific); }
+    void needRef(RegPtr specific) { ra.needPtr(specific); }
     void needF32(RegF32 specific) { ra.needF32(specific); }
     void needF64(RegF64 specific) { ra.needF64(specific); }
 
 #if defined(JS_CODEGEN_ARM)
     MOZ_MUST_USE RegI64 needI64Pair() { return ra.needI64Pair(); }
 #endif
 
     void freeI32(RegI32 r) { ra.freeI32(r); }
     void freeI64(RegI64 r) { ra.freeI64(r); }
+    void freeRef(RegPtr r) { ra.freePtr(r); }
     void freeF32(RegF32 r) { ra.freeF32(r); }
     void freeF64(RegF64 r) { ra.freeF64(r); }
 
     void freeI64Except(RegI64 r, RegI32 except) {
 #ifdef JS_PUNBOX64
         MOZ_ASSERT(r.reg == except);
 #else
         MOZ_ASSERT(r.high == except || r.low == except);
@@ -2013,16 +2068,20 @@ class BaseCompiler final : public BaseCo
 #ifdef JS_PUNBOX64
         return RegI32(r.reg);
 #else
         freeI32(RegI32(r.high));
         return RegI32(r.low);
 #endif
     }
 
+    RegI32 narrowPtr(RegPtr r) {
+        return RegI32(r);
+    }
+
     RegI32 lowPart(RegI64 r) {
 #ifdef JS_PUNBOX64
         return RegI32(r.reg);
 #else
         return RegI32(r.low);
 #endif
     }
 
@@ -2045,54 +2104,66 @@ class BaseCompiler final : public BaseCo
             masm.move32(src, dest);
     }
 
     void moveI64(RegI64 src, RegI64 dest) {
         if (src != dest)
             masm.move64(src, dest);
     }
 
+    void moveRef(RegPtr src, RegPtr dest) {
+        if (src != dest)
+            masm.movePtr(src, dest);
+    }
+
     void moveF64(RegF64 src, RegF64 dest) {
         if (src != dest)
             masm.moveDouble(src, dest);
     }
 
     void moveF32(RegF32 src, RegF32 dest) {
         if (src != dest)
             masm.moveFloat32(src, dest);
     }
 
     void maybeReserveJoinRegI(ExprType type) {
         if (type == ExprType::I32)
             needI32(joinRegI32_);
         else if (type == ExprType::I64)
             needI64(joinRegI64_);
+        else if (type == ExprType::AnyRef)
+            needRef(joinRegPtr_);
     }
 
     void maybeUnreserveJoinRegI(ExprType type) {
         if (type == ExprType::I32)
             freeI32(joinRegI32_);
         else if (type == ExprType::I64)
             freeI64(joinRegI64_);
+        else if (type == ExprType::AnyRef)
+            freeRef(joinRegPtr_);
     }
 
     void maybeReserveJoinReg(ExprType type) {
         switch (type) {
           case ExprType::I32:
             needI32(joinRegI32_);
             break;
           case ExprType::I64:
             needI64(joinRegI64_);
             break;
           case ExprType::F32:
             needF32(joinRegF32_);
             break;
           case ExprType::F64:
             needF64(joinRegF64_);
             break;
+          case ExprType::AnyRef:
+            needRef(joinRegPtr_);
+            break;
           default:
             break;
         }
     }
 
     void maybeUnreserveJoinReg(ExprType type) {
         switch (type) {
           case ExprType::I32:
@@ -2102,16 +2173,19 @@ class BaseCompiler final : public BaseCo
             freeI64(joinRegI64_);
             break;
           case ExprType::F32:
             freeF32(joinRegF32_);
             break;
           case ExprType::F64:
             freeF64(joinRegF64_);
             break;
+          case ExprType::AnyRef:
+            freeRef(joinRegPtr_);
+            break;
           default:
             break;
         }
     }
 
     ////////////////////////////////////////////////////////////
     //
     // Value stack and spilling.
@@ -2122,85 +2196,106 @@ class BaseCompiler final : public BaseCo
     // stack.
     //
     // The stack can be flushed to memory using sync().  This is handy
     // to avoid problems with control flow and messy register usage
     // patterns.
 
     struct Stk
     {
+      private:
+        Stk() : kind_(Unknown), i64val_(0) {}
+
+      public:
         enum Kind
         {
             // The Mem opcodes are all clustered at the beginning to
             // allow for a quick test within sync().
             MemI32,               // 32-bit integer stack value ("offs")
             MemI64,               // 64-bit integer stack value ("offs")
             MemF32,               // 32-bit floating stack value ("offs")
             MemF64,               // 64-bit floating stack value ("offs")
+            MemRef,               // reftype (pointer wide) stack value ("offs")
 
             // The Local opcodes follow the Mem opcodes for a similar
             // quick test within hasLocal().
             LocalI32,             // Local int32 var ("slot")
             LocalI64,             // Local int64 var ("slot")
             LocalF32,             // Local float32 var ("slot")
             LocalF64,             // Local double var ("slot")
+            LocalRef,             // Local reftype (pointer wide) var ("slot")
 
             RegisterI32,          // 32-bit integer register ("i32reg")
             RegisterI64,          // 64-bit integer register ("i64reg")
             RegisterF32,          // 32-bit floating register ("f32reg")
             RegisterF64,          // 64-bit floating register ("f64reg")
+            RegisterRef,          // reftype (pointer wide) register ("refReg")
 
             ConstI32,             // 32-bit integer constant ("i32val")
             ConstI64,             // 64-bit integer constant ("i64val")
             ConstF32,             // 32-bit floating constant ("f32val")
             ConstF64,             // 64-bit floating constant ("f64val")
+            ConstRef,             // reftype (pointer wide) constant ("refval")
+
+            Unknown,
         };
 
         Kind kind_;
 
-        static const Kind MemLast = MemF64;
-        static const Kind LocalLast = LocalF64;
+        static const Kind MemLast = MemRef;
+        static const Kind LocalLast = LocalRef;
 
         union {
             RegI32   i32reg_;
             RegI64   i64reg_;
+            RegPtr   refReg_;
             RegF32   f32reg_;
             RegF64   f64reg_;
             int32_t  i32val_;
             int64_t  i64val_;
+            intptr_t refval_;
             float    f32val_;
             double   f64val_;
             uint32_t slot_;
             uint32_t offs_;
         };
 
-        explicit Stk(RegI32 r)  : kind_(RegisterI32), i32reg_(r) {}
-        explicit Stk(RegI64 r)  : kind_(RegisterI64), i64reg_(r) {}
-        explicit Stk(RegF32 r)  : kind_(RegisterF32), f32reg_(r) {}
-        explicit Stk(RegF64 r)  : kind_(RegisterF64), f64reg_(r) {}
-        explicit Stk(int32_t v) : kind_(ConstI32), i32val_(v) {}
-        explicit Stk(int64_t v) : kind_(ConstI64), i64val_(v) {}
-        explicit Stk(float v)   : kind_(ConstF32), f32val_(v) {}
-        explicit Stk(double v)  : kind_(ConstF64), f64val_(v) {}
+        explicit Stk(RegI32 r)   : kind_(RegisterI32), i32reg_(r) {}
+        explicit Stk(RegI64 r)   : kind_(RegisterI64), i64reg_(r) {}
+        explicit Stk(RegPtr r)   : kind_(RegisterRef), refReg_(r) {}
+        explicit Stk(RegF32 r)   : kind_(RegisterF32), f32reg_(r) {}
+        explicit Stk(RegF64 r)   : kind_(RegisterF64), f64reg_(r) {}
+        explicit Stk(int32_t v)  : kind_(ConstI32), i32val_(v) {}
+        explicit Stk(int64_t v)  : kind_(ConstI64), i64val_(v) {}
+        explicit Stk(float v)    : kind_(ConstF32), f32val_(v) {}
+        explicit Stk(double v)   : kind_(ConstF64), f64val_(v) {}
         explicit Stk(Kind k, uint32_t v) : kind_(k), slot_(v) {
             MOZ_ASSERT(k > MemLast && k <= LocalLast);
         }
+        static Stk StkRef(intptr_t v) {
+            Stk s;
+            s.kind_ = ConstRef;
+            s.refval_ = v;
+            return s;
+        }
 
         void setOffs(Kind k, uint32_t v) { MOZ_ASSERT(k <= MemLast); kind_ = k; offs_ = v; }
 
         Kind kind() const { return kind_; }
         bool isMem() const { return kind_ <= MemLast; }
 
         RegI32   i32reg() const { MOZ_ASSERT(kind_ == RegisterI32); return i32reg_; }
         RegI64   i64reg() const { MOZ_ASSERT(kind_ == RegisterI64); return i64reg_; }
+        RegPtr   refReg() const { MOZ_ASSERT(kind_ == RegisterRef); return refReg_; }
         RegF32   f32reg() const { MOZ_ASSERT(kind_ == RegisterF32); return f32reg_; }
         RegF64   f64reg() const { MOZ_ASSERT(kind_ == RegisterF64); return f64reg_; }
 
         int32_t  i32val() const { MOZ_ASSERT(kind_ == ConstI32); return i32val_; }
         int64_t  i64val() const { MOZ_ASSERT(kind_ == ConstI64); return i64val_; }
+        intptr_t refval() const { MOZ_ASSERT(kind_ == ConstRef); return refval_; }
 
         // For these two, use an out-param instead of simply returning, to
         // use the normal stack and not the x87 FP stack (which has effect on
         // NaNs with the signaling bit set).
 
         void     f32val(float* out) const { MOZ_ASSERT(kind_ == ConstF32); *out = f32val_; }
         void     f64val(double* out) const { MOZ_ASSERT(kind_ == ConstF64); *out = f64val_; }
 
@@ -2210,16 +2305,20 @@ class BaseCompiler final : public BaseCo
 
     Vector<Stk, 8, SystemAllocPolicy> stk_;
 
     // Append a new entry to the shadow value stack by forwarding the
     // arguments to the matching Stk constructor.
     template<typename... Args>
     void push(Args&&... args) {
         stk_.infallibleEmplaceBack(Stk(Forward<Args>(args)...));
     }
 
+    // Reference constants cannot go through push(): Stk has no intptr_t
+    // constructor (it would be ambiguous with the integer constructors), so
+    // they are built via the Stk::StkRef factory instead.
+    void pushConstRef(intptr_t v) {
+        stk_.infallibleEmplaceBack(Stk::StkRef(v));
+    }
+
+
     // Materialize an i32 constant stack entry into `dest`.
     void loadConstI32(const Stk& src, RegI32 dest) {
         moveImm32(src.i32val(), dest);
     }
 
     // Load a spilled i32 stack entry from its frame offset into `dest`.
     void loadMemI32(const Stk& src, RegI32 dest) {
         fr.loadStackI32(src.offs(), dest);
     }
 
 
@@ -2242,16 +2341,32 @@ class BaseCompiler final : public BaseCo
     // Load the current value of an i64 local variable into `dest`.
     void loadLocalI64(const Stk& src, RegI64 dest) {
         fr.loadLocalI64(localFromSlot(src.slot(), MIRType::Int64), dest);
     }
 
     // Move an i64 that already lives in a register into `dest`.
     void loadRegisterI64(const Stk& src, RegI64 dest) {
         moveI64(src.i64reg(), dest);
     }
 
 
+    // Materialize a reference constant (e.g. null) into `dest`.
+    void loadConstRef(const Stk& src, RegPtr dest) {
+        moveImmRef(src.refval(), dest);
+    }
+
+    // Load a spilled reference stack entry from its frame offset into `dest`.
+    void loadMemRef(const Stk& src, RegPtr dest) {
+        fr.loadStackPtr(src.offs(), dest);
+    }
+
+    // Load the current value of a reference-typed local into `dest`.
+    void loadLocalRef(const Stk& src, RegPtr dest) {
+        fr.loadLocalPtr(localFromSlot(src.slot(), MIRType::Pointer), dest);
+    }
+
+    // Move a reference that already lives in a register into `dest`.
+    void loadRegisterRef(const Stk& src, RegPtr dest) {
+        moveRef(src.refReg(), dest);
+    }
+
+
     // Materialize an f64 constant into `dest`.  Uses the out-param form of
     // f64val() so the value never transits the x87 FP stack, which would
     // quieten signaling NaNs (see the comment on Stk::f64val above).
     void loadConstF64(const Stk& src, RegF64 dest) {
         double d;
         src.f64val(&d);
         masm.loadConstantDouble(d, dest);
     }
 
     void loadMemF64(const Stk& src, RegF64 dest) {
         fr.loadStackF64(src.offs(), dest);
@@ -2394,16 +2509,35 @@ class BaseCompiler final : public BaseCo
           case Stk::RegisterF32:
             loadRegisterF32(src, dest);
             break;
           default:
             MOZ_CRASH("Compiler bug: expected F32 on stack");
         }
     }
 
+    // Load any reference-typed stack entry into `dest`, dispatching on its
+    // current representation (constant, spilled, local slot, or register).
+    // Does not pop the entry and does not free the source register.
+    void loadRef(const Stk& src, RegPtr dest) {
+        switch (src.kind()) {
+          case Stk::ConstRef:
+            loadConstRef(src, dest);
+            break;
+          case Stk::MemRef:
+            loadMemRef(src, dest);
+            break;
+          case Stk::LocalRef:
+            loadLocalRef(src, dest);
+            break;
+          case Stk::RegisterRef:
+            loadRegisterRef(src, dest);
+            break;
+          default:
+            MOZ_CRASH("Compiler bug: expected ref on stack");
+        }
+    }
+
+
     // Flush all local and register value stack elements to memory.
     //
     // TODO / OPTIMIZE: As this is fairly expensive and causes worse
     // code to be emitted subsequently, it is useful to avoid calling
     // it.  (Bug 1316802)
     //
     // Some optimization has been done already.  Remaining
     // opportunities:
@@ -2496,16 +2630,29 @@ class BaseCompiler final : public BaseCo
                 break;
               }
               case Stk::RegisterF32: {
                 uint32_t offs = fr.pushFloat32(v.f32reg());
                 freeF32(v.f32reg());
                 v.setOffs(Stk::MemF32, offs);
                 break;
               }
+              case Stk::LocalRef: {
+                ScratchPtr scratch(*this);
+                loadLocalRef(v, scratch);
+                uint32_t offs = fr.pushPtr(scratch);
+                v.setOffs(Stk::MemRef, offs);
+                break;
+              }
+              case Stk::RegisterRef: {
+                uint32_t offs = fr.pushPtr(v.refReg());
+                freeRef(v.refReg());
+                v.setOffs(Stk::MemRef, offs);
+                break;
+              }
               default: {
                 break;
               }
             }
         }
     }
 
     // This is an optimization used to avoid calling sync() for
@@ -2539,16 +2686,21 @@ class BaseCompiler final : public BaseCo
         push(r);
     }
 
     // Record a register-resident value on the shadow stack.  The register
     // must already be allocated (not in the available set); the stack entry
     // now owns it until the value is popped or spilled.
     void pushI64(RegI64 r) {
         MOZ_ASSERT(!isAvailableI64(r));
         push(r);
     }
 
+    void pushRef(RegPtr r) {
+        MOZ_ASSERT(!isAvailableRef(r));
+        push(r);
+    }
+
     void pushF64(RegF64 r) {
         MOZ_ASSERT(!isAvailableF64(r));
         push(r);
     }
 
     void pushF32(RegF32 r) {
         MOZ_ASSERT(!isAvailableF32(r));
         push(r);
@@ -2559,16 +2711,20 @@ class BaseCompiler final : public BaseCo
     // Record a compile-time constant on the shadow stack; no code is
     // emitted and no register is consumed.
     void pushI32(int32_t v) {
         push(v);
     }
 
     void pushI64(int64_t v) {
         push(v);
     }
 
+    // Reference constants route through pushConstRef() because Stk has no
+    // intptr_t constructor for push() to forward to.
+    void pushRef(intptr_t v) {
+        pushConstRef(v);
+    }
+
     void pushF64(double v) {
         push(v);
     }
 
     void pushF32(float v) {
         push(v);
     }
 
 
@@ -2579,16 +2735,20 @@ class BaseCompiler final : public BaseCo
     // Record a reference to a local variable's slot on the shadow stack.
     // The value is not loaded here; it is read lazily when the entry is
     // later popped or loaded.
     void pushLocalI32(uint32_t slot) {
         push(Stk::LocalI32, slot);
     }
 
     void pushLocalI64(uint32_t slot) {
         push(Stk::LocalI64, slot);
     }
 
+    void pushLocalRef(uint32_t slot) {
+        push(Stk::LocalRef, slot);
+    }
+
     void pushLocalF64(uint32_t slot) {
         push(Stk::LocalF64, slot);
     }
 
     void pushLocalF32(uint32_t slot) {
         push(Stk::LocalF32, slot);
     }
 
 
@@ -2693,16 +2853,64 @@ class BaseCompiler final : public BaseCo
             if (v.kind() == Stk::RegisterI64)
                 freeI64(v.i64reg());
         }
 
         stk_.popBack();
         return specific;
     }
 
+    // Call only from other popRef() variants.
+    // v must be the stack top.  May pop the CPU stack.
+
+    void popRef(const Stk& v, RegPtr dest) {
+        MOZ_ASSERT(&v == &stk_.back());
+        switch (v.kind()) {
+          case Stk::ConstRef:
+            loadConstRef(v, dest);
+            break;
+          case Stk::LocalRef:
+            loadLocalRef(v, dest);
+            break;
+          case Stk::MemRef:
+            // The value was spilled to the CPU stack; reload and pop it.
+            fr.popPtr(dest);
+            break;
+          case Stk::RegisterRef:
+            loadRegisterRef(v, dest);
+            break;
+          default:
+            MOZ_CRASH("Compiler bug: expected ref on stack");
+        }
+    }
+
+    // Pop the top reference into the given register, moving it there if it
+    // currently lives anywhere else, and freeing whatever register the entry
+    // previously occupied.
+    RegPtr popRef(RegPtr specific) {
+        Stk& v = stk_.back();
+
+        if (!(v.kind() == Stk::RegisterRef && v.refReg() == specific)) {
+            needRef(specific);
+            popRef(v, specific);
+            if (v.kind() == Stk::RegisterRef)
+                freeRef(v.refReg());
+        }
+
+        stk_.popBack();
+        return specific;
+    }
+
+    // Pop the top reference into any register.  If the entry is already in a
+    // register that register is reused directly, avoiding an allocation and
+    // a move.
+    MOZ_MUST_USE RegPtr popRef() {
+        Stk& v = stk_.back();
+        RegPtr r;
+        if (v.kind() == Stk::RegisterRef)
+            r = v.refReg();
+        else
+            popRef(v, (r = needRef()));
+        stk_.popBack();
+        return r;
+    }
+
+
     // Call only from other popF64() variants.
     // v must be the stack top.  May pop the CPU stack.
 
     void popF64(const Stk& v, RegF64 dest) {
         MOZ_ASSERT(&v == &stk_.back());
         switch (v.kind()) {
           case Stk::ConstF64:
             loadConstF64(v, dest);
@@ -2909,16 +3117,22 @@ class BaseCompiler final : public BaseCo
             return Some(AnyReg(popF64(joinRegF64_)));
           }
           case ExprType::F32: {
             DebugOnly<Stk::Kind> k(stk_.back().kind());
             MOZ_ASSERT(k == Stk::RegisterF32 || k == Stk::ConstF32 || k == Stk::MemF32 ||
                        k == Stk::LocalF32);
             return Some(AnyReg(popF32(joinRegF32_)));
           }
+          case ExprType::AnyRef: {
+            DebugOnly<Stk::Kind> k(stk_.back().kind());
+            MOZ_ASSERT(k == Stk::RegisterRef || k == Stk::ConstRef || k == Stk::MemRef ||
+                       k == Stk::LocalRef);
+            return Some(AnyReg(popRef(joinRegPtr_)));
+          }
           default: {
             MOZ_CRASH("Compiler bug: unexpected expression type");
           }
         }
     }
 
     // If we ever start not sync-ing on entry to Block (but instead try to sync
     // lazily) then this may start asserting because it does not spill the
@@ -2939,16 +3153,20 @@ class BaseCompiler final : public BaseCo
           case ExprType::F32:
             MOZ_ASSERT(isAvailableF32(joinRegF32_));
             needF32(joinRegF32_);
             return Some(AnyReg(joinRegF32_));
           case ExprType::F64:
             MOZ_ASSERT(isAvailableF64(joinRegF64_));
             needF64(joinRegF64_);
             return Some(AnyReg(joinRegF64_));
+          case ExprType::AnyRef:
+            MOZ_ASSERT(isAvailableRef(joinRegPtr_));
+            needRef(joinRegPtr_);
+            return Some(AnyReg(joinRegPtr_));
           case ExprType::Void:
             return Nothing();
           default:
             MOZ_CRASH("Compiler bug: unexpected type");
         }
     }
 
     void pushJoinRegUnlessVoid(const Maybe<AnyReg>& r) {
@@ -2962,16 +3180,19 @@ class BaseCompiler final : public BaseCo
             pushI64(r->i64());
             break;
           case AnyReg::F64:
             pushF64(r->f64());
             break;
           case AnyReg::F32:
             pushF32(r->f32());
             break;
+          case AnyReg::REF:
+            pushRef(r->ref());
+            break;
         }
     }
 
     void freeJoinRegUnlessVoid(const Maybe<AnyReg>& r) {
         if (!r)
             return;
         switch (r->tag) {
           case AnyReg::I32:
@@ -2981,28 +3202,32 @@ class BaseCompiler final : public BaseCo
             freeI64(r->i64());
             break;
           case AnyReg::F64:
             freeF64(r->f64());
             break;
           case AnyReg::F32:
             freeF32(r->f32());
             break;
+          case AnyReg::REF:
+            freeRef(r->ref());
+            break;
         }
     }
 
     // Return the amount of execution stack consumed by the top numval
     // values on the value stack.
 
     size_t stackConsumed(size_t numval) {
         size_t size = 0;
         MOZ_ASSERT(numval <= stk_.length());
         for (uint32_t i = stk_.length() - 1; numval > 0; numval--, i--) {
             Stk& v = stk_[i];
             switch (v.kind()) {
+              case Stk::MemRef: size += BaseStackFrame::StackSizeOfPtr;    break;
               case Stk::MemI32: size += BaseStackFrame::StackSizeOfPtr;    break;
               case Stk::MemI64: size += BaseStackFrame::StackSizeOfInt64;  break;
               case Stk::MemF64: size += BaseStackFrame::StackSizeOfDouble; break;
               case Stk::MemF32: size += BaseStackFrame::StackSizeOfFloat;  break;
               default: break;
             }
         }
         return size;
@@ -3019,16 +3244,19 @@ class BaseCompiler final : public BaseCo
                 freeI64(v.i64reg());
                 break;
               case Stk::RegisterF64:
                 freeF64(v.f64reg());
                 break;
               case Stk::RegisterF32:
                 freeF32(v.f32reg());
                 break;
+              case Stk::RegisterRef:
+                freeRef(v.refReg());
+                break;
               default:
                 break;
             }
         }
         stk_.shrinkTo(stackSize);
     }
 
     void popValueStackBy(uint32_t items) {
@@ -3065,16 +3293,19 @@ class BaseCompiler final : public BaseCo
                 check.addKnownI64(item.i64reg());
                 break;
               case Stk::RegisterF32:
                 check.addKnownF32(item.f32reg());
                 break;
               case Stk::RegisterF64:
                 check.addKnownF64(item.f64reg());
                 break;
+              case Stk::RegisterRef:
+                check.addKnownRef(item.refReg());
+                break;
               default:
                 break;
             }
         }
     }
 #endif
 
     ////////////////////////////////////////////////////////////
@@ -3154,16 +3385,19 @@ class BaseCompiler final : public BaseCo
             Local& l = localInfo_[i.index()];
             switch (i.mirType()) {
               case MIRType::Int32:
                 fr.storeLocalI32(RegI32(i->gpr()), l);
                 break;
               case MIRType::Int64:
                 fr.storeLocalI64(RegI64(i->gpr64()), l);
                 break;
+              case MIRType::Pointer:
+                fr.storeLocalPtr(RegPtr(i->gpr()), l);
+                break;
               case MIRType::Double:
                 fr.storeLocalF64(RegF64(i->fpu()), l);
                 break;
               case MIRType::Float32:
                 fr.storeLocalF32(RegF32(i->fpu()), l);
                 break;
               default:
                 MOZ_CRASH("Function argument type");
@@ -3483,16 +3717,27 @@ class BaseCompiler final : public BaseCo
                 MOZ_CRASH("Unexpected parameter passing discipline");
               }
 #endif
               case ABIArg::Uninitialized:
                 MOZ_CRASH("Uninitialized ABIArg kind");
             }
             break;
           }
+          case ValType::AnyRef: {
+            ABIArg argLoc = call->abi.next(MIRType::Pointer);
+            if (argLoc.kind() == ABIArg::Stack) {
+                ScratchPtr scratch(*this);
+                loadRef(arg, scratch);
+                masm.storePtr(scratch, Address(masm.getStackPointer(), argLoc.offsetFromArgBase()));
+            } else {
+                loadRef(arg, RegPtr(argLoc.gpr()));
+            }
+            break;
+          }
           default:
             MOZ_CRASH("Function argument type");
         }
     }
 
     void callDefinition(uint32_t funcIndex, const FunctionCall& call)
     {
         CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Func);
@@ -3556,16 +3801,20 @@ class BaseCompiler final : public BaseCo
     // Materialize immediate constants into registers.
     void moveImm32(int32_t v, RegI32 dest) {
         masm.move32(Imm32(v), dest);
     }
 
     void moveImm64(int64_t v, RegI64 dest) {
         masm.move64(Imm64(v), dest);
     }
 
+    // References are moved as raw pointer-sized words; NULLREF_VALUE (zero)
+    // is the only constant currently produced (by ref.null).
+    void moveImmRef(intptr_t v, RegPtr dest) {
+        masm.movePtr(ImmWord(v), dest);
+    }
+
     void moveImmF32(float f, RegF32 dest) {
         masm.loadConstantFloat32(f, dest);
     }
 
     void moveImmF64(double d, RegF64 dest) {
         masm.loadConstantDouble(d, dest);
     }
 
 
@@ -3686,16 +3935,23 @@ class BaseCompiler final : public BaseCo
         needF64(r);
 #if defined(JS_CODEGEN_ARM)
         if (call.usesSystemAbi && !call.hardFP)
             masm.ma_vxfer(ReturnReg64.low, ReturnReg64.high, r);
 #endif
         return r;
     }
 
+    // Claim the ABI pointer-return register after a call returning a
+    // reference.  The register is asserted available — presumably because
+    // the call sequence has synced/freed all registers; TODO confirm against
+    // the callers.
+    RegPtr captureReturnedRef() {
+        RegPtr r = RegPtr(ReturnReg);
+        MOZ_ASSERT(isAvailableRef(r));
+        needRef(r);
+        return r;
+    }
+
+
     // Jump to the function's single return point.  When `popStack` is true,
     // first unwind the CPU stack to the outermost control frame's height so
     // the epilogue sees a balanced stack.
     void returnCleanup(bool popStack) {
         if (popStack)
             fr.popStackBeforeBranch(controlOutermost().stackHeight);
         masm.jump(&returnLabel_);
     }
 
     void checkDivideByZeroI32(RegI32 rhs, RegI32 srcDest, Label* done) {
         Label nonZero;
@@ -5270,16 +5526,21 @@ class BaseCompiler final : public BaseCo
         *r0 = popF32();
     }
 
     // Pop two operands: the right-hand side (stack top) into *r1, then the
     // left-hand side into *r0.
     void pop2xF64(RegF64* r0, RegF64* r1) {
         *r1 = popF64();
         *r0 = popF64();
     }
 
+    void pop2xRef(RegPtr* r0, RegPtr* r1) {
+        *r1 = popRef();
+        *r0 = popRef();
+    }
+
+
     // Pop an i64 and reuse (the low half of) its register as an i32.
     RegI32 popI64ToI32() {
         RegI64 r = popI64();
         return narrowI64(r);
     }
 
     RegI32 popI64ToSpecificI32(RegI32 specific) {
         RegI64 rd = widenI32(specific);
         popI64ToSpecific(rd);
@@ -5609,16 +5870,19 @@ class BaseCompiler final : public BaseCo
     void emitReinterpretI32AsF32();
     void emitReinterpretI64AsF64();
     void emitRound(RoundingMode roundingMode, ValType operandType);
     void emitInstanceCall(uint32_t lineOrBytecode, const MIRTypeVector& sig,
                           ExprType retType, SymbolicAddress builtin);
     MOZ_MUST_USE bool emitGrowMemory();
     MOZ_MUST_USE bool emitCurrentMemory();
 
+    MOZ_MUST_USE bool emitRefNull();
+    void emitRefIsNull();
+
     MOZ_MUST_USE bool emitAtomicCmpXchg(ValType type, Scalar::Type viewType);
     MOZ_MUST_USE bool emitAtomicLoad(ValType type, Scalar::Type viewType);
     MOZ_MUST_USE bool emitAtomicRMW(ValType type, Scalar::Type viewType, AtomicOp op);
     MOZ_MUST_USE bool emitAtomicStore(ValType type, Scalar::Type viewType);
     MOZ_MUST_USE bool emitWait(ValType type, uint32_t byteSize);
     MOZ_MUST_USE bool emitWake();
     MOZ_MUST_USE bool emitAtomicXchg(ValType type, Scalar::Type viewType);
     void emitAtomicXchg64(MemoryAccessDesc* access, ValType type, WantResult wantResult);
@@ -6846,20 +7110,19 @@ BaseCompiler::sniffConditionalControlCmp
     // only have five.
     if (operandType == ValType::I64)
         return false;
 #endif
 
     OpBytes op;
     iter_.peekOp(&op);
     switch (op.b0) {
-      case uint16_t(Op::Select):
-        MOZ_FALLTHROUGH;
       case uint16_t(Op::BrIf):
       case uint16_t(Op::If):
+      case uint16_t(Op::Select):
         setLatentCompare(compareOp, operandType);
         return true;
       default:
         return false;
     }
 }
 
 bool
@@ -7431,16 +7694,22 @@ BaseCompiler::doReturn(ExprType type, bo
         break;
       }
       case ExprType::F32: {
         RegF32 rv = popF32(RegF32(ReturnFloat32Reg));
         returnCleanup(popStack);
         freeF32(rv);
         break;
       }
+      case ExprType::AnyRef: {
+        RegPtr rv = popRef(RegPtr(ReturnReg));
+        returnCleanup(popStack);
+        freeRef(rv);
+        break;
+      }
       default: {
         MOZ_CRASH("Function return type");
       }
     }
 }
 
 bool
 BaseCompiler::emitReturn()
@@ -7495,16 +7764,21 @@ BaseCompiler::pushReturned(const Functio
         pushF32(rv);
         break;
       }
       case ExprType::F64: {
         RegF64 rv = captureReturnedF64(call);
         pushF64(rv);
         break;
       }
+      case ExprType::AnyRef: {
+        RegPtr rv = captureReturnedRef();
+        pushRef(rv);
+        break;
+      }
       default:
         MOZ_CRASH("Function return type");
     }
 }
 
 // For now, always sync() at the beginning of the call to easily save live
 // values.
 //
@@ -7822,16 +8096,19 @@ BaseCompiler::emitGetLocal()
         pushLocalI64(slot);
         break;
       case ValType::F64:
         pushLocalF64(slot);
         break;
       case ValType::F32:
         pushLocalF32(slot);
         break;
+      case ValType::AnyRef:
+        pushLocalRef(slot);
+        break;
       default:
         MOZ_CRASH("Local variable type");
     }
 
     return true;
 }
 
 template<bool isSetLocal>
@@ -7878,16 +8155,26 @@ BaseCompiler::emitSetOrTeeLocal(uint32_t
         syncLocal(slot);
         fr.storeLocalF32(rv, localFromSlot(slot, MIRType::Float32));
         if (isSetLocal)
             freeF32(rv);
         else
             pushF32(rv);
         break;
       }
+      case ValType::AnyRef: {
+        RegPtr rv = popRef();
+        syncLocal(slot);
+        fr.storeLocalPtr(rv, localFromSlot(slot, MIRType::Pointer));
+        if (isSetLocal)
+            freeRef(rv);
+        else
+            pushRef(rv);
+        break;
+      }
       default:
         MOZ_CRASH("Local variable type");
     }
 
     return true;
 }
 
 bool
@@ -8398,16 +8685,26 @@ BaseCompiler::emitSelect()
         pop2xF64(&r, &rs);
         emitBranchPerform(&b);
         moveF64(rs, r);
         masm.bind(&done);
         freeF64(rs);
         pushF64(r);
         break;
       }
+      case ValType::AnyRef: {
+        RegPtr r, rs;
+        pop2xRef(&r, &rs);
+        emitBranchPerform(&b);
+        moveRef(rs, r);
+        masm.bind(&done);
+        freeRef(rs);
+        pushRef(r);
+        break;
+      }
       default: {
         MOZ_CRASH("select type");
       }
     }
 
     return true;
 }
 
@@ -8553,16 +8850,39 @@ BaseCompiler::emitCurrentMemory()
     if (deadCode_)
         return true;
 
     emitInstanceCall(lineOrBytecode, SigP_, ExprType::I32, SymbolicAddress::CurrentMemory);
     return true;
 }
 
+// Emit ref.null: validate the opcode, then push the compile-time null
+// reference constant.  No machine code is generated here; the constant is
+// materialized only when the value is actually used.
 bool
+BaseCompiler::emitRefNull()
+{
+    if (!iter_.readRefNull())
+        return false;
+
+    // Unreachable code: validation succeeded but nothing to emit.
+    if (deadCode_)
+        return true;
+
+    pushRef(NULLREF_VALUE);
+    return true;
+}
+
+// Emit ref.is_null: compare the popped reference against NULLREF_VALUE and
+// push the boolean result as an i32.  Operand reading/validation is handled
+// by the emitConversion wrapper at the call site.
+void
+BaseCompiler::emitRefIsNull()
+{
+    RegPtr r = popRef();
+    // rd reuses r's storage via narrowPtr — assumed safe because cmpPtrSet
+    // reads its operands before writing the result; TODO confirm on all
+    // targets.
+    RegI32 rd = narrowPtr(r);
+
+    masm.cmpPtrSet(Assembler::Equal, r, ImmWord(NULLREF_VALUE), rd);
+    pushI32(rd);
+}
+
+bool
 BaseCompiler::emitAtomicCmpXchg(ValType type, Scalar::Type viewType)
 {
     LinearMemoryAddress<Nothing> addr;
     Nothing unused;
 
     if (!iter_.readAtomicCmpXchg(&addr, type, Scalar::byteSize(viewType), &unused, &unused))
         return false;
 
@@ -9419,16 +9739,29 @@ BaseCompiler::emitBody()
 #endif
 
           // Memory Related
           case uint16_t(Op::GrowMemory):
             CHECK_NEXT(emitGrowMemory());
           case uint16_t(Op::CurrentMemory):
             CHECK_NEXT(emitCurrentMemory());
 
+#ifdef ENABLE_WASM_GC
+          case uint16_t(Op::RefNull):
+            if (env_.gcTypesEnabled == HasGcTypes::False)
+                return iter_.unrecognizedOpcode(&op);
+            CHECK_NEXT(emitRefNull());
+            break;
+          case uint16_t(Op::RefIsNull):
+            if (env_.gcTypesEnabled == HasGcTypes::False)
+                return iter_.unrecognizedOpcode(&op);
+            CHECK_NEXT(emitConversion(emitRefIsNull, ValType::AnyRef, ValType::I32));
+            break;
+#endif
+
           // Numeric operations
           case uint16_t(Op::NumericPrefix): {
 #ifdef ENABLE_WASM_SATURATING_TRUNC_OPS
             switch (op.b1) {
               case uint16_t(NumericOp::I32TruncSSatF32):
                 CHECK_NEXT(emitConversionOOM(emitTruncateF32ToI32<TRUNC_SATURATING>,
                                              ValType::F32, ValType::I32));
               case uint16_t(NumericOp::I32TruncUSatF32):
@@ -9697,16 +10030,17 @@ BaseCompiler::BaseCompiler(const ModuleE
       latentType_(ValType::I32),
       latentIntCmp_(Assembler::Equal),
       latentDoubleCmp_(Assembler::DoubleEqual),
       masm(*masm),
       ra(*this),
       fr(*masm),
       joinRegI32_(RegI32(ReturnReg)),
       joinRegI64_(RegI64(ReturnReg64)),
+      joinRegPtr_(RegPtr(ReturnReg)),
       joinRegF32_(RegF32(ReturnFloat32Reg)),
       joinRegF64_(RegF64(ReturnDoubleReg))
 {
 }
 
 bool
 BaseCompiler::init()
 {
--- a/js/src/wasm/WasmBinaryConstants.h
+++ b/js/src/wasm/WasmBinaryConstants.h
@@ -93,16 +93,20 @@ enum class ValType
     F32x4                                = uint8_t(TypeCode::F32x4),
     B8x16                                = uint8_t(TypeCode::B8x16),
     B16x8                                = uint8_t(TypeCode::B16x8),
     B32x4                                = uint8_t(TypeCode::B32x4)
 };
 
 typedef Vector<ValType, 8, SystemAllocPolicy> ValTypeVector;
 
+// The representation of a null reference value throughout the compiler.
+// It is zero, so null checks compile to a plain pointer compare against an
+// immediate (see BaseCompiler::emitRefIsNull) and materialization is a
+// single movePtr of ImmWord(0).
+
+static const intptr_t NULLREF_VALUE = intptr_t((void*)nullptr);
+
 // Kind codes used by import/export entries; the values are those used in
 // the wasm binary encoding.
 enum class DefinitionKind
 {
     Function                             = 0x00,
     Table                                = 0x01,
     Memory                               = 0x02,
     Global                               = 0x03
 };