Bug 1377576 - Define text-to-binary machinery. r=sunfish
author      Lars T Hansen <lhansen@mozilla.com>
date        Sun, 02 Jul 2017 10:11:05 -0700
changeset   437645 21fb0572b28cd614df9cef36d776f9de7aff5207
parent      437644 a80521fee6419412226c8f69a66a8e77ffef6c0f
child       437646 e13ccb86b41700ecff390736b101472a38c8b4ab
push id     117
push user   fmarier@mozilla.com
push date   Tue, 28 Nov 2017 20:17:16 +0000
reviewers   sunfish
bugs        1377576
milestone   59.0a1
Bug 1377576 - Define text-to-binary machinery. r=sunfish
js/src/wasm/WasmAST.h
js/src/wasm/WasmTextToBinary.cpp
--- a/js/src/wasm/WasmAST.h
+++ b/js/src/wasm/WasmAST.h
@@ -178,16 +178,20 @@ class AstNode : public AstBase
     AstNode() : offset_(AstNodeUnknownOffset) {}
 
     uint32_t offset() const { return offset_; }
     void setOffset(uint32_t offset) { offset_ = offset; }
 };
 
 enum class AstExprKind
 {
+    AtomicCmpXchg,
+    AtomicLoad,
+    AtomicRMW,
+    AtomicStore,
     BinaryOperator,
     Block,
     Branch,
     BranchTable,
     Call,
     CallIndirect,
     ComparisonOperator,
     Const,
@@ -204,17 +208,19 @@ enum class AstExprKind
     Pop,
     Return,
     SetGlobal,
     SetLocal,
     TeeLocal,
     Store,
     TernaryOperator,
     UnaryOperator,
-    Unreachable
+    Unreachable,
+    Wait,
+    Wake
 };
 
 class AstExpr : public AstNode
 {
     const AstExprKind kind_;
     ExprType type_;
 
   protected:
@@ -537,16 +543,138 @@ class AstStore : public AstExpr
         value_(value)
     {}
 
     Op op() const { return op_; }
     const AstLoadStoreAddress& address() const { return address_; }
     AstExpr& value() const { return *value_; }
 };
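+
+// AST nodes for the wasm thread operations (shared-memory atomics plus wait
+// and wake).  Each node carries its ThreadOp (wake needs none), the linear
+// memory address it operates on, and the operand expressions the instruction
+// consumes.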
 
+class AstAtomicCmpXchg : public AstExpr
+{
+    ThreadOp op_;
+    AstLoadStoreAddress address_;
+    AstExpr* expected_;
+    AstExpr* replacement_;
+
+  public:
+    static const AstExprKind Kind = AstExprKind::AtomicCmpXchg;
+    explicit AstAtomicCmpXchg(ThreadOp op, const AstLoadStoreAddress& address, AstExpr* expected,
+                              AstExpr* replacement)
+      : AstExpr(Kind, ExprType::Limit),
+        op_(op),
+        address_(address),
+        expected_(expected),
+        replacement_(replacement)
+    {}
+
+    ThreadOp op() const { return op_; }
+    const AstLoadStoreAddress& address() const { return address_; }
+    AstExpr& expected() const { return *expected_; }
+    AstExpr& replacement() const { return *replacement_; }
+};
+
+class AstAtomicLoad : public AstExpr
+{
+    ThreadOp op_;
+    AstLoadStoreAddress address_;
+
+  public:
+    static const AstExprKind Kind = AstExprKind::AtomicLoad;
+    explicit AstAtomicLoad(ThreadOp op, const AstLoadStoreAddress& address)
+      : AstExpr(Kind, ExprType::Limit),
+        op_(op),
+        address_(address)
+    {}
+
+    ThreadOp op() const { return op_; }
+    const AstLoadStoreAddress& address() const { return address_; }
+};
+
+class AstAtomicRMW : public AstExpr
+{
+    ThreadOp op_;
+    AstLoadStoreAddress address_;
+    AstExpr* value_;
+
+  public:
+    static const AstExprKind Kind = AstExprKind::AtomicRMW;
+    explicit AstAtomicRMW(ThreadOp op, const AstLoadStoreAddress& address, AstExpr* value)
+      : AstExpr(Kind, ExprType::Limit),
+        op_(op),
+        address_(address),
+        value_(value)
+    {}
+
+    ThreadOp op() const { return op_; }
+    const AstLoadStoreAddress& address() const { return address_; }
+    AstExpr& value() const { return *value_; }
+};
+
+class AstAtomicStore : public AstExpr
+{
+    ThreadOp op_;
+    AstLoadStoreAddress address_;
+    AstExpr* value_;
+
+  public:
+    static const AstExprKind Kind = AstExprKind::AtomicStore;
+    explicit AstAtomicStore(ThreadOp op, const AstLoadStoreAddress& address, AstExpr* value)
+      : AstExpr(Kind, ExprType::Void),
+        op_(op),
+        address_(address),
+        value_(value)
+    {}
+
+    ThreadOp op() const { return op_; }
+    const AstLoadStoreAddress& address() const { return address_; }
+    AstExpr& value() const { return *value_; }
+};
+
+class AstWait : public AstExpr
+{
+    ThreadOp op_;
+    AstLoadStoreAddress address_;
+    AstExpr* expected_;
+    AstExpr* timeout_;
+
+  public:
+    static const AstExprKind Kind = AstExprKind::Wait;
+    explicit AstWait(ThreadOp op, const AstLoadStoreAddress& address, AstExpr* expected,
+                     AstExpr* timeout)
+      : AstExpr(Kind, ExprType::I32),
+        op_(op),
+        address_(address),
+        expected_(expected),
+        timeout_(timeout)
+    {}
+
+    ThreadOp op() const { return op_; }
+    const AstLoadStoreAddress& address() const { return address_; }
+    AstExpr& expected() const { return *expected_; }
+    AstExpr& timeout() const { return *timeout_; }
+};
+
+class AstWake : public AstExpr
+{
+    AstLoadStoreAddress address_;
+    AstExpr* count_;
+
+  public:
+    static const AstExprKind Kind = AstExprKind::Wake;
+    explicit AstWake(const AstLoadStoreAddress& address, AstExpr* count)
+      : AstExpr(Kind, ExprType::I32),
+        address_(address),
+        count_(count)
+    {}
+
+    const AstLoadStoreAddress& address() const { return address_; }
+    AstExpr& count() const { return *count_; }
+};
+
 class AstCurrentMemory final : public AstExpr
 {
   public:
     static const AstExprKind Kind = AstExprKind::CurrentMemory;
     explicit AstCurrentMemory()
       : AstExpr(Kind, ExprType::I32)
     {}
 };
--- a/js/src/wasm/WasmTextToBinary.cpp
+++ b/js/src/wasm/WasmTextToBinary.cpp
@@ -62,16 +62,20 @@ class WasmToken
         Infinity,
         NaN
     };
 
     enum Kind
     {
         Align,
         AnyFunc,
+        AtomicCmpXchg,
+        AtomicLoad,
+        AtomicRMW,
+        AtomicStore,
         BinaryOpcode,
         Block,
         Br,
         BrIf,
         BrTable,
         Call,
         CallIndirect,
         CloseParen,
@@ -121,29 +125,32 @@ class WasmToken
         TeeLocal,
         TernaryOpcode,
         Text,
         Then,
         Type,
         UnaryOpcode,
         Unreachable,
         UnsignedInteger,
-        ValueType
+        ValueType,
+        Wait,
+        Wake
     };
   private:
     Kind kind_;
     const char16_t* begin_;
     const char16_t* end_;
     union {
         uint32_t index_;
         uint64_t uint_;
         int64_t sint_;
         FloatLiteralKind floatLiteralKind_;
         ValType valueType_;
         Op op_;
+        ThreadOp threadOp_;
     } u;
   public:
     WasmToken()
       : kind_(Kind(-1)),
         begin_(nullptr),
         end_(nullptr),
         u()
     { }
@@ -203,16 +210,26 @@ class WasmToken
         end_(end)
     {
         MOZ_ASSERT(begin != end);
         MOZ_ASSERT(kind_ == UnaryOpcode || kind_ == BinaryOpcode || kind_ == TernaryOpcode ||
                    kind_ == ComparisonOpcode || kind_ == ConversionOpcode ||
                    kind_ == Load || kind_ == Store);
         u.op_ = op;
     }
+    explicit WasmToken(Kind kind, ThreadOp op, const char16_t* begin, const char16_t* end)
+      : kind_(kind),
+        begin_(begin),
+        end_(end)
+    {
+        MOZ_ASSERT(begin != end);
+        MOZ_ASSERT(kind_ == AtomicCmpXchg || kind_ == AtomicLoad || kind_ == AtomicRMW ||
+                   kind_ == AtomicStore || kind_ == Wait || kind_ == Wake);
+        u.threadOp_ = op;
+    }
     explicit WasmToken(const char16_t* begin)
       : kind_(Error),
         begin_(begin),
         end_(begin)
     {}
     Kind kind() const {
         MOZ_ASSERT(kind_ != Kind(-1));
         return kind_;
@@ -254,18 +271,27 @@ class WasmToken
         return u.valueType_;
     }
     Op op() const {
         MOZ_ASSERT(kind_ == UnaryOpcode || kind_ == BinaryOpcode || kind_ == TernaryOpcode ||
                    kind_ == ComparisonOpcode || kind_ == ConversionOpcode ||
                    kind_ == Load || kind_ == Store);
         return u.op_;
     }
+    ThreadOp threadOp() const {
+        MOZ_ASSERT(kind_ == AtomicCmpXchg || kind_ == AtomicLoad || kind_ == AtomicRMW ||
+                   kind_ == AtomicStore || kind_ == Wait || kind_ == Wake);
+        return u.threadOp_;
+    }
     bool isOpcode() const {
         switch (kind_) {
+          case AtomicCmpXchg:
+          case AtomicLoad:
+          case AtomicRMW:
+          case AtomicStore:
           case BinaryOpcode:
           case Block:
           case Br:
           case BrIf:
           case BrTable:
           case Call:
           case CallIndirect:
           case ComparisonOpcode:
@@ -283,16 +309,18 @@ class WasmToken
           case Return:
           case SetGlobal:
           case SetLocal:
           case Store:
           case TeeLocal:
           case TernaryOpcode:
           case UnaryOpcode:
           case Unreachable:
+          case Wait:
+          case Wake:
             return true;
           case Align:
           case AnyFunc:
           case CloseParen:
           case Data:
           case Elem:
           case Else:
           case EndOfFile:
@@ -828,16 +856,20 @@ WasmTokenStream::next()
       case '5': case '6': case '7': case '8': case '9':
         return literal(begin);
 
       case 'a':
         if (consume(u"align"))
             return WasmToken(WasmToken::Align, begin, cur_);
         if (consume(u"anyfunc"))
             return WasmToken(WasmToken::AnyFunc, begin, cur_);
+#ifdef ENABLE_WASM_THREAD_OPS
+        if (consume(u"atomic.wake"))
+            return WasmToken(WasmToken::Wake, ThreadOp::Wake, begin, cur_);
+#endif
         break;
 
       case 'b':
         if (consume(u"block"))
             return WasmToken(WasmToken::Block, begin, cur_);
         if (consume(u"br")) {
             if (consume(u"_table"))
                 return WasmToken(WasmToken::BrTable, begin, cur_);
@@ -1100,16 +1132,76 @@ WasmTokenStream::next()
                 return WasmToken(WasmToken::ValueType, ValType::I32, begin, cur_);
 
             switch (*cur_) {
               case 'a':
                 if (consume(u"add"))
                     return WasmToken(WasmToken::BinaryOpcode, Op::I32Add, begin, cur_);
                 if (consume(u"and"))
                     return WasmToken(WasmToken::BinaryOpcode, Op::I32And, begin, cur_);
+#ifdef ENABLE_WASM_THREAD_OPS
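+                // Within "atomic.", longer mnemonics must be tried before
+                // their prefixes (e.g. "load8_u" before "load", "store16_u"
+                // before "store"): the first consume() that matches wins.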
+                if (consume(u"atomic.")) {
+                    if (consume(u"rmw8_u.add"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicAdd8U, begin, cur_);
+                    if (consume(u"rmw16_u.add"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicAdd16U, begin, cur_);
+                    if (consume(u"rmw.add"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicAdd, begin, cur_);
+                    if (consume(u"rmw8_u.and"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicAnd8U, begin, cur_);
+                    if (consume(u"rmw16_u.and"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicAnd16U, begin, cur_);
+                    if (consume(u"rmw.and"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicAnd, begin, cur_);
+                    if (consume(u"rmw8_u.cmpxchg"))
+                        return WasmToken(WasmToken::AtomicCmpXchg, ThreadOp::I32AtomicCmpXchg8U, begin, cur_);
+                    if (consume(u"rmw16_u.cmpxchg"))
+                        return WasmToken(WasmToken::AtomicCmpXchg, ThreadOp::I32AtomicCmpXchg16U, begin, cur_);
+                    if (consume(u"rmw.cmpxchg"))
+                        return WasmToken(WasmToken::AtomicCmpXchg, ThreadOp::I32AtomicCmpXchg, begin, cur_);
+                    if (consume(u"load8_u"))
+                        return WasmToken(WasmToken::AtomicLoad, ThreadOp::I32AtomicLoad8U, begin, cur_);
+                    if (consume(u"load16_u"))
+                        return WasmToken(WasmToken::AtomicLoad, ThreadOp::I32AtomicLoad16U, begin, cur_);
+                    if (consume(u"load"))
+                        return WasmToken(WasmToken::AtomicLoad, ThreadOp::I32AtomicLoad, begin, cur_);
+                    if (consume(u"rmw8_u.or"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicOr8U, begin, cur_);
+                    if (consume(u"rmw16_u.or"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicOr16U, begin, cur_);
+                    if (consume(u"rmw.or"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicOr, begin, cur_);
+                    if (consume(u"store8_u"))
+                        return WasmToken(WasmToken::AtomicStore, ThreadOp::I32AtomicStore8U, begin, cur_);
+                    if (consume(u"store16_u"))
+                        return WasmToken(WasmToken::AtomicStore, ThreadOp::I32AtomicStore16U, begin, cur_);
+                    if (consume(u"store"))
+                        return WasmToken(WasmToken::AtomicStore, ThreadOp::I32AtomicStore, begin, cur_);
+                    if (consume(u"rmw8_u.sub"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicSub8U, begin, cur_);
+                    if (consume(u"rmw16_u.sub"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicSub16U, begin, cur_);
+                    if (consume(u"rmw.sub"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicSub, begin, cur_);
+                    if (consume(u"rmw8_u.xor"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicXor8U, begin, cur_);
+                    if (consume(u"rmw16_u.xor"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicXor16U, begin, cur_);
+                    if (consume(u"rmw.xor"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicXor, begin, cur_);
+                    if (consume(u"rmw8_u.xchg"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicXchg8U, begin, cur_);
+                    if (consume(u"rmw16_u.xchg"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicXchg16U, begin, cur_);
+                    if (consume(u"rmw.xchg"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I32AtomicXchg, begin, cur_);
+                    if (consume(u"wait"))
+                        return WasmToken(WasmToken::Wait, ThreadOp::I32Wait, begin, cur_);
+                }
+#endif // ENABLE_WASM_THREAD_OPS
                 break;
               case 'c':
                 if (consume(u"const"))
                     return WasmToken(WasmToken::Const, ValType::I32, begin, cur_);
                 if (consume(u"clz"))
                     return WasmToken(WasmToken::UnaryOpcode, Op::I32Clz, begin, cur_);
                 if (consume(u"ctz"))
                     return WasmToken(WasmToken::UnaryOpcode, Op::I32Ctz, begin, cur_);
@@ -1244,16 +1336,94 @@ WasmTokenStream::next()
                 return WasmToken(WasmToken::ValueType, ValType::I64, begin, cur_);
 
             switch (*cur_) {
               case 'a':
                 if (consume(u"add"))
                     return WasmToken(WasmToken::BinaryOpcode, Op::I64Add, begin, cur_);
                 if (consume(u"and"))
                     return WasmToken(WasmToken::BinaryOpcode, Op::I64And, begin, cur_);
+#ifdef ENABLE_WASM_THREAD_OPS
+                if (consume(u"atomic.")) {
+                    if (consume(u"rmw8_u.add"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicAdd8U, begin, cur_);
+                    if (consume(u"rmw16_u.add"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicAdd16U, begin, cur_);
+                    if (consume(u"rmw32_u.add"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicAdd32U, begin, cur_);
+                    if (consume(u"rmw.add"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicAdd, begin, cur_);
+                    if (consume(u"rmw8_u.and"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicAnd8U, begin, cur_);
+                    if (consume(u"rmw16_u.and"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicAnd16U, begin, cur_);
+                    if (consume(u"rmw32_u.and"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicAnd32U, begin, cur_);
+                    if (consume(u"rmw.and"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicAnd, begin, cur_);
+                    if (consume(u"rmw8_u.cmpxchg"))
+                        return WasmToken(WasmToken::AtomicCmpXchg, ThreadOp::I64AtomicCmpXchg8U, begin, cur_);
+                    if (consume(u"rmw16_u.cmpxchg"))
+                        return WasmToken(WasmToken::AtomicCmpXchg, ThreadOp::I64AtomicCmpXchg16U, begin, cur_);
+                    if (consume(u"rmw32_u.cmpxchg"))
+                        return WasmToken(WasmToken::AtomicCmpXchg, ThreadOp::I64AtomicCmpXchg32U, begin, cur_);
+                    if (consume(u"rmw.cmpxchg"))
+                        return WasmToken(WasmToken::AtomicCmpXchg, ThreadOp::I64AtomicCmpXchg, begin, cur_);
+                    if (consume(u"load8_u"))
+                        return WasmToken(WasmToken::AtomicLoad, ThreadOp::I64AtomicLoad8U, begin, cur_);
+                    if (consume(u"load16_u"))
+                        return WasmToken(WasmToken::AtomicLoad, ThreadOp::I64AtomicLoad16U, begin, cur_);
+                    if (consume(u"load32_u"))
+                        return WasmToken(WasmToken::AtomicLoad, ThreadOp::I64AtomicLoad32U, begin, cur_);
+                    if (consume(u"load"))
+                        return WasmToken(WasmToken::AtomicLoad, ThreadOp::I64AtomicLoad, begin, cur_);
+                    if (consume(u"rmw8_u.or"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicOr8U, begin, cur_);
+                    if (consume(u"rmw16_u.or"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicOr16U, begin, cur_);
+                    if (consume(u"rmw32_u.or"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicOr32U, begin, cur_);
+                    if (consume(u"rmw.or"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicOr, begin, cur_);
+                    if (consume(u"store8_u"))
+                        return WasmToken(WasmToken::AtomicStore, ThreadOp::I64AtomicStore8U, begin, cur_);
+                    if (consume(u"store16_u"))
+                        return WasmToken(WasmToken::AtomicStore, ThreadOp::I64AtomicStore16U, begin, cur_);
+                    if (consume(u"store32_u"))
+                        return WasmToken(WasmToken::AtomicStore, ThreadOp::I64AtomicStore32U, begin, cur_);
+                    if (consume(u"store"))
+                        return WasmToken(WasmToken::AtomicStore, ThreadOp::I64AtomicStore, begin, cur_);
+                    if (consume(u"rmw8_u.sub"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicSub8U, begin, cur_);
+                    if (consume(u"rmw16_u.sub"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicSub16U, begin, cur_);
+                    if (consume(u"rmw32_u.sub"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicSub32U, begin, cur_);
+                    if (consume(u"rmw.sub"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicSub, begin, cur_);
+                    if (consume(u"rmw8_u.xor"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicXor8U, begin, cur_);
+                    if (consume(u"rmw16_u.xor"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicXor16U, begin, cur_);
+                    if (consume(u"rmw32_u.xor"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicXor32U, begin, cur_);
+                    if (consume(u"rmw.xor"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicXor, begin, cur_);
+                    if (consume(u"rmw8_u.xchg"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicXchg8U, begin, cur_);
+                    if (consume(u"rmw16_u.xchg"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicXchg16U, begin, cur_);
+                    if (consume(u"rmw32_u.xchg"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicXchg32U, begin, cur_);
+                    if (consume(u"rmw.xchg"))
+                        return WasmToken(WasmToken::AtomicRMW, ThreadOp::I64AtomicXchg, begin, cur_);
+                    if (consume(u"wait"))
+                        return WasmToken(WasmToken::Wait, ThreadOp::I64Wait, begin, cur_);
+                }
+#endif // ENABLE_WASM_THREAD_OPS
                 break;
               case 'c':
                 if (consume(u"const"))
                     return WasmToken(WasmToken::Const, ValType::I64, begin, cur_);
                 if (consume(u"clz"))
                     return WasmToken(WasmToken::UnaryOpcode, Op::I64Clz, begin, cur_);
                 if (consume(u"ctz"))
                     return WasmToken(WasmToken::UnaryOpcode, Op::I64Ctz, begin, cur_);
@@ -1380,16 +1550,18 @@ WasmTokenStream::next()
                                      begin, cur_);
                 if (consume(u"trunc_u/f32"))
                     return WasmToken(WasmToken::ConversionOpcode, Op::I64TruncUF32,
                                      begin, cur_);
                 if (consume(u"trunc_u/f64"))
                     return WasmToken(WasmToken::ConversionOpcode, Op::I64TruncUF64,
                                      begin, cur_);
                 break;
+              case 'w':
+                break;
               case 'x':
                 if (consume(u"xor"))
                     return WasmToken(WasmToken::BinaryOpcode, Op::I64Xor, begin, cur_);
                 break;
             }
             break;
         }
         if (consume(u"import"))
@@ -2385,16 +2557,270 @@ ParseStore(WasmParseContext& c, Op op, b
     if (!value)
         return nullptr;
 
     uint32_t flags = alignLog2;
 
     return new(c.lifo) AstStore(op, AstLoadStoreAddress(base, flags, offset), value);
 }
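+
+// Parsers for the thread operations.  When the text omits an explicit align
+// immediate, ParseLoadStoreAddress leaves alignLog2 as UINT32_MAX and we
+// default it to the operation's natural alignment (log2 of the access size).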
 
+static AstAtomicCmpXchg*
+ParseAtomicCmpXchg(WasmParseContext& c, ThreadOp op, bool inParens)
+{
+    int32_t offset;
+    uint32_t alignLog2;
+    AstExpr* base;
+    if (!ParseLoadStoreAddress(c, &offset, &alignLog2, &base, inParens))
+        return nullptr;
+
+    if (alignLog2 == UINT32_MAX) {
+        switch (op) {
+          case ThreadOp::I32AtomicCmpXchg8U:
+          case ThreadOp::I64AtomicCmpXchg8U:
+            alignLog2 = 0;
+            break;
+          case ThreadOp::I32AtomicCmpXchg16U:
+          case ThreadOp::I64AtomicCmpXchg16U:
+            alignLog2 = 1;
+            break;
+          case ThreadOp::I32AtomicCmpXchg:
+          case ThreadOp::I64AtomicCmpXchg32U:
+            alignLog2 = 2;
+            break;
+          case ThreadOp::I64AtomicCmpXchg:
+            alignLog2 = 3;
+            break;
+          default:
+            MOZ_CRASH("Bad cmpxchg op");
+        }
+    }
+
+    AstExpr* expected = ParseExpr(c, inParens);
+    if (!expected)
+        return nullptr;
+
+    AstExpr* replacement = ParseExpr(c, inParens);
+    if (!replacement)
+        return nullptr;
+
+    uint32_t flags = alignLog2;
+
+    return new(c.lifo) AstAtomicCmpXchg(op, AstLoadStoreAddress(base, flags, offset), expected,
+                                        replacement);
+}
+
+static AstAtomicLoad*
+ParseAtomicLoad(WasmParseContext& c, ThreadOp op, bool inParens)
+{
+    int32_t offset;
+    uint32_t alignLog2;
+    AstExpr* base;
+    if (!ParseLoadStoreAddress(c, &offset, &alignLog2, &base, inParens))
+        return nullptr;
+
+    if (alignLog2 == UINT32_MAX) {
+        switch (op) {
+          case ThreadOp::I32AtomicLoad8U:
+          case ThreadOp::I64AtomicLoad8U:
+            alignLog2 = 0;
+            break;
+          case ThreadOp::I32AtomicLoad16U:
+          case ThreadOp::I64AtomicLoad16U:
+            alignLog2 = 1;
+            break;
+          case ThreadOp::I32AtomicLoad:
+          case ThreadOp::I64AtomicLoad32U:
+            alignLog2 = 2;
+            break;
+          case ThreadOp::I64AtomicLoad:
+            alignLog2 = 3;
+            break;
+          default:
+            MOZ_CRASH("Bad load op");
+        }
+    }
+
+    uint32_t flags = alignLog2;
+
+    return new(c.lifo) AstAtomicLoad(op, AstLoadStoreAddress(base, flags, offset));
+}
+
+static AstAtomicRMW*
+ParseAtomicRMW(WasmParseContext& c, ThreadOp op, bool inParens)
+{
+    int32_t offset;
+    uint32_t alignLog2;
+    AstExpr* base;
+    if (!ParseLoadStoreAddress(c, &offset, &alignLog2, &base, inParens))
+        return nullptr;
+
+    if (alignLog2 == UINT32_MAX) {
+        switch (op) {
+          case ThreadOp::I32AtomicAdd8U:
+          case ThreadOp::I64AtomicAdd8U:
+          case ThreadOp::I32AtomicAnd8U:
+          case ThreadOp::I64AtomicAnd8U:
+          case ThreadOp::I32AtomicOr8U:
+          case ThreadOp::I64AtomicOr8U:
+          case ThreadOp::I32AtomicSub8U:
+          case ThreadOp::I64AtomicSub8U:
+          case ThreadOp::I32AtomicXor8U:
+          case ThreadOp::I64AtomicXor8U:
+          case ThreadOp::I32AtomicXchg8U:
+          case ThreadOp::I64AtomicXchg8U:
+            alignLog2 = 0;
+            break;
+          case ThreadOp::I32AtomicAdd16U:
+          case ThreadOp::I64AtomicAdd16U:
+          case ThreadOp::I32AtomicAnd16U:
+          case ThreadOp::I64AtomicAnd16U:
+          case ThreadOp::I32AtomicOr16U:
+          case ThreadOp::I64AtomicOr16U:
+          case ThreadOp::I32AtomicSub16U:
+          case ThreadOp::I64AtomicSub16U:
+          case ThreadOp::I32AtomicXor16U:
+          case ThreadOp::I64AtomicXor16U:
+          case ThreadOp::I32AtomicXchg16U:
+          case ThreadOp::I64AtomicXchg16U:
+            alignLog2 = 1;
+            break;
+          case ThreadOp::I32AtomicAdd:
+          case ThreadOp::I64AtomicAdd32U:
+          case ThreadOp::I32AtomicAnd:
+          case ThreadOp::I64AtomicAnd32U:
+          case ThreadOp::I32AtomicOr:
+          case ThreadOp::I64AtomicOr32U:
+          case ThreadOp::I32AtomicSub:
+          case ThreadOp::I64AtomicSub32U:
+          case ThreadOp::I32AtomicXor:
+          case ThreadOp::I64AtomicXor32U:
+          case ThreadOp::I32AtomicXchg:
+          case ThreadOp::I64AtomicXchg32U:
+            alignLog2 = 2;
+            break;
+          case ThreadOp::I64AtomicAdd:
+          case ThreadOp::I64AtomicAnd:
+          case ThreadOp::I64AtomicOr:
+          case ThreadOp::I64AtomicSub:
+          case ThreadOp::I64AtomicXor:
+          case ThreadOp::I64AtomicXchg:
+            alignLog2 = 3;
+            break;
+          default:
+            MOZ_CRASH("Bad RMW op");
+        }
+    }
+
+    AstExpr* value = ParseExpr(c, inParens);
+    if (!value)
+        return nullptr;
+
+    uint32_t flags = alignLog2;
+
+    return new(c.lifo) AstAtomicRMW(op, AstLoadStoreAddress(base, flags, offset), value);
+}
+
+static AstAtomicStore*
+ParseAtomicStore(WasmParseContext& c, ThreadOp op, bool inParens)
+{
+    int32_t offset;
+    uint32_t alignLog2;
+    AstExpr* base;
+    if (!ParseLoadStoreAddress(c, &offset, &alignLog2, &base, inParens))
+        return nullptr;
+
+    if (alignLog2 == UINT32_MAX) {
+        switch (op) {
+          case ThreadOp::I32AtomicStore8U:
+          case ThreadOp::I64AtomicStore8U:
+            alignLog2 = 0;
+            break;
+          case ThreadOp::I32AtomicStore16U:
+          case ThreadOp::I64AtomicStore16U:
+            alignLog2 = 1;
+            break;
+          case ThreadOp::I32AtomicStore:
+          case ThreadOp::I64AtomicStore32U:
+            alignLog2 = 2;
+            break;
+          case ThreadOp::I64AtomicStore:
+            alignLog2 = 3;
+            break;
+          default:
+            MOZ_CRASH("Bad store op");
+        }
+    }
+
+    AstExpr* value = ParseExpr(c, inParens);
+    if (!value)
+        return nullptr;
+
+    uint32_t flags = alignLog2;
+
+    return new(c.lifo) AstAtomicStore(op, AstLoadStoreAddress(base, flags, offset), value);
+}
+
+static AstWait*
+ParseWait(WasmParseContext& c, ThreadOp op, bool inParens)
+{
+    int32_t offset;
+    uint32_t alignLog2;
+    AstExpr* base;
+    if (!ParseLoadStoreAddress(c, &offset, &alignLog2, &base, inParens))
+        return nullptr;
+
+    if (alignLog2 == UINT32_MAX) {
+        switch (op) {
+          case ThreadOp::I32Wait:
+            alignLog2 = 2;
+            break;
+          case ThreadOp::I64Wait:
+            alignLog2 = 3;
+            break;
+          default:
+            MOZ_CRASH("Bad wait op");
+        }
+    }
+
+    AstExpr* expected = ParseExpr(c, inParens);
+    if (!expected)
+        return nullptr;
+
+    AstExpr* timeout = ParseExpr(c, inParens);
+    if (!timeout)
+        return nullptr;
+
+    uint32_t flags = alignLog2;
+
+    return new(c.lifo) AstWait(op, AstLoadStoreAddress(base, flags, offset), expected, timeout);
+}
+
+static AstWake*
+ParseWake(WasmParseContext& c, bool inParens)
+{
+    int32_t offset;
+    uint32_t alignLog2;
+    AstExpr* base;
+    if (!ParseLoadStoreAddress(c, &offset, &alignLog2, &base, inParens))
+        return nullptr;
+
+    // Per spec, the required (and default) alignment is 4, because the smallest
+    // access is int32.
+    if (alignLog2 == UINT32_MAX)
+        alignLog2 = 2;
+
+    AstExpr* count = ParseExpr(c, inParens);
+    if (!count)
+        return nullptr;
+
+    uint32_t flags = alignLog2;
+
+    return new(c.lifo) AstWake(AstLoadStoreAddress(base, flags, offset), count);
+}
+
 static AstBranchTable*
 ParseBranchTable(WasmParseContext& c, WasmToken brTable, bool inParens)
 {
     AstRefVector table(c.lifo);
 
     AstRef target;
     while (c.ts.getIfRef(&target)) {
         if (!table.append(target))
@@ -2438,16 +2864,28 @@ ParseGrowMemory(WasmParseContext& c, boo
 }
 
 static AstExpr*
 ParseExprBody(WasmParseContext& c, WasmToken token, bool inParens)
 {
     switch (token.kind()) {
       case WasmToken::Unreachable:
         return new(c.lifo) AstUnreachable;
+      case WasmToken::AtomicCmpXchg:
+        return ParseAtomicCmpXchg(c, token.threadOp(), inParens);
+      case WasmToken::AtomicLoad:
+        return ParseAtomicLoad(c, token.threadOp(), inParens);
+      case WasmToken::AtomicRMW:
+        return ParseAtomicRMW(c, token.threadOp(), inParens);
+      case WasmToken::AtomicStore:
+        return ParseAtomicStore(c, token.threadOp(), inParens);
+      case WasmToken::Wait:
+        return ParseWait(c, token.threadOp(), inParens);
+      case WasmToken::Wake:
+        return ParseWake(c, inParens);
       case WasmToken::BinaryOpcode:
         return ParseBinaryOperator(c, token.op(), inParens);
       case WasmToken::Block:
         return ParseBlock(c, Op::Block, inParens);
       case WasmToken::Br:
         return ParseBranch(c, Op::Br, inParens);
       case WasmToken::BrIf:
         return ParseBranch(c, Op::BrIf, inParens);
@@ -3748,16 +4186,59 @@ ResolveBranchTable(Resolver& r, AstBranc
 
     if (bt.maybeValue() && !ResolveExpr(r, *bt.maybeValue()))
         return false;
 
     return ResolveExpr(r, bt.index());
 }
 
 static bool
+ResolveAtomicCmpXchg(Resolver& r, AstAtomicCmpXchg& s)
+{
+    return ResolveLoadStoreAddress(r, s.address()) &&
+           ResolveExpr(r, s.expected()) &&
+           ResolveExpr(r, s.replacement());
+}
+
+static bool
+ResolveAtomicLoad(Resolver& r, AstAtomicLoad& l)
+{
+    return ResolveLoadStoreAddress(r, l.address());
+}
+
+static bool
+ResolveAtomicRMW(Resolver& r, AstAtomicRMW& s)
+{
+    return ResolveLoadStoreAddress(r, s.address()) &&
+           ResolveExpr(r, s.value());
+}
+
+static bool
+ResolveAtomicStore(Resolver& r, AstAtomicStore& s)
+{
+    return ResolveLoadStoreAddress(r, s.address()) &&
+           ResolveExpr(r, s.value());
+}
+
+static bool
+ResolveWait(Resolver& r, AstWait& s)
+{
+    return ResolveLoadStoreAddress(r, s.address()) &&
+           ResolveExpr(r, s.expected()) &&
+           ResolveExpr(r, s.timeout());
+}
+
+static bool
+ResolveWake(Resolver& r, AstWake& s)
+{
+    return ResolveLoadStoreAddress(r, s.address()) &&
+           ResolveExpr(r, s.count());
+}
+
+static bool
 ResolveExpr(Resolver& r, AstExpr& expr)
 {
     switch (expr.kind()) {
       case AstExprKind::Nop:
       case AstExprKind::Pop:
       case AstExprKind::Unreachable:
       case AstExprKind::CurrentMemory:
         return true;
@@ -3802,16 +4283,28 @@ ResolveExpr(Resolver& r, AstExpr& expr)
       case AstExprKind::TeeLocal:
         return ResolveTeeLocal(r, expr.as<AstTeeLocal>());
       case AstExprKind::TernaryOperator:
         return ResolveTernaryOperator(r, expr.as<AstTernaryOperator>());
       case AstExprKind::UnaryOperator:
         return ResolveUnaryOperator(r, expr.as<AstUnaryOperator>());
       case AstExprKind::GrowMemory:
         return ResolveGrowMemory(r, expr.as<AstGrowMemory>());
+      case AstExprKind::AtomicCmpXchg:
+        return ResolveAtomicCmpXchg(r, expr.as<AstAtomicCmpXchg>());
+      case AstExprKind::AtomicLoad:
+        return ResolveAtomicLoad(r, expr.as<AstAtomicLoad>());
+      case AstExprKind::AtomicRMW:
+        return ResolveAtomicRMW(r, expr.as<AstAtomicRMW>());
+      case AstExprKind::AtomicStore:
+        return ResolveAtomicStore(r, expr.as<AstAtomicStore>());
+      case AstExprKind::Wait:
+        return ResolveWait(r, expr.as<AstWait>());
+      case AstExprKind::Wake:
+        return ResolveWake(r, expr.as<AstWake>());
     }
     MOZ_CRASH("Bad expr kind");
 }
 
 static bool
 ResolveFunc(Resolver& r, AstFunc& func)
 {
     r.beginFunc();
@@ -4280,16 +4773,71 @@ EncodeGrowMemory(Encoder& e, AstGrowMemo
 
     if (!e.writeVarU32(uint32_t(MemoryTableFlags::Default)))
         return false;
 
     return true;
 }
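+
+// Encoders for the thread operations: the address and operand expressions are
+// encoded first, then the opcode, then the flags/offset immediates
+// (EncodeLoadStoreFlags).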
 
 static bool
+EncodeAtomicCmpXchg(Encoder& e, AstAtomicCmpXchg& s)
+{
+    return EncodeLoadStoreAddress(e, s.address()) &&
+           EncodeExpr(e, s.expected()) &&
+           EncodeExpr(e, s.replacement()) &&
+           e.writeOp(s.op()) &&
+           EncodeLoadStoreFlags(e, s.address());
+}
+
+static bool
+EncodeAtomicLoad(Encoder& e, AstAtomicLoad& l)
+{
+    return EncodeLoadStoreAddress(e, l.address()) &&
+           e.writeOp(l.op()) &&
+           EncodeLoadStoreFlags(e, l.address());
+}
+
+static bool
+EncodeAtomicRMW(Encoder& e, AstAtomicRMW& s)
+{
+    return EncodeLoadStoreAddress(e, s.address()) &&
+           EncodeExpr(e, s.value()) &&
+           e.writeOp(s.op()) &&
+           EncodeLoadStoreFlags(e, s.address());
+}
+
+static bool
+EncodeAtomicStore(Encoder& e, AstAtomicStore& s)
+{
+    return EncodeLoadStoreAddress(e, s.address()) &&
+           EncodeExpr(e, s.value()) &&
+           e.writeOp(s.op()) &&
+           EncodeLoadStoreFlags(e, s.address());
+}
+
+static bool
+EncodeWait(Encoder& e, AstWait& s)
+{
+    return EncodeLoadStoreAddress(e, s.address()) &&
+           EncodeExpr(e, s.expected()) &&
+           EncodeExpr(e, s.timeout()) &&
+           e.writeOp(s.op()) &&
+           EncodeLoadStoreFlags(e, s.address());
+}
+
+static bool
+EncodeWake(Encoder& e, AstWake& s)
+{
+    return EncodeLoadStoreAddress(e, s.address()) &&
+           EncodeExpr(e, s.count()) &&
+           e.writeOp(ThreadOp::Wake) &&
+           EncodeLoadStoreFlags(e, s.address());
+}
+
+static bool
 EncodeExpr(Encoder& e, AstExpr& expr)
 {
     switch (expr.kind()) {
       case AstExprKind::Pop:
         return true;
       case AstExprKind::Nop:
         return e.writeOp(Op::Nop);
       case AstExprKind::Unreachable:
@@ -4337,16 +4885,28 @@ EncodeExpr(Encoder& e, AstExpr& expr)
       case AstExprKind::TernaryOperator:
         return EncodeTernaryOperator(e, expr.as<AstTernaryOperator>());
       case AstExprKind::UnaryOperator:
         return EncodeUnaryOperator(e, expr.as<AstUnaryOperator>());
       case AstExprKind::CurrentMemory:
         return EncodeCurrentMemory(e, expr.as<AstCurrentMemory>());
       case AstExprKind::GrowMemory:
         return EncodeGrowMemory(e, expr.as<AstGrowMemory>());
+      case AstExprKind::AtomicCmpXchg:
+        return EncodeAtomicCmpXchg(e, expr.as<AstAtomicCmpXchg>());
+      case AstExprKind::AtomicLoad:
+        return EncodeAtomicLoad(e, expr.as<AstAtomicLoad>());
+      case AstExprKind::AtomicRMW:
+        return EncodeAtomicRMW(e, expr.as<AstAtomicRMW>());
+      case AstExprKind::AtomicStore:
+        return EncodeAtomicStore(e, expr.as<AstAtomicStore>());
+      case AstExprKind::Wait:
+        return EncodeWait(e, expr.as<AstWait>());
+      case AstExprKind::Wake:
+        return EncodeWake(e, expr.as<AstWake>());
     }
     MOZ_CRASH("Bad expr kind");
 }
 
 /*****************************************************************************/
 // wasm AST binary serialization
 
 static bool