Bug 1377576 - Define binary-to-text machinery. r=sunfish
authorLars T Hansen <lhansen@mozilla.com>
Sun, 02 Jul 2017 15:17:04 -0700
changeset 701924 e13ccb86b41700ecff390736b101472a38c8b4ab
parent 701923 21fb0572b28cd614df9cef36d776f9de7aff5207
child 701925 37c0194e2b9ae59ae0a0917fe18e34510e75f92a
push id90308
push userbmo:lhansen@mozilla.com
push dateWed, 22 Nov 2017 12:45:04 +0000
reviewerssunfish
bugs1377576
milestone59.0a1
Bug 1377576 - Define binary-to-text machinery. r=sunfish
js/src/wasm/WasmBinaryToAST.cpp
js/src/wasm/WasmBinaryToText.cpp
--- a/js/src/wasm/WasmBinaryToAST.cpp
+++ b/js/src/wasm/WasmBinaryToAST.cpp
@@ -963,16 +963,271 @@ AstDecodeReturn(AstDecodeContext& c)
 
     if (!c.push(AstDecodeStackItem(ret)))
         return false;
 
     return true;
 }
 
 static bool
+AstDecodeAtomicLoad(AstDecodeContext& c, ThreadOp op)
+{
+    ValType type;       // result type of the load
+    uint32_t byteSize;  // access width in bytes
+    switch (op) {       // map opcode -> (result type, access width)
+      case ThreadOp::I32AtomicLoad:    type = ValType::I32; byteSize = 4; break;
+      case ThreadOp::I64AtomicLoad:    type = ValType::I64; byteSize = 8; break;
+      case ThreadOp::I32AtomicLoad8U:  type = ValType::I32; byteSize = 1; break;
+      case ThreadOp::I32AtomicLoad16U: type = ValType::I32; byteSize = 2; break;
+      case ThreadOp::I64AtomicLoad8U:  type = ValType::I64; byteSize = 1; break;
+      case ThreadOp::I64AtomicLoad16U: type = ValType::I64; byteSize = 2; break;
+      case ThreadOp::I64AtomicLoad32U: type = ValType::I64; byteSize = 4; break;
+      default:
+        MOZ_CRASH("Should not happen");
+    }
+
+    LinearMemoryAddress<Nothing> addr;
+    if (!c.iter().readAtomicLoad(&addr, type, byteSize))  // decode + validate address immediates
+        return false;
+
+    AstDecodeStackItem item = c.popCopy();  // base-address operand
+
+    AstAtomicLoad* load = new(c.lifo) AstAtomicLoad(op, AstDecodeLoadStoreAddress(addr, item));
+    if (!load)
+        return false;
+
+    if (!c.push(AstDecodeStackItem(load)))  // a load produces a value
+        return false;
+
+    return true;
+}
+
+static bool
+AstDecodeAtomicStore(AstDecodeContext& c, ThreadOp op)
+{
+    ValType type;       // type of the stored value
+    uint32_t byteSize;  // access width in bytes
+    switch (op) {       // map opcode -> (value type, access width)
+      case ThreadOp::I32AtomicStore:    type = ValType::I32; byteSize = 4; break;
+      case ThreadOp::I64AtomicStore:    type = ValType::I64; byteSize = 8; break;
+      case ThreadOp::I32AtomicStore8U:  type = ValType::I32; byteSize = 1; break;
+      case ThreadOp::I32AtomicStore16U: type = ValType::I32; byteSize = 2; break;
+      case ThreadOp::I64AtomicStore8U:  type = ValType::I64; byteSize = 1; break;
+      case ThreadOp::I64AtomicStore16U: type = ValType::I64; byteSize = 2; break;
+      case ThreadOp::I64AtomicStore32U: type = ValType::I64; byteSize = 4; break;
+      default:
+        MOZ_CRASH("Should not happen");
+    }
+
+    Nothing nothing;
+    LinearMemoryAddress<Nothing> addr;
+    if (!c.iter().readAtomicStore(&addr, type, byteSize, &nothing))  // decode + validate immediates
+        return false;
+
+    AstDecodeStackItem value = c.popCopy();  // value operand (pushed last)
+    AstDecodeStackItem item = c.popCopy();   // base-address operand
+
+    AstAtomicStore* store = new(c.lifo) AstAtomicStore(op, AstDecodeLoadStoreAddress(addr, item), value.expr);
+    if (!store)
+        return false;
+
+    AstExpr* wrapped = c.handleVoidExpr(store);  // a store produces no value
+    if (!wrapped)
+        return false;
+
+    if (!c.push(AstDecodeStackItem(wrapped)))
+        return false;
+
+    return true;
+}
+
+static bool
+AstDecodeAtomicRMW(AstDecodeContext& c, ThreadOp op)
+{
+    ValType type;       // operand/result type of the RMW
+    uint32_t byteSize;  // access width in bytes
+    switch (op) {       // map opcode -> (type, access width)
+      case ThreadOp::I32AtomicAdd:
+      case ThreadOp::I32AtomicSub:
+      case ThreadOp::I32AtomicAnd:
+      case ThreadOp::I32AtomicOr:
+      case ThreadOp::I32AtomicXor:
+      case ThreadOp::I32AtomicXchg:
+        type = ValType::I32;
+        byteSize = 4;
+        break;
+      case ThreadOp::I64AtomicAdd:
+      case ThreadOp::I64AtomicSub:
+      case ThreadOp::I64AtomicAnd:
+      case ThreadOp::I64AtomicOr:
+      case ThreadOp::I64AtomicXor:
+      case ThreadOp::I64AtomicXchg:
+        type = ValType::I64;
+        byteSize = 8;
+        break;
+      case ThreadOp::I32AtomicAdd8U:
+      case ThreadOp::I32AtomicSub8U:
+      case ThreadOp::I32AtomicAnd8U:
+      case ThreadOp::I32AtomicOr8U:
+      case ThreadOp::I32AtomicXor8U:
+      case ThreadOp::I32AtomicXchg8U:
+        type = ValType::I32;
+        byteSize = 1;
+        break;
+      case ThreadOp::I32AtomicAdd16U:
+      case ThreadOp::I32AtomicSub16U:
+      case ThreadOp::I32AtomicAnd16U:
+      case ThreadOp::I32AtomicOr16U:
+      case ThreadOp::I32AtomicXor16U:
+      case ThreadOp::I32AtomicXchg16U:
+        type = ValType::I32;
+        byteSize = 2;
+        break;
+      case ThreadOp::I64AtomicAdd8U:
+      case ThreadOp::I64AtomicSub8U:
+      case ThreadOp::I64AtomicAnd8U:
+      case ThreadOp::I64AtomicOr8U:
+      case ThreadOp::I64AtomicXor8U:
+      case ThreadOp::I64AtomicXchg8U:
+        type = ValType::I64;
+        byteSize = 1;
+        break;
+      case ThreadOp::I64AtomicAdd16U:
+      case ThreadOp::I64AtomicSub16U:
+      case ThreadOp::I64AtomicAnd16U:
+      case ThreadOp::I64AtomicOr16U:
+      case ThreadOp::I64AtomicXor16U:
+      case ThreadOp::I64AtomicXchg16U:
+        type = ValType::I64;
+        byteSize = 2;
+        break;
+      case ThreadOp::I64AtomicAdd32U:
+      case ThreadOp::I64AtomicSub32U:
+      case ThreadOp::I64AtomicAnd32U:
+      case ThreadOp::I64AtomicOr32U:
+      case ThreadOp::I64AtomicXor32U:
+      case ThreadOp::I64AtomicXchg32U:
+        type = ValType::I64;
+        byteSize = 4;
+        break;
+      default:
+        MOZ_CRASH("Should not happen");
+    }
+
+    Nothing nothing;
+    LinearMemoryAddress<Nothing> addr;
+    if (!c.iter().readAtomicRMW(&addr, type, byteSize, &nothing))  // decode + validate immediates
+        return false;
+
+    AstDecodeStackItem value = c.popCopy();  // operand value (pushed last)
+    AstDecodeStackItem item = c.popCopy();   // base-address operand
+
+    AstAtomicRMW* rmw = new(c.lifo) AstAtomicRMW(op, AstDecodeLoadStoreAddress(addr, item),
+                                                 value.expr);
+    if (!rmw)
+        return false;
+
+    if (!c.push(AstDecodeStackItem(rmw)))  // an RMW produces the old value
+        return false;
+
+    return true;
+}
+
+static bool
+AstDecodeAtomicCmpXchg(AstDecodeContext& c, ThreadOp op)
+{
+    ValType type;       // operand/result type of the cmpxchg
+    uint32_t byteSize;  // access width in bytes
+    switch (op) {       // map opcode -> (type, access width)
+      case ThreadOp::I32AtomicCmpXchg:    type = ValType::I32; byteSize = 4; break;
+      case ThreadOp::I64AtomicCmpXchg:    type = ValType::I64; byteSize = 8; break;
+      case ThreadOp::I32AtomicCmpXchg8U:  type = ValType::I32; byteSize = 1; break;
+      case ThreadOp::I32AtomicCmpXchg16U: type = ValType::I32; byteSize = 2; break;
+      case ThreadOp::I64AtomicCmpXchg8U:  type = ValType::I64; byteSize = 1; break;
+      case ThreadOp::I64AtomicCmpXchg16U: type = ValType::I64; byteSize = 2; break;
+      case ThreadOp::I64AtomicCmpXchg32U: type = ValType::I64; byteSize = 4; break;
+      default:
+        MOZ_CRASH("Should not happen");
+    }
+
+    Nothing nothing;
+    LinearMemoryAddress<Nothing> addr;
+    if (!c.iter().readAtomicCmpXchg(&addr, type, byteSize, &nothing, &nothing))  // decode + validate
+        return false;
+
+    AstDecodeStackItem replacement = c.popCopy();  // operands pop in reverse push order
+    AstDecodeStackItem expected = c.popCopy();
+    AstDecodeStackItem item = c.popCopy();         // base-address operand
+
+    AstAtomicCmpXchg* cmpxchg =
+        new(c.lifo) AstAtomicCmpXchg(op, AstDecodeLoadStoreAddress(addr, item), expected.expr,
+                                     replacement.expr);
+    if (!cmpxchg)
+        return false;
+
+    if (!c.push(AstDecodeStackItem(cmpxchg)))  // cmpxchg produces the old value
+        return false;
+
+    return true;
+}
+
+static bool
+AstDecodeWait(AstDecodeContext& c, ThreadOp op)
+{
+    ValType type;       // type of the expected-value operand
+    uint32_t byteSize;  // access width in bytes
+    switch (op) {       // map opcode -> (type, access width)
+      case ThreadOp::I32Wait: type = ValType::I32; byteSize = 4; break;
+      case ThreadOp::I64Wait: type = ValType::I64; byteSize = 8; break;
+      default:
+        MOZ_CRASH("Should not happen");
+    }
+
+    Nothing nothing;
+    LinearMemoryAddress<Nothing> addr;
+    if (!c.iter().readWait(&addr, type, byteSize, &nothing, &nothing))  // decode + validate
+        return false;
+
+    AstDecodeStackItem timeout = c.popCopy();  // operands pop in reverse push order
+    AstDecodeStackItem value = c.popCopy();
+    AstDecodeStackItem item = c.popCopy();     // base-address operand
+
+    AstWait* wait = new(c.lifo) AstWait(op, AstDecodeLoadStoreAddress(addr, item), value.expr,
+                                        timeout.expr);
+    if (!wait)
+        return false;
+
+    if (!c.push(AstDecodeStackItem(wait)))  // wait produces a result value
+        return false;
+
+    return true;
+}
+
+static bool
+AstDecodeWake(AstDecodeContext& c)
+{
+    Nothing nothing;
+    LinearMemoryAddress<Nothing> addr;
+    if (!c.iter().readWake(&addr, &nothing))  // decode + validate immediates
+        return false;
+
+    AstDecodeStackItem count = c.popCopy();  // waiter-count operand (pushed last)
+    AstDecodeStackItem item = c.popCopy();   // base-address operand
+
+    AstWake* wake = new(c.lifo) AstWake(AstDecodeLoadStoreAddress(addr, item), count.expr);
+    if (!wake)
+        return false;
+
+    if (!c.push(AstDecodeStackItem(wake)))  // wake produces a result value
+        return false;
+
+    return true;
+}
+
+static bool
 AstDecodeExpr(AstDecodeContext& c)
 {
     uint32_t exprOffset = c.iter().currentOffset();
     OpBytes op;
     if (!c.iter().readOp(&op))
         return false;
 
     AstExpr* tmp;
@@ -1382,17 +1637,105 @@ AstDecodeExpr(AstDecodeContext& c)
             return false;
         tmp = new(c.lifo) AstUnreachable();
         if (!tmp)
             return false;
         if (!c.push(AstDecodeStackItem(tmp)))
             return false;
         break;
       case uint16_t(Op::ThreadPrefix):
-        MOZ_CRASH("ThreadPrefix ops not yet implemented");
+        switch (op.b1) {  // second opcode byte selects the thread operation
+          case uint16_t(ThreadOp::Wake):
+            if (!AstDecodeWake(c))
+                return false;
+            break;
+          case uint16_t(ThreadOp::I32Wait):
+          case uint16_t(ThreadOp::I64Wait):
+            if (!AstDecodeWait(c, ThreadOp(op.b1)))
+                return false;
+            break;
+          case uint16_t(ThreadOp::I32AtomicLoad):
+          case uint16_t(ThreadOp::I64AtomicLoad):
+          case uint16_t(ThreadOp::I32AtomicLoad8U):
+          case uint16_t(ThreadOp::I32AtomicLoad16U):
+          case uint16_t(ThreadOp::I64AtomicLoad8U):
+          case uint16_t(ThreadOp::I64AtomicLoad16U):
+          case uint16_t(ThreadOp::I64AtomicLoad32U):
+            if (!AstDecodeAtomicLoad(c, ThreadOp(op.b1)))
+                return false;
+            break;
+          case uint16_t(ThreadOp::I32AtomicStore):
+          case uint16_t(ThreadOp::I64AtomicStore):
+          case uint16_t(ThreadOp::I32AtomicStore8U):
+          case uint16_t(ThreadOp::I32AtomicStore16U):
+          case uint16_t(ThreadOp::I64AtomicStore8U):
+          case uint16_t(ThreadOp::I64AtomicStore16U):
+          case uint16_t(ThreadOp::I64AtomicStore32U):
+            if (!AstDecodeAtomicStore(c, ThreadOp(op.b1)))
+                return false;
+            break;
+          case uint16_t(ThreadOp::I32AtomicAdd):  // all RMW ops share one decoder
+          case uint16_t(ThreadOp::I64AtomicAdd):
+          case uint16_t(ThreadOp::I32AtomicAdd8U):
+          case uint16_t(ThreadOp::I32AtomicAdd16U):
+          case uint16_t(ThreadOp::I64AtomicAdd8U):
+          case uint16_t(ThreadOp::I64AtomicAdd16U):
+          case uint16_t(ThreadOp::I64AtomicAdd32U):
+          case uint16_t(ThreadOp::I32AtomicSub):
+          case uint16_t(ThreadOp::I64AtomicSub):
+          case uint16_t(ThreadOp::I32AtomicSub8U):
+          case uint16_t(ThreadOp::I32AtomicSub16U):
+          case uint16_t(ThreadOp::I64AtomicSub8U):
+          case uint16_t(ThreadOp::I64AtomicSub16U):
+          case uint16_t(ThreadOp::I64AtomicSub32U):
+          case uint16_t(ThreadOp::I32AtomicAnd):
+          case uint16_t(ThreadOp::I64AtomicAnd):
+          case uint16_t(ThreadOp::I32AtomicAnd8U):
+          case uint16_t(ThreadOp::I32AtomicAnd16U):
+          case uint16_t(ThreadOp::I64AtomicAnd8U):
+          case uint16_t(ThreadOp::I64AtomicAnd16U):
+          case uint16_t(ThreadOp::I64AtomicAnd32U):
+          case uint16_t(ThreadOp::I32AtomicOr):
+          case uint16_t(ThreadOp::I64AtomicOr):
+          case uint16_t(ThreadOp::I32AtomicOr8U):
+          case uint16_t(ThreadOp::I32AtomicOr16U):
+          case uint16_t(ThreadOp::I64AtomicOr8U):
+          case uint16_t(ThreadOp::I64AtomicOr16U):
+          case uint16_t(ThreadOp::I64AtomicOr32U):
+          case uint16_t(ThreadOp::I32AtomicXor):
+          case uint16_t(ThreadOp::I64AtomicXor):
+          case uint16_t(ThreadOp::I32AtomicXor8U):
+          case uint16_t(ThreadOp::I32AtomicXor16U):
+          case uint16_t(ThreadOp::I64AtomicXor8U):
+          case uint16_t(ThreadOp::I64AtomicXor16U):
+          case uint16_t(ThreadOp::I64AtomicXor32U):
+          case uint16_t(ThreadOp::I32AtomicXchg):
+          case uint16_t(ThreadOp::I64AtomicXchg):
+          case uint16_t(ThreadOp::I32AtomicXchg8U):
+          case uint16_t(ThreadOp::I32AtomicXchg16U):
+          case uint16_t(ThreadOp::I64AtomicXchg8U):
+          case uint16_t(ThreadOp::I64AtomicXchg16U):
+          case uint16_t(ThreadOp::I64AtomicXchg32U):
+            if (!AstDecodeAtomicRMW(c, ThreadOp(op.b1)))
+                return false;
+            break;
+          case uint16_t(ThreadOp::I32AtomicCmpXchg):
+          case uint16_t(ThreadOp::I64AtomicCmpXchg):
+          case uint16_t(ThreadOp::I32AtomicCmpXchg8U):
+          case uint16_t(ThreadOp::I32AtomicCmpXchg16U):
+          case uint16_t(ThreadOp::I64AtomicCmpXchg8U):
+          case uint16_t(ThreadOp::I64AtomicCmpXchg16U):
+          case uint16_t(ThreadOp::I64AtomicCmpXchg32U):
+            if (!AstDecodeAtomicCmpXchg(c, ThreadOp(op.b1)))
+                return false;
+            break;
+          default:
+            return c.iter().unrecognizedOpcode(&op);  // unknown thread opcode
+        }
+        break;
       case uint16_t(Op::MozPrefix):
         return c.iter().unrecognizedOpcode(&op);
       default:
         return c.iter().unrecognizedOpcode(&op);
     }
 
     AstExpr* lastExpr = c.top().expr;
     if (lastExpr) {
--- a/js/src/wasm/WasmBinaryToText.cpp
+++ b/js/src/wasm/WasmBinaryToText.cpp
@@ -1042,16 +1042,221 @@ RenderReturn(WasmRenderContext& c, AstRe
     if (!RenderIndent(c))
         return false;
 
     MAP_AST_EXPR(c, ret);
     return c.buffer.append("return");
 }
 
 static bool
+RenderAtomicCmpXchg(WasmRenderContext& c, AstAtomicCmpXchg& cmpxchg)
+{
+    if (!RenderLoadStoreBase(c, cmpxchg.address()))  // render operands first, then operator
+        return false;
+
+    if (!RenderExpr(c, cmpxchg.expected()))
+        return false;
+    if (!RenderExpr(c, cmpxchg.replacement()))
+        return false;
+
+    if (!RenderIndent(c))
+        return false;
+
+    MAP_AST_EXPR(c, cmpxchg);
+    const char* opname;
+    switch (cmpxchg.op()) {  // opcode -> text-format mnemonic
+      case ThreadOp::I32AtomicCmpXchg8U:  opname = "i32.atomic.rmw8_u.cmpxchg"; break;
+      case ThreadOp::I64AtomicCmpXchg8U:  opname = "i64.atomic.rmw8_u.cmpxchg"; break;
+      case ThreadOp::I32AtomicCmpXchg16U: opname = "i32.atomic.rmw16_u.cmpxchg"; break;
+      case ThreadOp::I64AtomicCmpXchg16U: opname = "i64.atomic.rmw16_u.cmpxchg"; break;
+      case ThreadOp::I64AtomicCmpXchg32U: opname = "i64.atomic.rmw32_u.cmpxchg"; break;
+      case ThreadOp::I32AtomicCmpXchg:    opname = "i32.atomic.rmw.cmpxchg"; break;
+      case ThreadOp::I64AtomicCmpXchg:    opname = "i64.atomic.rmw.cmpxchg"; break;
+      default:                            return Fail(c, "unexpected cmpxchg operator");
+    }
+
+    if (!c.buffer.append(opname, strlen(opname)))
+        return false;
+
+    return RenderLoadStoreAddress(c, cmpxchg.address(), 0);  // trailing address immediates
+}
+
+static bool
+RenderAtomicLoad(WasmRenderContext& c, AstAtomicLoad& load)
+{
+    if (!RenderLoadStoreBase(c, load.address()))  // render operand first, then operator
+        return false;
+
+    if (!RenderIndent(c))
+        return false;
+
+    MAP_AST_EXPR(c, load);
+    const char* opname;
+    switch (load.op()) {  // opcode -> text-format mnemonic
+      case ThreadOp::I32AtomicLoad8U:  opname = "i32.atomic.load8_u"; break;
+      case ThreadOp::I64AtomicLoad8U:  opname = "i64.atomic.load8_u"; break;
+      case ThreadOp::I32AtomicLoad16U: opname = "i32.atomic.load16_u"; break;
+      case ThreadOp::I64AtomicLoad16U: opname = "i64.atomic.load16_u"; break;
+      case ThreadOp::I64AtomicLoad32U: opname = "i64.atomic.load32_u"; break;
+      case ThreadOp::I32AtomicLoad:    opname = "i32.atomic.load"; break;
+      case ThreadOp::I64AtomicLoad:    opname = "i64.atomic.load"; break;
+      default:                         return Fail(c, "unexpected load operator");
+    }
+
+    if (!c.buffer.append(opname, strlen(opname)))
+        return false;
+
+    return RenderLoadStoreAddress(c, load.address(), 0);  // trailing address immediates
+}
+
+static bool
+RenderAtomicRMW(WasmRenderContext& c, AstAtomicRMW& rmw)
+{
+    if (!RenderLoadStoreBase(c, rmw.address()))  // render operands first, then operator
+        return false;
+
+    if (!RenderExpr(c, rmw.value()))
+        return false;
+
+    if (!RenderIndent(c))
+        return false;
+
+    MAP_AST_EXPR(c, rmw);
+    const char* opname;
+    switch (rmw.op()) {  // opcode -> text-format mnemonic
+      case ThreadOp::I32AtomicAdd:     opname = "i32.atomic.rmw.add"; break;
+      case ThreadOp::I64AtomicAdd:     opname = "i64.atomic.rmw.add"; break;
+      case ThreadOp::I32AtomicAdd8U:   opname = "i32.atomic.rmw8_u.add"; break;
+      case ThreadOp::I32AtomicAdd16U:  opname = "i32.atomic.rmw16_u.add"; break;
+      case ThreadOp::I64AtomicAdd8U:   opname = "i64.atomic.rmw8_u.add"; break;
+      case ThreadOp::I64AtomicAdd16U:  opname = "i64.atomic.rmw16_u.add"; break;
+      case ThreadOp::I64AtomicAdd32U:  opname = "i64.atomic.rmw32_u.add"; break;
+      case ThreadOp::I32AtomicSub:     opname = "i32.atomic.rmw.sub"; break;
+      case ThreadOp::I64AtomicSub:     opname = "i64.atomic.rmw.sub"; break;
+      case ThreadOp::I32AtomicSub8U:   opname = "i32.atomic.rmw8_u.sub"; break;
+      case ThreadOp::I32AtomicSub16U:  opname = "i32.atomic.rmw16_u.sub"; break;
+      case ThreadOp::I64AtomicSub8U:   opname = "i64.atomic.rmw8_u.sub"; break;
+      case ThreadOp::I64AtomicSub16U:  opname = "i64.atomic.rmw16_u.sub"; break;
+      case ThreadOp::I64AtomicSub32U:  opname = "i64.atomic.rmw32_u.sub"; break;
+      case ThreadOp::I32AtomicAnd:     opname = "i32.atomic.rmw.and"; break;
+      case ThreadOp::I64AtomicAnd:     opname = "i64.atomic.rmw.and"; break;
+      case ThreadOp::I32AtomicAnd8U:   opname = "i32.atomic.rmw8_u.and"; break;
+      case ThreadOp::I32AtomicAnd16U:  opname = "i32.atomic.rmw16_u.and"; break;
+      case ThreadOp::I64AtomicAnd8U:   opname = "i64.atomic.rmw8_u.and"; break;
+      case ThreadOp::I64AtomicAnd16U:  opname = "i64.atomic.rmw16_u.and"; break;
+      case ThreadOp::I64AtomicAnd32U:  opname = "i64.atomic.rmw32_u.and"; break;
+      case ThreadOp::I32AtomicOr:      opname = "i32.atomic.rmw.or"; break;
+      case ThreadOp::I64AtomicOr:      opname = "i64.atomic.rmw.or"; break;
+      case ThreadOp::I32AtomicOr8U:    opname = "i32.atomic.rmw8_u.or"; break;
+      case ThreadOp::I32AtomicOr16U:   opname = "i32.atomic.rmw16_u.or"; break;
+      case ThreadOp::I64AtomicOr8U:    opname = "i64.atomic.rmw8_u.or"; break;
+      case ThreadOp::I64AtomicOr16U:   opname = "i64.atomic.rmw16_u.or"; break;
+      case ThreadOp::I64AtomicOr32U:   opname = "i64.atomic.rmw32_u.or"; break;
+      case ThreadOp::I32AtomicXor:     opname = "i32.atomic.rmw.xor"; break;
+      case ThreadOp::I64AtomicXor:     opname = "i64.atomic.rmw.xor"; break;
+      case ThreadOp::I32AtomicXor8U:   opname = "i32.atomic.rmw8_u.xor"; break;
+      case ThreadOp::I32AtomicXor16U:  opname = "i32.atomic.rmw16_u.xor"; break;
+      case ThreadOp::I64AtomicXor8U:   opname = "i64.atomic.rmw8_u.xor"; break;
+      case ThreadOp::I64AtomicXor16U:  opname = "i64.atomic.rmw16_u.xor"; break;
+      case ThreadOp::I64AtomicXor32U:  opname = "i64.atomic.rmw32_u.xor"; break;
+      case ThreadOp::I32AtomicXchg:    opname = "i32.atomic.rmw.xchg"; break;
+      case ThreadOp::I64AtomicXchg:    opname = "i64.atomic.rmw.xchg"; break;
+      case ThreadOp::I32AtomicXchg8U:  opname = "i32.atomic.rmw8_u.xchg"; break;
+      case ThreadOp::I32AtomicXchg16U: opname = "i32.atomic.rmw16_u.xchg"; break;
+      case ThreadOp::I64AtomicXchg8U:  opname = "i64.atomic.rmw8_u.xchg"; break;
+      case ThreadOp::I64AtomicXchg16U: opname = "i64.atomic.rmw16_u.xchg"; break;
+      case ThreadOp::I64AtomicXchg32U: opname = "i64.atomic.rmw32_u.xchg"; break;
+      default:                         return Fail(c, "unexpected rmw operator");
+    }
+
+    if (!c.buffer.append(opname, strlen(opname)))
+        return false;
+
+    return RenderLoadStoreAddress(c, rmw.address(), 0);  // trailing address immediates
+}
+
+static bool
+RenderAtomicStore(WasmRenderContext& c, AstAtomicStore& store)
+{
+    if (!RenderLoadStoreBase(c, store.address()))  // render operands first, then operator
+        return false;
+
+    if (!RenderExpr(c, store.value()))
+        return false;
+
+    if (!RenderIndent(c))
+        return false;
+
+    MAP_AST_EXPR(c, store);
+    const char* opname;
+    switch (store.op()) {  // opcode -> text-format mnemonic
+      case ThreadOp::I32AtomicStore8U:  opname = "i32.atomic.store8_u"; break;
+      case ThreadOp::I64AtomicStore8U:  opname = "i64.atomic.store8_u"; break;
+      case ThreadOp::I32AtomicStore16U: opname = "i32.atomic.store16_u"; break;
+      case ThreadOp::I64AtomicStore16U: opname = "i64.atomic.store16_u"; break;
+      case ThreadOp::I64AtomicStore32U: opname = "i64.atomic.store32_u"; break;
+      case ThreadOp::I32AtomicStore:    opname = "i32.atomic.store"; break;
+      case ThreadOp::I64AtomicStore:    opname = "i64.atomic.store"; break;
+      default:                          return Fail(c, "unexpected store operator");
+    }
+
+    if (!c.buffer.append(opname, strlen(opname)))
+        return false;
+
+    return RenderLoadStoreAddress(c, store.address(), 0);  // trailing address immediates
+}
+
+static bool
+RenderWait(WasmRenderContext& c, AstWait& wait)
+{
+    if (!RenderLoadStoreBase(c, wait.address()))  // render operands first, then operator
+        return false;
+
+    if (!RenderExpr(c, wait.expected()))
+        return false;
+
+    if (!RenderExpr(c, wait.timeout()))
+        return false;
+
+    if (!RenderIndent(c))
+        return false;
+
+    MAP_AST_EXPR(c, wait);
+    const char* opname;
+    switch (wait.op()) {  // opcode -> text-format mnemonic
+      case ThreadOp::I32Wait:  opname = "i32.atomic.wait"; break;
+      case ThreadOp::I64Wait:  opname = "i64.atomic.wait"; break;
+      default:                 return Fail(c, "unexpected wait operator");
+    }
+
+    if (!c.buffer.append(opname, strlen(opname)))
+        return false;
+
+    return RenderLoadStoreAddress(c, wait.address(), 0);  // trailing address immediates
+}
+
+static bool
+RenderWake(WasmRenderContext& c, AstWake& wake)
+{
+    if (!RenderLoadStoreBase(c, wake.address()))  // render operands first, then operator
+        return false;
+
+    if (!RenderExpr(c, wake.count()))
+        return false;
+
+    if (!RenderIndent(c))
+        return false;
+
+    if (!c.buffer.append("atomic.wake", strlen("atomic.wake")))  // wake has one fixed mnemonic
+        return false;
+
+    return RenderLoadStoreAddress(c, wake.address(), 0);  // trailing address immediates
+}
+
+static bool
 RenderExpr(WasmRenderContext& c, AstExpr& expr, bool newLine /* = true */)
 {
     switch (expr.kind()) {
       case AstExprKind::Drop:
         if (!RenderDrop(c, expr.as<AstDrop>()))
             return false;
         break;
       case AstExprKind::Nop:
@@ -1150,16 +1355,40 @@ RenderExpr(WasmRenderContext& c, AstExpr
       case AstExprKind::CurrentMemory:
         if (!RenderCurrentMemory(c, expr.as<AstCurrentMemory>()))
             return false;
         break;
       case AstExprKind::GrowMemory:
         if (!RenderGrowMemory(c, expr.as<AstGrowMemory>()))
             return false;
         break;
+      case AstExprKind::AtomicCmpXchg:  // thread/atomics operators
+        if (!RenderAtomicCmpXchg(c, expr.as<AstAtomicCmpXchg>()))
+            return false;
+        break;
+      case AstExprKind::AtomicLoad:
+        if (!RenderAtomicLoad(c, expr.as<AstAtomicLoad>()))
+            return false;
+        break;
+      case AstExprKind::AtomicRMW:
+        if (!RenderAtomicRMW(c, expr.as<AstAtomicRMW>()))
+            return false;
+        break;
+      case AstExprKind::AtomicStore:
+        if (!RenderAtomicStore(c, expr.as<AstAtomicStore>()))
+            return false;
+        break;
+      case AstExprKind::Wait:
+        if (!RenderWait(c, expr.as<AstWait>()))
+            return false;
+        break;
+      case AstExprKind::Wake:
+        if (!RenderWake(c, expr.as<AstWake>()))
+            return false;
+        break;
       default:
         MOZ_CRASH("Bad AstExprKind");
     }
 
     return !newLine || c.buffer.append("\n");
 }
 
 static bool