Bug 1626967: Use snake_case in shared Cranelift data structures; r=rhunt
author: Benjamin Bouvier <benj@benj.me>
Fri, 10 Apr 2020 09:53:15 +0000
changeset 523371 b44cac1aa5f193213c96749cf9ea3deea5efd73c
parent 523370 fb80ccae7376ec82a7a058de97c33f5c7b47f6cd
child 523372 09c016706d3f16ba0687c74baf0938a8f5f5188d
push id: 37301
push user: aiakab@mozilla.com
push date: Fri, 10 Apr 2020 21:37:00 +0000
treeherder: mozilla-central@82d84da94d8d [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: rhunt
bugs: 1626967
milestone: 77.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1626967: Use snake_case in shared Cranelift data structures; r=rhunt

This favors Rust, but there's no way to generate different names in the bindings, as far as I know.

Differential Revision: https://phabricator.services.mozilla.com/D69395
js/src/wasm/WasmCraneliftCompile.cpp
js/src/wasm/cranelift/baldrapi.h
js/src/wasm/cranelift/src/bindings/mod.rs
js/src/wasm/cranelift/src/compile.rs
js/src/wasm/cranelift/src/isa.rs
js/src/wasm/cranelift/src/wasm2clif.rs
--- a/js/src/wasm/WasmCraneliftCompile.cpp
+++ b/js/src/wasm/WasmCraneliftCompile.cpp
@@ -114,21 +114,22 @@ static bool GenerateCraneliftCode(WasmMa
                                   StackMaps* stackMaps, size_t stackMapsOffset,
                                   size_t stackMapsCount, FuncOffsets* offsets) {
   const FuncTypeIdDesc& funcTypeId = funcType.id;
 
   wasm::GenerateFunctionPrologue(masm, funcTypeId, mozilla::Nothing(), offsets);
 
   // Omit the check when framePushed is small and we know there's no
   // recursion.
-  if (func.framePushed < MAX_UNCHECKED_LEAF_FRAME_SIZE && !func.containsCalls) {
-    masm.reserveStack(func.framePushed);
+  if (func.frame_pushed < MAX_UNCHECKED_LEAF_FRAME_SIZE &&
+      !func.contains_calls) {
+    masm.reserveStack(func.frame_pushed);
   } else {
     std::pair<CodeOffset, uint32_t> pair = masm.wasmReserveStackChecked(
-        func.framePushed, BytecodeOffset(lineOrBytecode));
+        func.frame_pushed, BytecodeOffset(lineOrBytecode));
     CodeOffset trapInsnOffset = pair.first;
     size_t nBytesReservedBeforeTrap = pair.second;
 
     MachineState trapExitLayout;
     size_t trapExitLayoutNumWords;
     GenerateTrapExitMachineState(&trapExitLayout, &trapExitLayoutNumWords);
 
     size_t nInboundStackArgBytes = StackArgAreaSizeUnaligned(funcType.args());
@@ -148,63 +149,63 @@ static bool GenerateCraneliftCode(WasmMa
 
     if (functionEntryStackMap &&
         !stackMaps->add((uint8_t*)(uintptr_t)trapInsnOffset.offset(),
                         functionEntryStackMap)) {
       functionEntryStackMap->destroy();
       return false;
     }
   }
-  MOZ_ASSERT(masm.framePushed() == func.framePushed);
+  MOZ_ASSERT(masm.framePushed() == func.frame_pushed);
 
   // Copy the machine code; handle jump tables and other read-only data below.
   uint32_t funcBase = masm.currentOffset();
-  if (!masm.appendRawCode(func.code, func.codeSize)) {
+  if (!masm.appendRawCode(func.code, func.code_size)) {
     return false;
   }
 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
   uint32_t codeEnd = masm.currentOffset();
 #endif
 
-  wasm::GenerateFunctionEpilogue(masm, func.framePushed, offsets);
+  wasm::GenerateFunctionEpilogue(masm, func.frame_pushed, offsets);
 
-  if (func.numRodataRelocs > 0) {
+  if (func.num_rodata_relocs > 0) {
 #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
     constexpr size_t jumptableElementSize = 4;
 
-    MOZ_ASSERT(func.jumptablesSize % jumptableElementSize == 0);
+    MOZ_ASSERT(func.jumptables_size % jumptableElementSize == 0);
 
     // Align the jump tables properly.
     masm.haltingAlign(jumptableElementSize);
 
     // Copy over the tables and read-only data.
     uint32_t rodataBase = masm.currentOffset();
-    if (!masm.appendRawCode(func.code + func.codeSize,
-                            func.totalSize - func.codeSize)) {
+    if (!masm.appendRawCode(func.code + func.code_size,
+                            func.total_size - func.code_size)) {
       return false;
     }
 
-    uint32_t numElem = func.jumptablesSize / jumptableElementSize;
+    uint32_t numElem = func.jumptables_size / jumptableElementSize;
     uint32_t bias = rodataBase - codeEnd;
 
     // Bias the jump table(s).  The table values are negative values
     // representing backward jumps.  By shifting the table down we increase the
     // distance and so we add a negative value to reflect the larger distance.
     //
     // Note addToPCRel4() works from the end of the instruction, hence the loop
     // bounds.
     for (uint32_t i = 1; i <= numElem; i++) {
       masm.addToPCRel4(rodataBase + (i * jumptableElementSize), -bias);
     }
 
     // Patch up the code locations.  These represent forward distances that also
     // become greater, so we add a positive value.
-    for (uint32_t i = 0; i < func.numRodataRelocs; i++) {
-      MOZ_ASSERT(func.rodataRelocs[i] < func.codeSize);
-      masm.addToPCRel4(funcBase + func.rodataRelocs[i], bias);
+    for (uint32_t i = 0; i < func.num_rodata_relocs; i++) {
+      MOZ_ASSERT(func.rodata_relocs[i] < func.code_size);
+      masm.addToPCRel4(funcBase + func.rodata_relocs[i], bias);
     }
 #else
     MOZ_CRASH("No jump table support on this platform");
 #endif
   }
 
   masm.flush();
   if (masm.oom()) {
@@ -212,39 +213,39 @@ static bool GenerateCraneliftCode(WasmMa
   }
   offsets->end = masm.currentOffset();
 
   for (size_t i = 0; i < stackMapsCount; i++) {
     auto* maplet = stackMaps->getRef(stackMapsOffset + i);
     maplet->offsetBy(funcBase);
   }
 
-  for (size_t i = 0; i < func.numMetadata; i++) {
+  for (size_t i = 0; i < func.num_metadata; i++) {
     const CraneliftMetadataEntry& metadata = func.metadatas[i];
 
     CheckedInt<size_t> offset = funcBase;
-    offset += metadata.codeOffset;
+    offset += metadata.code_offset;
     if (!offset.isValid()) {
       return false;
     }
 
 #ifdef DEBUG
     // Check code offsets.
     MOZ_ASSERT(offset.value() >= offsets->normalEntry);
     MOZ_ASSERT(offset.value() < offsets->ret);
-    MOZ_ASSERT(metadata.moduleBytecodeOffset != 0);
+    MOZ_ASSERT(metadata.module_bytecode_offset != 0);
 
     // Check bytecode offsets.
     if (lineOrBytecode > 0) {
-      MOZ_ASSERT(metadata.moduleBytecodeOffset >= lineOrBytecode);
-      MOZ_ASSERT(metadata.moduleBytecodeOffset <
+      MOZ_ASSERT(metadata.module_bytecode_offset >= lineOrBytecode);
+      MOZ_ASSERT(metadata.module_bytecode_offset <
                  lineOrBytecode + funcBytecodeSize);
     }
 #endif
-    uint32_t bytecodeOffset = metadata.moduleBytecodeOffset;
+    uint32_t bytecodeOffset = metadata.module_bytecode_offset;
 
     switch (metadata.which) {
       case CraneliftMetadataEntry::Which::DirectCall: {
         CallSiteDesc desc(bytecodeOffset, CallSiteDesc::Func);
         masm.append(desc, CodeOffset(offset.value()), metadata.extra);
         break;
       }
       case CraneliftMetadataEntry::Which::IndirectCall: {
@@ -285,25 +286,25 @@ static bool GenerateCraneliftCode(WasmMa
 class AutoCranelift {
   CraneliftStaticEnvironment staticEnv_;
   CraneliftModuleEnvironment env_;
   CraneliftCompiler* compiler_;
 
  public:
   explicit AutoCranelift(const ModuleEnvironment& env)
       : env_(env), compiler_(nullptr) {
-    staticEnv_.refTypesEnabled = env.refTypesEnabled();
+    staticEnv_.ref_types_enabled = env.refTypesEnabled();
 #ifdef WASM_SUPPORTS_HUGE_MEMORY
     if (env.hugeMemoryEnabled()) {
       // In the huge memory configuration, we always reserve the full 4 GB
       // index space for a heap.
-      staticEnv_.staticMemoryBound = HugeIndexRange;
-      staticEnv_.memoryGuardSize = HugeOffsetGuardLimit;
+      staticEnv_.static_memory_bound = HugeIndexRange;
+      staticEnv_.memory_guard_size = HugeOffsetGuardLimit;
     } else {
-      staticEnv_.memoryGuardSize = OffsetGuardLimit;
+      staticEnv_.memory_guard_size = OffsetGuardLimit;
     }
 #endif
     // Otherwise, heap bounds are stored in the `boundsCheckLimit` field
     // of TlsData.
   }
   bool init() {
     compiler_ = cranelift_compiler_create(&staticEnv_, &env_);
     return !!compiler_;
@@ -314,61 +315,61 @@ class AutoCranelift {
     }
   }
   operator CraneliftCompiler*() { return compiler_; }
 };
 
 CraneliftFuncCompileInput::CraneliftFuncCompileInput(
     const FuncCompileInput& func)
     : bytecode(func.begin),
-      bytecodeSize(func.end - func.begin),
+      bytecode_size(func.end - func.begin),
       index(func.index),
       offset_in_module(func.lineOrBytecode) {}
 
 static_assert(offsetof(TlsData, boundsCheckLimit) == sizeof(size_t),
               "fix make_heap() in wasm2clif.rs");
 
 CraneliftStaticEnvironment::CraneliftStaticEnvironment()
     :
 #ifdef JS_CODEGEN_X64
-      hasSse2(Assembler::HasSSE2()),
-      hasSse3(Assembler::HasSSE3()),
-      hasSse41(Assembler::HasSSE41()),
-      hasSse42(Assembler::HasSSE42()),
-      hasPopcnt(Assembler::HasPOPCNT()),
-      hasAvx(Assembler::HasAVX()),
-      hasBmi1(Assembler::HasBMI1()),
-      hasBmi2(Assembler::HasBMI2()),
-      hasLzcnt(Assembler::HasLZCNT()),
+      has_sse2(Assembler::HasSSE2()),
+      has_sse3(Assembler::HasSSE3()),
+      has_sse41(Assembler::HasSSE41()),
+      has_sse42(Assembler::HasSSE42()),
+      has_popcnt(Assembler::HasPOPCNT()),
+      has_avx(Assembler::HasAVX()),
+      has_bmi1(Assembler::HasBMI1()),
+      has_bmi2(Assembler::HasBMI2()),
+      has_lzcnt(Assembler::HasLZCNT()),
 #else
-      hasSse2(false),
-      hasSse3(false),
-      hasSse41(false),
-      hasSse42(false),
-      hasPopcnt(false),
-      hasAvx(false),
-      hasBmi1(false),
-      hasBmi2(false),
-      hasLzcnt(false),
+      has_sse2(false),
+      has_sse3(false),
+      has_sse41(false),
+      has_sse42(false),
+      has_popcnt(false),
+      has_avx(false),
+      has_bmi1(false),
+      has_bmi2(false),
+      has_lzcnt(false),
 #endif
 #if defined(XP_WIN)
-      platformIsWindows(true),
+      platform_is_windows(true),
 #else
-      platformIsWindows(false),
+      platform_is_windows(false),
 #endif
-      refTypesEnabled(false),
-      staticMemoryBound(0),
-      memoryGuardSize(0),
-      memoryBaseTlsOffset(offsetof(TlsData, memoryBase)),
-      instanceTlsOffset(offsetof(TlsData, instance)),
-      interruptTlsOffset(offsetof(TlsData, interrupt)),
-      cxTlsOffset(offsetof(TlsData, cx)),
-      realmCxOffset(JSContext::offsetOfRealm()),
-      realmTlsOffset(offsetof(TlsData, realm)),
-      realmFuncImportTlsOffset(offsetof(FuncImportTls, realm)) {
+      ref_types_enabled(false),
+      static_memory_bound(0),
+      memory_guard_size(0),
+      memory_base_tls_offset(offsetof(TlsData, memoryBase)),
+      instance_tls_offset(offsetof(TlsData, instance)),
+      interrupt_tls_offset(offsetof(TlsData, interrupt)),
+      cx_tls_offset(offsetof(TlsData, cx)),
+      realm_cx_offset(JSContext::offsetOfRealm()),
+      realm_tls_offset(offsetof(TlsData, realm)),
+      realm_func_import_tls_offset(offsetof(FuncImportTls, realm)) {
 }
 
 // Most of BaldrMonkey's data structures refer to a "global offset" which is a
 // byte offset into the `globalArea` field of the  `TlsData` struct.
 //
 // Cranelift represents global variables with their byte offset from the "VM
 // context pointer" which is the `WasmTlsReg` pointing to the `TlsData` struct.
 //
--- a/js/src/wasm/cranelift/baldrapi.h
+++ b/js/src/wasm/cranelift/baldrapi.h
@@ -52,36 +52,36 @@ struct ModuleEnvironment;
 }  // namespace js
 
 // This struct contains all the information that can be computed once for the
 // entire process and then should never change. It contains a mix of CPU
 // feature detection flags, and static information the C++ compile has access
 // to, but which can't be automatically provided to Rust.
 
 struct CraneliftStaticEnvironment {
-  bool hasSse2;
-  bool hasSse3;
-  bool hasSse41;
-  bool hasSse42;
-  bool hasPopcnt;
-  bool hasAvx;
-  bool hasBmi1;
-  bool hasBmi2;
-  bool hasLzcnt;
-  bool platformIsWindows;
-  bool refTypesEnabled;
-  size_t staticMemoryBound;
-  size_t memoryGuardSize;
-  size_t memoryBaseTlsOffset;
-  size_t instanceTlsOffset;
-  size_t interruptTlsOffset;
-  size_t cxTlsOffset;
-  size_t realmCxOffset;
-  size_t realmTlsOffset;
-  size_t realmFuncImportTlsOffset;
+  bool has_sse2;
+  bool has_sse3;
+  bool has_sse41;
+  bool has_sse42;
+  bool has_popcnt;
+  bool has_avx;
+  bool has_bmi1;
+  bool has_bmi2;
+  bool has_lzcnt;
+  bool platform_is_windows;
+  bool ref_types_enabled;
+  size_t static_memory_bound;
+  size_t memory_guard_size;
+  size_t memory_base_tls_offset;
+  size_t instance_tls_offset;
+  size_t interrupt_tls_offset;
+  size_t cx_tls_offset;
+  size_t realm_cx_offset;
+  size_t realm_tls_offset;
+  size_t realm_func_import_tls_offset;
 
   // Not bindgen'd because it's inlined.
   inline CraneliftStaticEnvironment();
 };
 
 // This structure proxies the C++ ModuleEnvironment and the information it
 // contains.
 
@@ -98,17 +98,17 @@ struct CraneliftModuleEnvironment {
 struct BD_Stackmaps;
 
 // Data for a single wasm function to be compiled by Cranelift.
 // This information is all from the corresponding `js::wasm::FuncCompileInput`
 // struct, but formatted in a Rust-friendly way.
 
 struct CraneliftFuncCompileInput {
   const uint8_t* bytecode;
-  size_t bytecodeSize;
+  size_t bytecode_size;
   uint32_t index;
   uint32_t offset_in_module;
 
   // The stackmaps sink to use when compiling this function.
   BD_Stackmaps* stackmaps;
 
   // Not bindgen'd because it's inlined.
   explicit inline CraneliftFuncCompileInput(const js::wasm::FuncCompileInput&);
@@ -118,47 +118,47 @@ struct CraneliftFuncCompileInput {
 // single wasm function. The meaning of the field extra depends on the enum
 // value.
 //
 // XXX should we use a union for this instead? bindgen seems to be able to
 // handle them, with a lot of unsafe'ing.
 
 struct CraneliftMetadataEntry {
   enum Which { DirectCall, IndirectCall, Trap, SymbolicAccess } which;
-  uint32_t codeOffset;
-  uint32_t moduleBytecodeOffset;
+  uint32_t code_offset;
+  uint32_t module_bytecode_offset;
   size_t extra;
 };
 
 // The result of a single function compilation, containing the machine code
 // generated by Cranelift, as well as some useful metadata to generate the
 // prologue/epilogue etc.
 
 struct CraneliftCompiledFunc {
-  size_t numMetadata;
+  size_t num_metadata;
   const CraneliftMetadataEntry* metadatas;
 
-  size_t framePushed;
-  bool containsCalls;
+  size_t frame_pushed;
+  bool contains_calls;
 
   // The compiled code comprises machine code, relocatable jump tables, and
   // copyable read-only data, concatenated without padding.  The "...Size"
   // members give the sizes of the individual sections.  The code starts at
   // offsets 0; the other offsets can be derived from the sizes.
   const uint8_t* code;
-  size_t codeSize;
-  size_t jumptablesSize;
-  size_t rodataSize;
-  size_t totalSize;
+  size_t code_size;
+  size_t jumptables_size;
+  size_t rodata_size;
+  size_t total_size;
 
   // Relocation information for instructions that reference into the jump tables
   // and read-only data segments.  The relocation information is
   // machine-specific.
-  size_t numRodataRelocs;
-  const uint32_t* rodataRelocs;
+  size_t num_rodata_relocs;
+  const uint32_t* rodata_relocs;
 };
 
 // Possible constant values for initializing globals.
 
 struct BD_ConstantValue {
   js::wasm::TypeCode t;
   union {
     int32_t i32;
--- a/js/src/wasm/cranelift/src/bindings/mod.rs
+++ b/js/src/wasm/cranelift/src/bindings/mod.rs
@@ -231,86 +231,86 @@ impl<'a> ModuleEnvironment<'a> {
         i64::from(self.env.min_memory_length)
     }
 }
 
 /// Extra methods for some C++ wrappers.
 
 impl FuncCompileInput {
     pub fn bytecode(&self) -> &[u8] {
-        unsafe { slice::from_raw_parts(self.bytecode, self.bytecodeSize) }
+        unsafe { slice::from_raw_parts(self.bytecode, self.bytecode_size) }
     }
 
     pub fn stackmaps(&self) -> Stackmaps {
         Stackmaps(self.stackmaps)
     }
 }
 
 impl CompiledFunc {
     pub fn reset(&mut self, compiled_func: &compile::CompiledFunc) {
-        self.numMetadata = compiled_func.metadata.len();
+        self.num_metadata = compiled_func.metadata.len();
         self.metadatas = compiled_func.metadata.as_ptr();
 
-        self.framePushed = compiled_func.frame_pushed as usize;
-        self.containsCalls = compiled_func.contains_calls;
+        self.frame_pushed = compiled_func.frame_pushed as usize;
+        self.contains_calls = compiled_func.contains_calls;
 
         self.code = compiled_func.code_buffer.as_ptr();
-        self.codeSize = compiled_func.code_size as usize;
-        self.jumptablesSize = compiled_func.jumptables_size as usize;
-        self.rodataSize = compiled_func.rodata_size as usize;
-        self.totalSize = compiled_func.code_buffer.len();
+        self.code_size = compiled_func.code_size as usize;
+        self.jumptables_size = compiled_func.jumptables_size as usize;
+        self.rodata_size = compiled_func.rodata_size as usize;
+        self.total_size = compiled_func.code_buffer.len();
 
-        self.numRodataRelocs = compiled_func.rodata_relocs.len();
-        self.rodataRelocs = compiled_func.rodata_relocs.as_ptr();
+        self.num_rodata_relocs = compiled_func.rodata_relocs.len();
+        self.rodata_relocs = compiled_func.rodata_relocs.as_ptr();
     }
 }
 
 impl MetadataEntry {
     pub fn direct_call(code_offset: CodeOffset, srcloc: SourceLoc, func_index: FuncIndex) -> Self {
         Self {
             which: CraneliftMetadataEntry_Which_DirectCall,
-            codeOffset: code_offset,
-            moduleBytecodeOffset: srcloc.bits(),
+            code_offset,
+            module_bytecode_offset: srcloc.bits(),
             extra: func_index.index(),
         }
     }
     pub fn indirect_call(ret_addr: CodeOffset, srcloc: SourceLoc) -> Self {
         Self {
             which: CraneliftMetadataEntry_Which_IndirectCall,
-            codeOffset: ret_addr,
-            moduleBytecodeOffset: srcloc.bits(),
+            code_offset: ret_addr,
+            module_bytecode_offset: srcloc.bits(),
             extra: 0,
         }
     }
     pub fn trap(code_offset: CodeOffset, srcloc: SourceLoc, which: Trap) -> Self {
         Self {
             which: CraneliftMetadataEntry_Which_Trap,
-            codeOffset: code_offset,
-            moduleBytecodeOffset: srcloc.bits(),
+            code_offset,
+            module_bytecode_offset: srcloc.bits(),
             extra: which as usize,
         }
     }
     pub fn symbolic_access(
         code_offset: CodeOffset,
         srcloc: SourceLoc,
         sym: SymbolicAddress,
     ) -> Self {
         Self {
             which: CraneliftMetadataEntry_Which_SymbolicAccess,
-            codeOffset: code_offset,
-            moduleBytecodeOffset: srcloc.bits(),
+            code_offset,
+            module_bytecode_offset: srcloc.bits(),
             extra: sym as usize,
         }
     }
 }
 
 impl StaticEnvironment {
     /// Returns the default calling convention on this machine.
     pub fn call_conv(&self) -> isa::CallConv {
-        if self.platformIsWindows {
+        if self.platform_is_windows {
             isa::CallConv::BaldrdashWindows
         } else {
             isa::CallConv::BaldrdashSystemV
         }
     }
 }
 
 pub struct Stackmaps(*mut self::low_level::BD_Stackmaps);
--- a/js/src/wasm/cranelift/src/compile.rs
+++ b/js/src/wasm/cranelift/src/compile.rs
@@ -191,17 +191,17 @@ impl<'a, 'b> BatchCompiler<'a, 'b> {
                     &mut traps,
                     &mut NullStackmapSink {},
                 )
             };
 
             self.current_func.metadata.append(&mut traps.metadata);
         }
 
-        if self.static_environ.refTypesEnabled {
+        if self.static_environ.ref_types_enabled {
             self.emit_stackmaps(stackmaps);
         }
 
         self.current_func.code_size = info.code_size;
         self.current_func.jumptables_size = info.jumptables_size;
         self.current_func.rodata_size = info.rodata_size;
 
         Ok(())
--- a/js/src/wasm/cranelift/src/isa.rs
+++ b/js/src/wasm/cranelift/src/isa.rs
@@ -108,17 +108,17 @@ fn make_shared_flags(
     //
     // 1. Return address (whether explicitly pushed on ARM or implicitly on x86).
     // 2. TLS register.
     // 3. Previous frame pointer.
     //
     sb.set("baldrdash_prologue_words", "3")?;
 
     // Make sure that libcalls use the supplementary VMContext argument.
-    let libcall_call_conv = if env.platformIsWindows {
+    let libcall_call_conv = if env.platform_is_windows {
         "baldrdash_windows"
     } else {
         "baldrdash_system_v"
     };
     sb.set("libcall_call_conv", libcall_call_conv)?;
 
     // Assembler::PatchDataWithValueCheck expects -1 stored where a function address should be
     // patched in.
@@ -152,53 +152,53 @@ fn make_shared_flags(
         if enable_jump_tables { "true" } else { "false" },
     )?;
 
     if cfg!(feature = "cranelift_x86") && cfg!(target_pointer_width = "64") {
         sb.enable("enable_pinned_reg")?;
         sb.enable("use_pinned_reg_as_heap_base")?;
     }
 
-    if env.refTypesEnabled {
+    if env.ref_types_enabled {
         sb.enable("enable_safepoints")?;
     }
 
     Ok(settings::Flags::new(sb))
 }
 
 #[cfg(feature = "cranelift_x86")]
 fn make_isa_specific(env: &StaticEnvironment) -> DashResult<isa::Builder> {
     let mut ib = isa::lookup_by_name("x86_64-unknown-unknown").map_err(BasicError::from)?;
 
-    if !env.hasSse2 {
+    if !env.has_sse2 {
         return Err("SSE2 is mandatory for Baldrdash!".into());
     }
 
-    if env.hasSse3 {
+    if env.has_sse3 {
         ib.enable("has_sse3").map_err(BasicError::from)?;
     }
-    if env.hasSse41 {
+    if env.has_sse41 {
         ib.enable("has_sse41").map_err(BasicError::from)?;
     }
-    if env.hasSse42 {
+    if env.has_sse42 {
         ib.enable("has_sse42").map_err(BasicError::from)?;
     }
-    if env.hasPopcnt {
+    if env.has_popcnt {
         ib.enable("has_popcnt").map_err(BasicError::from)?;
     }
-    if env.hasAvx {
+    if env.has_avx {
         ib.enable("has_avx").map_err(BasicError::from)?;
     }
-    if env.hasBmi1 {
+    if env.has_bmi1 {
         ib.enable("has_bmi1").map_err(BasicError::from)?;
     }
-    if env.hasBmi2 {
+    if env.has_bmi2 {
         ib.enable("has_bmi2").map_err(BasicError::from)?;
     }
-    if env.hasLzcnt {
+    if env.has_lzcnt {
         ib.enable("has_lzcnt").map_err(BasicError::from)?;
     }
 
     Ok(ib)
 }
 
 #[cfg(not(feature = "cranelift_x86"))]
 fn make_isa_specific(_env: &StaticEnvironment) -> DashResult<isa::Builder> {
--- a/js/src/wasm/cranelift/src/wasm2clif.rs
+++ b/js/src/wasm/cranelift/src/wasm2clif.rs
@@ -392,17 +392,17 @@ impl<'a, 'b, 'c> TransEnv<'a, 'b, 'c> {
     fn load_instance(&mut self, pos: &mut FuncCursor) -> ir::Value {
         let gv = match self.instance_gv.expand() {
             Some(gv) => gv,
             None => {
                 // We need to allocate the global variable.
                 let vmctx = self.get_vmctx_gv(pos.func);
                 let gv = pos.func.create_global_value(ir::GlobalValueData::IAddImm {
                     base: vmctx,
-                    offset: imm64(self.static_env.instanceTlsOffset),
+                    offset: imm64(self.static_env.instance_tls_offset),
                     global_type: POINTER_TYPE,
                 });
                 self.instance_gv = gv.into();
                 gv
             }
         };
         let ga = pos.ins().global_value(POINTER_TYPE, gv);
         pos.ins().load(POINTER_TYPE, ir::MemFlags::trusted(), ga, 0)
@@ -412,17 +412,17 @@ impl<'a, 'b, 'c> TransEnv<'a, 'b, 'c> {
     fn load_interrupt_flag(&mut self, pos: &mut FuncCursor) -> ir::Value {
         let gv = match self.interrupt_gv.expand() {
             Some(gv) => gv,
             None => {
                 // We need to allocate the global variable.
                 let vmctx = self.get_vmctx_gv(pos.func);
                 let gv = pos.func.create_global_value(ir::GlobalValueData::IAddImm {
                     base: vmctx,
-                    offset: imm64(self.static_env.interruptTlsOffset),
+                    offset: imm64(self.static_env.interrupt_tls_offset),
                     global_type: POINTER_TYPE,
                 });
                 self.interrupt_gv = gv.into();
                 gv
             }
         };
         let ga = pos.ins().global_value(POINTER_TYPE, gv);
         pos.ins()
@@ -458,89 +458,92 @@ impl<'a, 'b, 'c> TransEnv<'a, 'b, 'c> {
     /// realm value, in case the call has used a different realm.
     fn switch_to_wasm_tls_realm(&mut self, pos: &mut FuncCursor) {
         if self.cx_addr.is_none() {
             let vmctx = self.get_vmctx_gv(&mut pos.func);
             self.cx_addr = pos
                 .func
                 .create_global_value(ir::GlobalValueData::IAddImm {
                     base: vmctx,
-                    offset: imm64(self.static_env.cxTlsOffset),
+                    offset: imm64(self.static_env.cx_tls_offset),
                     global_type: POINTER_TYPE,
                 })
                 .into();
         }
 
         if self.realm_addr.is_none() {
             let vmctx = self.get_vmctx_gv(&mut pos.func);
             self.realm_addr = pos
                 .func
                 .create_global_value(ir::GlobalValueData::IAddImm {
                     base: vmctx,
-                    offset: imm64(self.static_env.realmTlsOffset),
+                    offset: imm64(self.static_env.realm_tls_offset),
                     global_type: POINTER_TYPE,
                 })
                 .into();
         }
 
         let ptr = POINTER_TYPE;
         let flags = ir::MemFlags::trusted();
         let cx_addr_val = pos.ins().global_value(ptr, self.cx_addr.unwrap());
         let cx = pos.ins().load(ptr, flags, cx_addr_val, 0);
         let realm_addr_val = pos.ins().global_value(ptr, self.realm_addr.unwrap());
         let realm = pos.ins().load(ptr, flags, realm_addr_val, 0);
         pos.ins()
-            .store(flags, realm, cx, offset32(self.static_env.realmCxOffset));
+            .store(flags, realm, cx, offset32(self.static_env.realm_cx_offset));
     }
 
     /// Update the JSContext's realm value in preparation for making an indirect call through
     /// an external table.
     fn switch_to_indirect_callee_realm(&mut self, pos: &mut FuncCursor, vmctx: ir::Value) {
         let ptr = POINTER_TYPE;
         let flags = ir::MemFlags::trusted();
         let cx = pos
             .ins()
-            .load(ptr, flags, vmctx, offset32(self.static_env.cxTlsOffset));
-        let realm = pos
-            .ins()
-            .load(ptr, flags, vmctx, offset32(self.static_env.realmTlsOffset));
+            .load(ptr, flags, vmctx, offset32(self.static_env.cx_tls_offset));
+        let realm = pos.ins().load(
+            ptr,
+            flags,
+            vmctx,
+            offset32(self.static_env.realm_tls_offset),
+        );
         pos.ins()
-            .store(flags, realm, cx, offset32(self.static_env.realmCxOffset));
+            .store(flags, realm, cx, offset32(self.static_env.realm_cx_offset));
     }
 
     /// Update the JSContext's realm value in preparation for making a call to an imported
     /// function.
     fn switch_to_import_realm(
         &mut self,
         pos: &mut FuncCursor,
         vmctx: ir::Value,
         gv_addr: ir::Value,
     ) {
         let ptr = POINTER_TYPE;
         let flags = ir::MemFlags::trusted();
         let cx = pos
             .ins()
-            .load(ptr, flags, vmctx, offset32(self.static_env.cxTlsOffset));
+            .load(ptr, flags, vmctx, offset32(self.static_env.cx_tls_offset));
         let realm = pos.ins().load(
             ptr,
             flags,
             gv_addr,
-            offset32(self.static_env.realmFuncImportTlsOffset),
+            offset32(self.static_env.realm_func_import_tls_offset),
         );
         pos.ins()
-            .store(flags, realm, cx, offset32(self.static_env.realmCxOffset));
+            .store(flags, realm, cx, offset32(self.static_env.realm_cx_offset));
     }
 
     fn load_pinned_reg(&self, pos: &mut FuncCursor, vmctx: ir::Value) {
         if cfg!(feature = "cranelift_x86") && cfg!(target_pointer_width = "64") {
             let heap_base = pos.ins().load(
                 POINTER_TYPE,
                 ir::MemFlags::trusted(),
                 vmctx,
-                self.static_env.memoryBaseTlsOffset as i32,
+                self.static_env.memory_base_tls_offset as i32,
             );
             pos.ins().set_pinned_reg(heap_base);
         }
     }
 
     fn reload_tls_and_pinned_regs(&mut self, pos: &mut FuncCursor) {
         let vmctx_gv = self.get_vmctx_gv(&mut pos.func);
         let vmctx = pos.ins().global_value(POINTER_TYPE, vmctx_gv);
@@ -694,17 +697,17 @@ impl<'a, 'b, 'c> FuncEnvironment for Tra
         if index.index() != 0 {
             return Err(WasmError::Unsupported(
                 "only one wasm memory supported".to_string(),
             ));
         }
 
         let vcmtx = self.get_vmctx_gv(func);
 
-        let bound = self.static_env.staticMemoryBound as u64;
+        let bound = self.static_env.static_memory_bound as u64;
         let is_static = bound > 0;
 
         // Get the `TlsData::memoryBase` field.
         let base = func.create_global_value(ir::GlobalValueData::Load {
             base: vcmtx,
             offset: offset32(0),
             global_type: POINTER_TYPE,
             readonly: is_static,
@@ -721,17 +724,17 @@ impl<'a, 'b, 'c> FuncEnvironment for Tra
                 offset: POINTER_SIZE.into(),
                 global_type: ir::types::I32,
                 readonly: false,
             });
             ir::HeapStyle::Dynamic { bound_gv }
         };
 
         let min_size = (self.env.min_memory_length() as u64).into();
-        let offset_guard_size = (self.static_env.memoryGuardSize as u64).into();
+        let offset_guard_size = (self.static_env.memory_guard_size as u64).into();
 
         Ok(func.create_heap(ir::HeapData {
             base,
             min_size,
             offset_guard_size,
             style,
             index_type: ir::types::I32,
         }))