750694: halfmoon does not compile on windows (p=wmaddox, r=fklockii, r=throdrig)
author: Tom Rodriguez <throdrig@adobe.com>
Fri, 03 Aug 2012 15:56:45 -0700
changeset 7523 5b15da1e8285c6c7e2a69926a778d2a92efaf884
parent 7522 558c686b0ccbcd434f0a4f2cebdd218b65629421
child 7524 7267380d93542a39fa6ef5daaea0a8c9a8997ec7
push id: 4255
push user: dschaffe@adobe.com
push date: Thu, 09 Aug 2012 13:25:42 +0000
reviewers: fklockii, throdrig
bugs: 750694, 1096510
750694: halfmoon does not compile on windows (p=wmaddox, r=fklockii, r=throdrig) CL@1096510
core/Deopt-CL.cpp
core/Deopt-CL.h
core/Deopt.cpp
halfmoon/hm-check.cpp
halfmoon/hm-deoptimizer.cpp
halfmoon/hm-profiler.h
halfmoon/hm-stubs.cpp
halfmoon/hm-typeanalyzer.cpp
halfmoon/hm-types.h
halfmoon/profiler/profiler-types.h
nanojit/NativeARM.cpp
--- a/core/Deopt-CL.cpp
+++ b/core/Deopt-CL.cpp
@@ -34,17 +34,19 @@
  * and other provisions required by the GPL or the LGPL. If you do not delete
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #include "avmplus.h"
 
-#ifdef VMCFG_HALFMOON
+// TODO: I believe this code is now obsolete and should be removed.
+//#ifdef VMCFG_HALFMOON
+#if 0
 
 #include "CodegenLIR.h"
 #include "Deopt.h"
 #include "Deopt-CL.h"
 #include "Interpreter.h"
 
 namespace avmplus
 {
--- a/core/Deopt-CL.h
+++ b/core/Deopt-CL.h
@@ -35,16 +35,18 @@
  * the provisions above, a recipient may use your version of this file under
  * the terms of any one of the MPL, the GPL or the LGPL.
  *
  * ***** END LICENSE BLOCK ***** */
 
 #ifndef __avmplus_Deopt_CL__
 #define __avmplus_Deopt_CL__
 
+// TODO: As far as I know, this code is obsolete and should be removed.
+
 namespace avmplus
 {
     // Safepoints for CodegenLIR deoptimization metadata.  We represent them as a linked
     // list of first-class objects, with no attempt at compression.  Note that this
     // representation is completely hidden from the generic deoptimization runtime
     // machinery by the DeoptContext class.
 
     class CLSafepoint {
--- a/core/Deopt.cpp
+++ b/core/Deopt.cpp
@@ -51,19 +51,26 @@ namespace framelib {
     void returnFromFrameWithInt32(uint8_t* frame, int32_t value);
     void returnFromFrameWithDouble(uint8_t* frame, double value);
     void returnFromFrameWithInt64(uint8_t* frame, int64_t value);
     uint8_t* getReturnAddress(uint8_t* frame);
     void setReturnAddress(uint8_t* frame, uint8_t* address);
     uint8_t* getPointerArgument0(uint8_t* frame);
     void resumeFrameAtLocation(uint8_t* state, uint8_t* address);
     void saveActivationState(uint8_t* state);
-    uint8_t* getTrampForInt32();
-    uint8_t* getTrampForInt64();
-    uint8_t* getTrampForDouble();
+    // FIXME
+    // FrameLib is intended to be generic, and to depend only on nanojit.
+    // It was formerly located in the nanojit tree, which is where it belongs.
+    // These functions have knowledge of the deoptimization scheme, and don't
+    // belong here.  Also, moving them here messed up the resolution of the
+    // calls to the stubs in the trampolines (asm code), which need to
+    // be in the same namespace.
+    //uint8_t* getTrampForInt32();
+    //uint8_t* getTrampForInt64();
+    //uint8_t* getTrampForDouble();
 #ifdef AVMPLUS_64BIT
     REALLY_INLINE void returnFromFrameWithPtr(uint8_t* frame, uintptr_t value) {
         framelib::returnFromFrameWithInt64(frame, value);
     }
 #else
     REALLY_INLINE void returnFromFrameWithPtr(uint8_t* frame, uintptr_t value) {
         framelib::returnFromFrameWithInt32(frame, value);
     }
@@ -106,16 +113,24 @@ void Deoptimizer::returnFromFrameWithUnb
     case BUILTIN_void:
     case BUILTIN_any:
     case BUILTIN_object:
         framelib::returnFromFrameWithPtr(frame, value);
         break;
     }
 }
 
+#ifdef linux
+// GCC on Linux apparently does not mangle C-linkage names.
+// Add a leading underscore here so references from assembler code will resolve.
+#define deoptFrameUponReturnWithInt32   _deoptFrameUponReturnWithInt32
+#define deoptFrameUponReturnWithInt64   _deoptFrameUponReturnWithInt64
+#define deoptFrameUponReturnWithDouble  _deoptFrameUponReturnWithDouble
+#endif
+
 // Static bridges from the assembly-language trampolines.
 // The asm trampoline has captured the victim's fp.
 // We use that to retrieve its first argument, the MethodEnv.
 // From the MethodEnv, we can obtain the MethodInfo and the Deoptimizer.
 
 extern "C"
 void deoptFrameUponReturnWithInt32(uint8_t* fp, int32_t result)
 {
@@ -132,16 +147,131 @@ void deoptFrameUponReturnWithInt64(uint8
 
 extern "C"
 void deoptFrameUponReturnWithDouble(uint8_t* fp, double result)
 {
     MethodEnv* env = (MethodEnv*)framelib::getPointerArgument0(fp);
     env->method->_armed_deoptimizers->deoptFrameUponReturnWithResult(fp, env, (void*)&result);
 }
 
+#ifdef VMCFG_IA32
+
+// TODO: These would be better off in a separate .asm file, but I don't think we
+// have one for the i386 builds, and I'm not ready to get into that yet.
+
+// Return a trampoline to capture the frame pointer and return value from a returning
+// function and transfer into the appropriate static bridge above.  We overwrite the
+// return address of the frame we wish to "hook" in order to take control when it returns.
+// We maintain 16-byte stack alignment, which is required on some platforms.
+
+// GCC will attempt to inline these functions, but emit an out-of-line version
+// as well.  This results in duplicate symbol definitions due to the inline
+// assembler code.  We could avoid this by using numeric local labels, but there
+// is no need to waste the code space, and it's good to have a symbol for the
+// trampoline.
+
+NO_INLINE
+uint8_t* getTrampForInt32()
+{
+    //using namespace framelib;
+#if _MSC_VER
+    _asm {
+        jmp     done
+      deopt_tramp_I32:
+        sub     esp, 8
+        push    eax
+        push    ebp
+        call    deoptFrameUponReturnWithInt32
+        /* NOTREACHED */
+        int     3
+      done:
+        lea     eax, deopt_tramp_I32
+    }
+#else
+    uint8_t* result;
+    asm volatile (
+        "    jmp   0f                                 ;\
+             .align 4                                 ;\
+             .globl deopt_tramp_I32                   ;\
+         deopt_tramp_I32:                             ;\
+             subl  $8, %%esp                          ;\
+             pushl %%eax                              ;\
+             pushl %%ebp                              ;\
+             call  _deoptFrameUponReturnWithInt32     ;\
+             int3                                     ;\
+         0:                                           ;\
+             movl $deopt_tramp_I32, %[result]         "
+         : : [result] "m"(result)
+    );
+    return result;
+#endif
+}
+
+NO_INLINE
+uint8_t* getTrampForInt64()
+{
+    return NULL;  //nyi
+}
+
+NO_INLINE
+uint8_t* getTrampForDouble()
+{
+    //using namespace framelib;
+#if _MSC_VER
+    _asm {
+        jmp     done
+      deopt_tramp_double:
+        sub     esp, 12
+        fstp    qword ptr [esp]
+        push    ebp
+        call    deoptFrameUponReturnWithDouble
+        /* NOTREACHED */
+        int     3
+      done:
+        lea     eax, deopt_tramp_double
+    }
+#else
+    uint8_t* result;
+    asm volatile (
+        "    jmp   0f                                 ;\
+             .align 4                                 ;\
+             .globl deopt_tramp_double                ;\
+         deopt_tramp_double:                          ;\
+             subl  $12, %%esp                         ;\
+             fstpl (%%esp)                            ;\
+             pushl %%ebp                              ;\
+             call  _deoptFrameUponReturnWithDouble    ;\
+             int3                                     ;\
+         0:                                           ;\
+             movl $deopt_tramp_double, %[result]      "
+         : : [result] "m"(result)
+    );
+    return result;
+#endif
+}
+
+#else
+
+uint8_t* getTrampForInt32() {
+    AvmAssert(false && "not implemented");
+    return 0;
+}
+
+uint8_t* getTrampForInt64() {
+   AvmAssert(false && "not implemented");
+   return 0;
+}
+
+uint8_t* getTrampForDouble() {
+    AvmAssert(false && "not implemented");
+    return 0;
+}
+
+#endif // ifdef VMCFG_IA32
+
 // Deoptimize the method associated with this Deoptimizer.
 // We arrange for activations of the method to be deoptimized as the stack unwinds.
 // Here, we do not actually revert the state of the method to interpreted, but only
 // arrange for the deoptimization of all pending activations.
 // TODO:  When unwinding for exceptions, we may skip over an armed frame, and should
 // then re-arm a frame that is still active.  Otherwise, the deoptimize-and-rearm chain
 // will be broken, and we will not deoptimize all activations.
 
@@ -190,26 +320,26 @@ void Deoptimizer::armForLazyDeoptimizati
     ctx->setSafepointFromNativePc(nativePc);
 
     // Get trampoline appropriate for the callee's return type.
     uint8_t* trampoline;
     switch (ctx->safepointReturnValueSST()) {
     case SST_int32:
     case SST_uint32:
     case SST_bool32:
-        trampoline = framelib::getTrampForInt32();
+        trampoline = getTrampForInt32();
         break;
     case SST_double:
-        trampoline = framelib::getTrampForDouble();
+        trampoline = getTrampForDouble();
         break;
     default:
       #ifdef AVMPLUS_64BIT
-        trampoline = framelib::getTrampForInt64();
+        trampoline = getTrampForInt64();
       #else
-        trampoline = framelib::getTrampForInt32();
+        trampoline = getTrampForInt32();
       #endif
         break;
     }
 
     // Install the trampoline as the callee's return address.
     framelib::setReturnAddress(calleeFrameBase, trampoline);
 
     this->armedFrame = victim;
@@ -512,17 +642,17 @@ void Deoptimizer::showArmed(AvmCore* cor
             core->console << "...";
             break;
         }
         core->console << " {" << hexAddr((uintptr_t)deopt) << " @ " << hexAddr((uintptr_t)deopt->armedFrame) << "}";
     }
     core->console << "\n";
 }
 
-#endif
+#endif // ifdef DEOPT_TRACE
 
 // Debugging scaffolding.
 
 //static
 bool Deoptimizer::deoptAncestor(AvmCore* core, uint32_t k)
 {
     for (MethodFrame* frame = methodFrame(core); frame != NULL; frame = frame->next) {
         MethodEnv *env = frame->env();
@@ -535,17 +665,18 @@ bool Deoptimizer::deoptAncestor(AvmCore*
                 core->exec->deoptimize(env);
             }
             return true;
         }
     }
     return false;
 }
 
-} // namespace avmplus {
+} // namespace avmplus
+
 
 namespace framelib {
 #ifdef VMCFG_IA32
 
 // Target architecture and backend-dependent library routines for manipulating stack frames.
 // Currently aimed at supporting deoptimization in Tamarin, though debugging suggests other
 // extensions to the API.
 //
@@ -753,109 +884,16 @@ void resumeFrameAtLocation(uint8_t* stat
     );
 #endif
 }
 
 #if _MSC_VER
 #pragma warning( pop )
 #endif
 
-// TODO: These would be better off in a separate .asm file, but I don't think we
-// have one for the i386 builds, and I'm not ready to get into that yet.
-
-// Return a trampoline to capture the frame pointer and return value from a returning
-// function and transfer into the appropriate static bridge above.  We overwrite the
-// return address of the frame we wish to "hook" in order to take control when it returns.
-// We maintain 16-byte stack alignment, which is required on some platforms.
-
-// GCC will attempt to inline these functions, but emit an out-of-line version
-// as well.  This results in duplicate symbol definitions due to the inline
-// assembler code.  We could avoid this by using numeric local labels, but there
-// is no need to waste the code space, and it's good to have a symbol for the
-// trampoline.
-
-NO_INLINE
-uint8_t* getTrampForInt32()
-{
-    using namespace framelib;
-#if _MSC_VER
-    _asm {
-        jmp     done
-      deopt_tramp_I32:
-        sub     esp, 8
-        push    eax
-        push    ebp
-        call    deoptFrameUponReturnWithInt32
-        /* NOTREACHED */
-        int     3
-      done:
-        lea     eax, deopt_tramp_I32
-    }
-#else
-    uint8_t* result;
-    asm volatile (
-        "    jmp   0f                                 ;\
-             .align 4                                 ;\
-             .globl deopt_tramp_I32                   ;\
-         deopt_tramp_I32:                             ;\
-             subl  $8, %%esp                          ;\
-             pushl %%eax                              ;\
-             pushl %%ebp                              ;\
-             call  _deoptFrameUponReturnWithInt32     ;\
-             int3                                     ;\
-         0:                                           ;\
-             movl $deopt_tramp_I32, %[result]         "
-         : : [result] "m"(result)
-    );
-    return result;
-#endif
-}
-
-NO_INLINE
-uint8_t* getTrampForInt64()
-{
-    return NULL;  //nyi
-}
-
-NO_INLINE
-uint8_t* getTrampForDouble()
-{
-    using namespace framelib;
-#if _MSC_VER
-    _asm {
-        jmp     done
-      deopt_tramp_double:
-        sub     esp, 12
-        fstp    qword ptr [esp]
-        push    ebp
-        call    deoptFrameUponReturnWithDouble
-        /* NOTREACHED */
-        int     3
-      done:
-        lea     eax, deopt_tramp_double
-    }
-#else
-    uint8_t* result;
-    asm volatile (
-        "    jmp   0f                                 ;\
-             .align 4                                 ;\
-             .globl deopt_tramp_double                ;\
-         deopt_tramp_double:                          ;\
-             subl  $12, %%esp                         ;\
-             fstpl (%%esp)                            ;\
-             pushl %%ebp                              ;\
-             call  _deoptFrameUponReturnWithDouble    ;\
-             int3                                     ;\
-         0:                                           ;\
-             movl $deopt_tramp_double, %[result]      "
-         : : [result] "m"(result)
-    );
-    return result;
-#endif
-}
 #else
 
 void returnFromFrame(uint8_t* /*frame*/) {
     AvmAssert(false && "not implemented");
 }
 
 void returnFromFrameWithInt32(uint8_t* /*frame*/, int32_t /*value*/) {
     AvmAssert(false && "not implemented");
@@ -886,27 +924,12 @@ uint8_t* getPointerArgument0(uint8_t* /*
 void resumeFrameAtLocation(uint8_t* /*state*/, uint8_t* /*address*/) {
     AvmAssert(false && "not implemented");
 }
 
 void saveActivationState(uint8_t* /*state*/) {
     AvmAssert(false && "not implemented");
 }
 
-uint8_t* getTrampForInt32() {
-    AvmAssert(false && "not implemented");
-    return 0;
-}
-
-uint8_t* getTrampForInt64() {
-   AvmAssert(false && "not implemented");
-   return 0;
-}
-
-uint8_t* getTrampForDouble() {
-    AvmAssert(false && "not implemented");
-    return 0;
-}
-
 #endif // ifdef VMCFG_IA32, etc
 } // end namespace framelib
 
 #endif // VMCFG_HALFMOON
--- a/halfmoon/hm-check.cpp
+++ b/halfmoon/hm-check.cpp
@@ -237,17 +237,17 @@ void TypeChecker::fail() {
   /* breakpoint here to debug */
   assert(false && "typecheck failed");
 }
 
 bool TypeChecker::do_default(Instr* instr) {
   // All inputs must have a type and not UN.
   for (ArrayRange<Use> u = useRange(instr); !u.empty(); u.popFront()) {
     const Use& use = u.front();
-    assert(use && "input was null");
+    assert(&use != NULL && "input was null");
     assert(type(use) /*&& !isBottom(type(use))*/);
   }
   if (hasInputSignature(instr)) {
     // Check types from signature.
     SigRange sig = inputSigRange(instr);
     ArrayRange<Use> u = useRange(instr);
     for (int i = 0; !u.empty(); u.popFront(), sig.popFront(), ++i) {
       const Type* t = type(u.front());
--- a/halfmoon/hm-deoptimizer.cpp
+++ b/halfmoon/hm-deoptimizer.cpp
@@ -514,17 +514,17 @@ HMDeoptContext::HMDeoptContext(DeoptData
 HMDeoptContext::~HMDeoptContext()
 {
   mmfx_delete_array(frame_slots_);
   mmfx_delete_array(frame_types_);
 }
 
 uint8_t HMDeoptContext::readUInt8()
 {
-  return *(mdins_++);
+  return uint8_t(*(mdins_++));
 }
 
 uint32_t HMDeoptContext::readUInt32()
 {
   uint32_t value = *(mdins_++);
   value = (value << 8) | *(mdins_++);
   value = (value << 8) | *(mdins_++);
   value = (value << 8) | *(mdins_++);
--- a/halfmoon/hm-profiler.h
+++ b/halfmoon/hm-profiler.h
@@ -50,9 +50,9 @@ namespace halfmoon {
         bool isHotterArm(ArmInstr* thinkHotArm, ArmInstr* thinkSlowArm);
 
     private:
         Allocator alloc_;
         HashMap<ArmInstr*, double> branch_probability_;
     };
 }
 
-#endif
\ No newline at end of file
+#endif
--- a/halfmoon/hm-stubs.cpp
+++ b/halfmoon/hm-stubs.cpp
@@ -1231,16 +1231,42 @@ void Stubs::do_si8(MethodFrame* f, int32
 void Stubs::do_si16(MethodFrame* f, int32_t value, int32_t addr) {
   *((uint16_t*)rangeCheck(f, addr, 2)) = uint16_t(value);
 }
 
 void Stubs::do_si32(MethodFrame* f, int32_t value, int32_t addr) {
   *((int32_t*)rangeCheck(f, addr, 4)) = value;
 }
 
+#else
+
+int32_t Stubs::do_li8(MethodFrame* f, int32_t addr) {
+  return avmplus::mop_liz8(rangeCheck(f, addr, 1));
+}
+
+int32_t Stubs::do_li16(MethodFrame* f, int32_t addr) {
+  return avmplus::mop_liz16(rangeCheck(f, addr, 2));
+}
+
+int32_t Stubs::do_li32(MethodFrame* f, int32_t addr) {
+  return avmplus::mop_li32(rangeCheck(f, addr, 4));
+}
+
+void Stubs::do_si8(MethodFrame* f, int32_t value, int32_t addr) {
+  avmplus::mop_si8(rangeCheck(f, addr, 1), value);
+}
+
+void Stubs::do_si16(MethodFrame* f, int32_t value, int32_t addr) {
+  avmplus::mop_si16(rangeCheck(f, addr, 2), value);
+}
+
+void Stubs::do_si32(MethodFrame* f, int32_t value, int32_t addr) {
+  avmplus::mop_si32(rangeCheck(f, addr, 4), value);
+}
+
 #endif
 
 #if defined(VMCFG_UNALIGNED_FP_ACCESS) && defined(VMCFG_LITTLE_ENDIAN)
 
 double Stubs::do_lf32(MethodFrame* f, int32_t addr) {
   return (double) *((float*) rangeCheck(f, addr, 4));
 }
 
@@ -1251,16 +1277,34 @@ double Stubs::do_lf64(MethodFrame* f, in
 void Stubs::do_sf32(MethodFrame* f, double value, int32_t addr) {
   *((float*) rangeCheck(f, addr, 4)) = float(value);
 }
 
 void Stubs::do_sf64(MethodFrame* f, double value, int32_t addr) {
   *((double*) rangeCheck(f, addr, 8)) = value;
 }
 
+#else
+
+double Stubs::do_lf32(MethodFrame* f, int32_t addr) {
+  return avmplus::mop_lf32(rangeCheck(f, addr, 4));
+}
+
+double Stubs::do_lf64(MethodFrame* f, int32_t addr) {
+  return avmplus::mop_lf64(rangeCheck(f, addr, 8));
+}
+
+void Stubs::do_sf32(MethodFrame* f, double value, int32_t addr) {
+  avmplus::mop_sf32(rangeCheck(f, addr, 4), value);
+}
+
+void Stubs::do_sf64(MethodFrame* f, double value, int32_t addr) {
+  avmplus::mop_sf64(rangeCheck(f, addr, 8), value);
+}
+
 #endif
 
 String* Stubs::do_abc_esc_xelem(MethodFrame* f, Atom value) {
   return core(f)->ToXMLString(value);
 }
 
 String* Stubs::do_abc_esc_xattr(MethodFrame* f, Atom value) {
   return core(f)->EscapeAttributeValue(value);
--- a/halfmoon/hm-typeanalyzer.cpp
+++ b/halfmoon/hm-typeanalyzer.cpp
@@ -619,17 +619,17 @@ bool isCastCall(Lattice* l, const Type* 
     return (*t = l->double_type), true;
   if (slot_traits == builtin.boolean_ctraits)
     return (*t = l->boolean_type), true;
   if (slot_traits == builtin.string_ctraits)
     return (*t = l->string_type[kTypeNotNull]), true;
   if (slot_traits && slot_traits->base == builtin.class_itraits &&
       slot_traits->getCreateClassClosureProc() == NULL) {
     // if this class C is user-defined then C(1+ args) means coerce<C>
-    assert(slot_traits->itraits && "Class with unknown instance traits");
+    assert(slot_traits->itraits != NULL && "Class with unknown instance traits");
     return (*t = l->makeType(slot_traits->itraits)), true;
   }
   return false;
 }
 
 void TypeAnalyzer::do_abc_callprop(CallStmt2* instr) {
   const Type* object_type = type(instr->object_in());
   CallAnalyzer b(object_type, type(instr->param_in()),
--- a/halfmoon/hm-types.h
+++ b/halfmoon/hm-types.h
@@ -1621,16 +1621,20 @@ public:
   bool operator!=(const TypeKey& other) const {
     return *type_ != *other.type_;
   }
 
   static size_t hash(const TypeKey& k) {
     return k.type_->hashCode();
   }
 
+  // Allow creation of dummy instance for compatibility with 'alignof'.
+  // MSVC++ does not provide this by default.
+  TypeKey() : type_(0) {}
+
 private:
   const Type* type_;
 };
 
 /**
  * Nullability enum. Used to index Lattice type pairs
  */
 enum NullabilityKind {
--- a/halfmoon/profiler/profiler-types.h
+++ b/halfmoon/profiler/profiler-types.h
@@ -55,9 +55,9 @@ enum RecordedType {
   kVECTOR,
   kBOOLEAN,
   kSTRING,
   kNAMESPACE,
 };
 
 };
 
-#endif // end DEFINE PROFILER_TYPES_
\ No newline at end of file
+#endif // end DEFINE PROFILER_TYPES_
--- a/nanojit/NativeARM.cpp
+++ b/nanojit/NativeARM.cpp
@@ -769,17 +769,19 @@ Assembler::asm_arg_float(LIns* arg, Para
     if( !singlePrecision && ((params.r == R1) || (params.r == R3)) ) {
         params.r = Register(params.r + 1);
     }
 
     verbose_only(if (_logc->lcbits & LC_Native)
         _logc->printf("Param.r now: %d (%s)\n",REGNUM(params.r),gpn(params.r)));
     if (params.r <= R3) {
         Register    ra = params.r;
-        Register dm;
+        // FIXME: The initial value shuts up a diagnostic for a possibly uninitialized
+        // variable, but there really is a problem here that needs to be investigated.
+        Register dm = 0;
 
         if (singlePrecision) {
             NanoAssert(ARM_VFP);
             dm = findRegFor(arg, FpSRegs);
             FMRS(ra,dm);
             params.r = Register(ra + 1);
             return;
         }