no bug: assorted bug fixes concurrency, telemetry (p=jasowill,p=dtomack,p=gcomnino) default tip
author Dan Schaffer <Dan.Schaffer@adobe.com>
Tue, 29 Jan 2013 13:37:51 -0800
changeset 7587 5571cf86fc68
parent 7586 2b38aa82c803
push id 4262
push user dschaffe@adobe.com
push date Wed, 30 Jan 2013 19:01:31 +0000
bugs 1155278, 1159600
no bug: assorted bug fixes concurrency, telemetry (p=jasowill,p=dtomack,p=gcomnino) integrate CL# 1155278 CL@1159600
AVMPI/MMgcPortMac.cpp
core/CodegenLIR.h
core/ITelemetry.h
core/Isolate-inlines.h
core/Isolate.cpp
core/Isolate.h
nanojit/Assembler.cpp
nanojit/LIR.h
nanojit/NativeX64.cpp
nanojit/nanojit.h
platform/win32/win32-platform.h
--- a/AVMPI/MMgcPortMac.cpp
+++ b/AVMPI/MMgcPortMac.cpp
@@ -8,16 +8,20 @@
 
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
 #include <fcntl.h>
 #include <signal.h>
 
+#if defined(PEPPER_PLUGIN)
+#include <mach/mach.h>
+#endif // PEPPER_PLUGIN
+
 #ifdef MMGC_MEMORY_PROFILER
 #include <dlfcn.h>
 #include <cxxabi.h>
 #include <mach-o/dyld.h>
 #endif
 
 #include <sys/mman.h>
 #include <sys/types.h>
--- a/core/CodegenLIR.h
+++ b/core/CodegenLIR.h
@@ -5,16 +5,20 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 #ifndef __avmplus_CodegenLIR__
 #define __avmplus_CodegenLIR__
 
 #include "LirHelper.h"
 #include "InvokerCompiler.h"
 
+#if defined(VMCFG_INTERRUPT_SAFEPOINT_POLL) && !defined(NJ_SAFEPOINT_POLLING_SUPPORTED)
+#error "configuration error: safepoint polling not supported on this platform"
+#endif
+
 namespace avmplus
 {
     using namespace nanojit;
 
    /**
     * Each InEdge record tracks an unpatched branch.  When the target code
     * is generated in emitLabel(), we patch each tracked instruction or
     * jtbl entry.
--- a/core/ITelemetry.h
+++ b/core/ITelemetry.h
@@ -107,69 +107,85 @@
 namespace telemetry
 {
     typedef const char * telemetryId;
 
     // It is recommended to use the above macros -- they are sufficient for most use cases.
     class ITelemetry
     {
     public:
-        ITelemetry() : m_active (false) {}
+        ITelemetry() : m_active (false), m_spanNestLevel (0) {}
         virtual ~ITelemetry() {}
         bool IsActive() {   return m_active;    } // Check whether Telemetry is ON.
         virtual uint64_t GetElapsedTime() = 0;    // Get a microsecond timestamp since Telemetry start up.
+        int32_t GetSpanNestLevel() { return m_spanNestLevel; } // Get the current span nesting level.
 
         virtual void WriteValue(telemetryId id, bool value) = 0;    // Writes a boolean metric.
         virtual void WriteValue(telemetryId id, uint32_t value) = 0;
         virtual void WriteValue(telemetryId id, int32_t value) = 0;
         virtual void WriteValue(telemetryId id, double value) = 0;
         virtual void WriteValue(telemetryId id, const char* value) = 0;
         virtual void WriteValue(telemetryId id, uint64_t value) = 0;
         virtual void WriteValue(telemetryId id, int64_t value) = 0;
         virtual void WriteValue(telemetryId id, const uint8_t *value, uint32_t len) = 0; // Writes bytearray.
         virtual void WriteValue(telemetryId id, const uint32_t value[], uint32_t len) = 0; // Writes unsigned integer array
         virtual void WriteSpan(telemetryId id, uint64_t start, bool force, bool fromAS = false) = 0;
         virtual void WriteTime(telemetryId) = 0;    // Writes a time metric with microsecond timestamp.
     protected:
         void SetActive(bool isActive)   { m_active = isActive; }
     private:
         bool m_active;
+        int32_t m_spanNestLevel;
+
+        friend class TelemetryMethod;
     };
 
 
     // Used for measuring the duration of a method or any block of code.
     // Declare this on the stack and when the block exits, the destructor will log the time delta.
     class TelemetryMethod
     {
     public:
         TelemetryMethod(ITelemetry* telem, telemetryId id, bool force=false)
         {
             Init(telem, id, force);
         }
 
         virtual ~TelemetryMethod()
         {
-            if (m_telemetry && m_telemetry->IsActive()) {
-                m_telemetry->WriteSpan(m_id, m_start, m_force);
+            if (m_telemetry) {
+                if (m_telemetry->IsActive())
+                    m_telemetry->WriteSpan(m_id, m_start, m_force);
+                DecrementSpanNestLevel();
             }
         }
 
+    protected:
+        void DecrementSpanNestLevel()
+        {
+            AvmAssert(m_telemetry->m_spanNestLevel > 0);
+            if (m_telemetry->m_spanNestLevel > 0)
+                m_telemetry->m_spanNestLevel--;
+        }
+
         void Init(ITelemetry* telem, telemetryId id, bool force)
         {
             m_id = id;
             m_telemetry = telem;
             m_force = force;
             if (m_telemetry && m_telemetry->IsActive()) {
                 m_start = (uint64_t)m_telemetry->GetElapsedTime();
             } else {
                 m_start = 0;
             }
+            if (m_telemetry) {
+                m_telemetry->m_spanNestLevel++;
+            }
         }
 
-    protected:
         bool m_force;
         telemetryId m_id;
         uint64_t m_start;
         ITelemetry *m_telemetry;
     };
 
 
 } // namespace telemetry
--- a/core/Isolate-inlines.h
+++ b/core/Isolate-inlines.h
@@ -201,33 +201,24 @@ namespace avmplus
 
     template <class T>
     Atom WorkerObjectBase<T>::getSharedProperty(String* key)
     {
         // the isolate can be NULL in the situation where a
         // worker is busy creating workers during a shutdown cycle,
         // as the logic for creating an isolate will return NULL 
         // in that situation.
+		Atom result = undefinedAtom;
         if (m_isolate)
         {
-		    Atom result = undefinedAtom;
 		    StUTF8String buf(key);
-		    ChannelItem* item;
-		    const bool cOk = m_isolate->getSharedProperty(buf, &item);
-		    if (cOk) 
-		    {
-			    result =  item->getAtom(self()->toplevel());
-		    }
+		    result = m_isolate->getSharedProperty(buf, self()->toplevel());
+        }
 
-            return result;
-        }
-        else
-        {
-            return undefinedAtom;
-        }
+		return result;
     }
 
     template <class T>
     void WorkerObjectBase<T>::internalStart()
     {
         // the isolate can be NULL in the situation where a
         // worker is busy creating workers during a shutdown cycle,
         // as the logic for creating an isolate will return NULL 
--- a/core/Isolate.cpp
+++ b/core/Isolate.cpp
@@ -376,27 +376,48 @@ throw_terminated_error:
                 keyInternal->deallocate();
                 mmfx_delete(keyInternal); // m_properties doesn't own the key
                 // We already removed.
             }
         }
         // else the key is owned by the hashmap
     }
 
-    bool Isolate::getSharedProperty(const StUTF8String& key, ChannelItem** item)
+    Atom Isolate::getSharedProperty(const StUTF8String& key, Toplevel* toplevel)
     {
-		bool result = false;
+		Atom result = undefinedAtom;
+		
+		AvmAssert(toplevel != NULL);
+		
         SCOPE_LOCK(m_sharedPropertyLock) {
+			ChannelItem* item = NULL;
+			
             Isolate::SharedPropertyNamep keyInternal = mmfx_new(FixedHeapArray<char>());
             keyInternal->values = (char*)key.c_str(); // it's OK, we won't touch it
             keyInternal->length = key.length();
-            result = m_sharedProperties.LookupItem(keyInternal, item);
+            const bool cFoundItem = m_sharedProperties.LookupItem(keyInternal, &item);
             keyInternal->values = NULL;
             mmfx_delete(keyInternal);
-        }
+			
+			if (cFoundItem)
+			{
+				AvmCore* core = toplevel->core();
+				TRY( core, kCatchAction_Ignore )
+				{
+					result = item->getAtom(toplevel);
+				}
+				CATCH (Exception* e)
+				{
+					(void) e;
+				}
+				END_CATCH
+				END_TRY
+			}
+       }
+		
         return result;
     }
 
     ChannelItem* Isolate::makeChannelItem(Toplevel* toplevel, Atom atom)
     {
         class DoubleChannelItem: public ChannelItem
         {
         public:
--- a/core/Isolate.h
+++ b/core/Isolate.h
@@ -131,17 +131,17 @@ namespace avmplus
 
         virtual void releaseActiveResources();
  		vmbase::RecursiveMutex m_sharedPropertyLock; 
 		SharedPropertyMap m_sharedProperties;
 
     public:
         void clearThread();
         void setSharedProperty(const StUTF8String& key, ChannelItem* item);
-        bool getSharedProperty(const StUTF8String& key, ChannelItem** outItem);
+        Atom getSharedProperty(const StUTF8String& key, Toplevel* toplevel);
         virtual ChannelItem* makeChannelItem(Toplevel* toplevel, Atom atom);
         void signalInterruptibleState();
         virtual bool retryInterruptibleState();
 
 
         /*
          * InterruptibleState provides basic management 
          * for any objects running within an isolate that 
--- a/nanojit/Assembler.cpp
+++ b/nanojit/Assembler.cpp
@@ -1411,21 +1411,19 @@ typedef void* (*decode_instructions_ftyp
     {
         NanoAssert(ins->opcode() == LIR_brsavpc);
         LIns* cond = ins->oprnd1();
 
         countlir_jcc();
         LIns* to = ins->getTarget();
         LabelState *label = _labels.get(to);
 		NanoAssert(label && label->addr);
-		{
-            // Forward jump to known label.  Need to merge with label's register state.
-            unionRegisterState(label->regs);
-            asm_brsavpc_impl(cond, label->addr);
-        }
+        // Forward jump to known label.  Need to merge with label's register state.
+        unionRegisterState(label->regs);
+        asm_brsavpc_impl(cond, label->addr);
     }
 #endif
 
     void Assembler::asm_jov(LIns* ins, InsList& pending_lives)
     {
         // The caller is responsible for countlir_* profiling, unlike
         // asm_jcc above.  The reason for this is that asm_jov may not be
         // be called if the instruction is dead, and it is our convention
--- a/nanojit/LIR.h
+++ b/nanojit/LIR.h
@@ -1068,17 +1068,17 @@ NanoStaticAssert(LIR_start == 0 && LIR_s
             return isImmIorF() || isImmQorD() || isImmF4();
         }
 
         bool isConditionalBranch() const {
             return isop(LIR_jt) || isop(LIR_jf) || isop(LIR_brsavpc) || isJov();
         }
 
         bool isUnConditionalBranch() const {
-            return isop(LIR_j) || isop(LIR_jtbl) || isop(LIR_brsavpc);
+            return isop(LIR_j) || isop(LIR_jtbl);
         }
 
         bool isBranch() const {
             return isConditionalBranch() || isUnConditionalBranch();
         }
 
         LTy retType() const {
             return retTypes[opcode()];
--- a/nanojit/NativeX64.cpp
+++ b/nanojit/NativeX64.cpp
@@ -1615,29 +1615,29 @@ namespace nanojit
         POPR(R14);
         POPR(R15);
         ADDQRI(RSP, 32);
     }
 
     void Assembler::asm_brsavpc_impl(LIns* flag, NIns* target)
     {
         Register r = findRegFor(flag, GpRegs);
-        underrunProtect(20);
+        underrunProtect(19);
     
         // discard pc
-        ADDQRI(RSP, 16);  
+        ADDQR8(RSP, 16);  
         
         // handle interrupt call
         JNE(0, target);  
         
         // save pc
-        emit(X64_call);  
+        emit(X64_call); // call with displacement 0  
         
-        CMPQRI(r, 0);   
-        SUBQRI(RSP, 8); 
+        CMPQR8(r, 0);   
+        SUBQR8(RSP, 8); 
     }
 
     void Assembler::asm_restorepc()
     {
         underrunProtect(9);
         // jmp dword ptr [rsp]
         emit(0x2424FF0000000003LL);
         // add qword ptr [rsp],6
--- a/nanojit/nanojit.h
+++ b/nanojit/nanojit.h
@@ -11,17 +11,17 @@
 #include "njcpudetect.h"
 
 #ifdef FEATURE_NANOJIT
 
 #if defined AVMPLUS_IA32
     #define NANOJIT_IA32
 #elif defined AVMPLUS_ARM
     #if defined(TARGET_THUMB2) || defined(UNDER_RT)
-         #define NANOJIT_THUMB2 1
+         #define NANOJIT_THUMB2
     #else
          #define NANOJIT_ARM
     #endif
 #elif defined AVMPLUS_PPC
     #define NANOJIT_PPC
 #elif defined AVMPLUS_SPARC
     #define NANOJIT_SPARC
 #elif defined AVMPLUS_AMD64
--- a/platform/win32/win32-platform.h
+++ b/platform/win32/win32-platform.h
@@ -245,22 +245,30 @@ typedef unsigned __int64    uint64_t;
 */
 struct vmpi_spin_lock_t
 {
     volatile LONG lock;
 };
 
 REALLY_INLINE void VMPI_lockInit(vmpi_spin_lock_t* lock)
 {
+#ifdef _M_ARM_FP
+	__iso_volatile_store32(( __int32 *)&lock->lock,0);
+#else //#ifdef _M_ARM_FP
     lock->lock = 0;
+#endif //#ifdef _M_ARM_FP
 }
 
 REALLY_INLINE void VMPI_lockDestroy(vmpi_spin_lock_t* lock)
 {
+#ifdef _M_ARM_FP
+	__iso_volatile_store32(( __int32 *)&lock->lock,0);
+#else //#ifdef _M_ARM_FP
     lock->lock = 0;
+#endif //#ifdef _M_ARM_FP
 }
 
 REALLY_INLINE bool VMPI_lockAcquire(vmpi_spin_lock_t *lock)
 {
     int tries = 0;
     while (::InterlockedCompareExchange((LPLONG)&lock->lock, 1, 0) != 0)
     {
         ++tries;