bug 757375: workers and shareable ByteArray bug fixes (p=dtomack,r=jasowill)
authorDan Schaffer <Dan.Schaffer@adobe.com>
Thu, 09 Aug 2012 06:14:57 -0700
changeset 7524 7267380d93542a39fa6ef5daaea0a8c9a8997ec7
parent 7523 5b15da1e8285c6c7e2a69926a778d2a92efaf884
child 7525 c4affe1c9c2c15e0311623f80e77d0213e75d66f
push id4255
push userdschaffe@adobe.com
push dateThu, 09 Aug 2012 13:25:42 +0000
reviewersjasowill
bugs757375, 1097229, 1098567
bug 757375: workers and shareable ByteArray bug fixes (p=dtomack,r=jasowill) integrate CL: 1097229 CL@1098567
core/ByteArrayGlue.cpp
core/Isolate.cpp
core/Isolate.h
vmbase/Safepoint.cpp
--- a/core/ByteArrayGlue.cpp
+++ b/core/ByteArrayGlue.cpp
@@ -388,22 +388,16 @@ namespace avmplus
         m_owner->m_buffer->array = newArray;
         m_owner->m_buffer->capacity = newCapacity;
         if (m_owner->m_copyOnWriteOwner != NULL)
         {
             m_owner->m_copyOnWriteOwner = NULL;
             // Set this to NULL so we don't attempt to delete it in our dtor.
             m_oldArray = NULL;
         }
-
-        if (vmbase::SafepointRecord::hasCurrent()) {
-            if (vmbase::SafepointRecord::current()->manager()->inSafepointTask()) {
-                AvmCore::getActiveCore()->getIsolate()->getAggregate()->reloadGlobalMemories();
-            }
-        }
     }
 
     /*
         Why the "Grower" class?
         
         (1) It provides a clean way to defer discarding the old buffer until the
             end of the calling function; this matters in the case of Write(),
             as it's legal to call Write() on your own buffer, and so if growth
@@ -709,16 +703,24 @@ namespace avmplus
             } 
         }
         
         m_owner->m_buffer->length = newLength;
         
         if (m_owner->m_position > newLength) {
             m_owner->m_position = newLength;
         }
+
+        // Any time the length of the ByteArray is updated we have to notify all isolates,
+        // regardless of whether the backing store is reallocated or not.
+        if (vmbase::SafepointRecord::hasCurrent()) {
+            if (vmbase::SafepointRecord::current()->manager()->inSafepointTask()) {
+                AvmCore::getActiveCore()->getIsolate()->getAggregate()->reloadGlobalMemories();
+            }
+        }
     }
 
 
     void FASTCALL ByteArray::SetLength(uint32_t newLength)
     {
         SetLengthCommon(newLength, false);
     }
 
@@ -955,27 +957,40 @@ namespace avmplus
             SetCopyOnWriteOwner(origCopyOnWriteOwner);
 
             m_toplevel->core()->throwException(exn);
         }
 
         destOverlay = (struct lzma_compressed*) m_buffer->array;
         destLen = m_buffer->capacity - lzmaHeaderSize;
 
+        // If this ByteArray's data is being shared then we have to
+        // snapshot it before we run any compression algorithm; otherwise
+        // the data could be changed in a way that may cause an exploit.
+        uint8_t* dataSnapshot = origData;
+        if (shared) {
+            dataSnapshot = mmfx_new_array(uint8_t, origLen);
+            VMPI_memcpy(dataSnapshot, origData, origLen);
+        }
+
         retcode = LzmaCompress(destOverlay->compressedPayload, &destLen,
-                               origData, origLen,
+                               dataSnapshot, origLen,
                                destOverlay->lzmaProps, &lzmaPropsSize,
                                9, // -1 would yield default level (5),
                                1<<20, // 0 would yield default dictSize (1<<24)
                                -1,  // default lc (3),
                                -1,  // default lp (0),
                                -1,  // default pb (2),
                                -1,  // default fb (32),
                                1); // -1 would yield default numThreads (2)
 
+        if (shared) {
+            mmfx_delete_array(dataSnapshot);
+        }
+
         switch (retcode) {
         case SZ_OK:
             if (destLen > (m_buffer->capacity - lzmaHeaderSize)) {
                 AvmAssertMsg(false, "LZMA broke its contract.");
 
                 // Belt-and-suspenders: If control gets here,
                 // something is terribly wrong, and LZMA is lying to
                 // us.  Rather than risk establishing a bogus structure,
@@ -1045,18 +1060,24 @@ namespace avmplus
         uint8_t* origData                       = m_buffer->array;
         uint32_t origLen                        = m_buffer->length;
         uint32_t origCap                        = m_buffer->capacity;
         MMgc::GCObject* origCopyOnWriteOwner    = m_copyOnWriteOwner;
         if (!origLen) // empty buffer should give empty result
             return;
 
         bool shared = IsShared();
+        // If this ByteArray's data is being shared then we have to
+        // snapshot it before we run any compression algorithm; otherwise
+        // the data could be changed in a way that may cause an exploit.
+        uint8_t* dataSnapshot = origData;
         FixedHeapRef<Buffer> origBuffer = m_buffer;
         if (shared) {
+            dataSnapshot = mmfx_new_array(uint8_t, origLen);
+            VMPI_memcpy(dataSnapshot, origData, origLen);
             m_buffer = mmfx_new(Buffer());
         }
 
         m_buffer->array    = NULL;
         m_buffer->length   = 0;
         m_buffer->capacity = 0;
         m_position         = 0;
         m_copyOnWriteOwner = NULL;
@@ -1079,17 +1100,17 @@ namespace avmplus
                                 algorithm == k_zlib ? MAX_WBITS : MAX_WINDOW_RAW_DEFLATE,
                                 DEFAULT_MEMORY_USE,
                                 Z_DEFAULT_STRATEGY);
         AvmAssert(error == Z_OK);
 
         uint32_t newCap = deflateBound(&stream, origLen);
         EnsureCapacity(newCap);
 
-        stream.next_in = origData;
+        stream.next_in = dataSnapshot;
         stream.avail_in = origLen;
         stream.next_out = m_buffer->array;
         stream.avail_out = m_buffer->capacity;
 
         error = deflate(&stream, Z_FINISH);
         AvmAssert(error == Z_STREAM_END);
 
         m_buffer->length = stream.total_out;
@@ -1099,16 +1120,17 @@ namespace avmplus
         // but Uncompress() has always ended with position == 0.
         // Weird, but we must maintain it.
         m_position = m_buffer->length;
 
         deflateEnd(&stream);
 
         if (shared)
         {
+            mmfx_delete_array(dataSnapshot);
             ByteArraySwapBufferTask task(this, origBuffer);
             task.exec();
         }
         // Note: the Compress() method has never reported an error for corrupted data,
         // so we won't start now. (Doing so would probably require a version check,
         // to avoid breaking content that relies on misbehavior.)
         if (origData && origData != m_buffer->array && origCopyOnWriteOwner == NULL)
         {
@@ -1142,38 +1164,50 @@ namespace avmplus
         MMgc::GCObject* origCopyOnWriteOwner    = m_copyOnWriteOwner;
 
         if (!origLen) // empty buffer should give empty result
             return;
 
         if (!m_buffer->array || m_buffer->length < lzmaHeaderSize)
             return;
 
+        // If this ByteArray's data is being shared then we have to
+        // snapshot it before we run any decompression algorithm; otherwise
+        // the data could be changed in a way that may cause an exploit.
+        uint8_t* dataSnapshot = origData;
+        bool shared = IsShared();
+        if (shared) {
+            dataSnapshot = mmfx_new_array(uint8_t, origLen);
+            VMPI_memcpy(dataSnapshot, origData, origLen);
+        }
+
         struct lzma_compressed *srcOverlay;
-        srcOverlay = (struct lzma_compressed*)origData;
+        srcOverlay = (struct lzma_compressed*)dataSnapshot;
 
         uint32_t unpackedLen;
         unpackedLen  =  (uint32_t)srcOverlay->unpackedSize[0];
         unpackedLen +=  (uint32_t)srcOverlay->unpackedSize[1] << 8;
         unpackedLen +=  (uint32_t)srcOverlay->unpackedSize[2] << 16;
         unpackedLen +=  (uint32_t)srcOverlay->unpackedSize[3] << 24;
 
         // check that size is reasonable before modifying internal structure.
         if (srcOverlay->unpackedSizeHighBits[0] != 0 ||
             srcOverlay->unpackedSizeHighBits[1] != 0 ||
             srcOverlay->unpackedSizeHighBits[2] != 0 ||
             srcOverlay->unpackedSizeHighBits[3] != 0)
         {
+            if (shared) {
+                mmfx_delete_array(dataSnapshot);
+            }
             // We can't allocate a byte array of such large size.
             ThrowMemoryError();
         }
 
         size_t srcLen = (origLen - lzmaHeaderSize);
 
-        bool shared = IsShared();
         FixedHeapRef<Buffer> origBuffer = m_buffer;
         if (shared) {
             m_buffer = mmfx_new(Buffer());
         }
 
         m_buffer->array    = NULL;
         m_buffer->length   = 0;
         m_buffer->capacity = 0;
@@ -1204,16 +1238,21 @@ namespace avmplus
         }
 
         int retcode;
         size_t destLen = unpackedLen;
 
         retcode = LzmaUncompress(m_buffer->array, &destLen,
                                  srcOverlay->compressedPayload, &srcLen,
                                  srcOverlay->lzmaProps, LZMA_PROPS_SIZE);
+
+        if (shared) {
+            mmfx_delete_array(dataSnapshot);
+        }
+
         switch (retcode) {
         case SZ_OK:                // - OK
             if (destLen != unpackedLen) {
                 // Belt-and-suspenders: If control gets here,
                 // something is terribly wrong, and either LZMA is
                 // lying, or the lzma header in source byte array got
                 // garbled.  Rather than risk establishing a bogus
                 // structure, fail as if lzma returned an error code.
@@ -1277,18 +1316,24 @@ namespace avmplus
         uint32_t origLen                        = m_buffer->length;
         uint32_t origPos                        = m_position;
         MMgc::GCObject* origCopyOnWriteOwner    = m_copyOnWriteOwner;
         if (!origLen) // empty buffer should give empty result
             return;
 
         bool shared = IsShared();
         FixedHeapRef<Buffer> origBuffer = m_buffer;
+        // If this ByteArray's data is being shared then we have to
+        // snapshot it before we run any decompression algorithm; otherwise
+        // the data could be changed in a way that may cause an exploit.
+        uint8_t* dataSnapshot = origData;
         if (shared) {
             m_buffer = mmfx_new(Buffer());
+            dataSnapshot = mmfx_new_array(uint8_t, origLen);
+            VMPI_memcpy(dataSnapshot, origData, origLen);
         }
 
         m_buffer->array    = NULL;
         m_buffer->length   = 0;
         m_buffer->capacity = 0;
         m_position         = 0;
         m_copyOnWriteOwner = NULL;
         // we know that the uncompressed data will be at least as
@@ -1301,30 +1346,34 @@ namespace avmplus
 
         int error = Z_OK;
         
         z_stream stream;
         VMPI_memset(&stream, 0, sizeof(stream));
         error = inflateInit2(&stream, algorithm == k_zlib ? 15 : -15);
         AvmAssert(error == Z_OK);
 
-        stream.next_in = origData;
+        stream.next_in = dataSnapshot;
         stream.avail_in = origLen;
         while (error == Z_OK)
         {
             stream.next_out = scratch;
             stream.avail_out = kScratchSize;
             error = inflate(&stream, Z_NO_FLUSH);
             Write(scratch, kScratchSize - stream.avail_out);
         }
 
         inflateEnd(&stream);
 
         mmfx_delete_array(scratch);
 
+        if (shared) {
+            mmfx_delete_array(dataSnapshot);
+        }
+
         if (error == Z_STREAM_END)
         {
             if (shared) {
                 ByteArraySwapBufferTask task(this, origBuffer);
                 task.exec();
             }
             // everything is cool
             if (origData && origData != m_buffer->array && origCopyOnWriteOwner == NULL)
@@ -2271,21 +2320,17 @@ namespace avmplus
     }
 
     int32_t ByteArray::UnprotectedAtomicCompareAndSwapLength(int32_t expectedLength, int32_t newLength)
     {
         int32_t result = GetLength();
         if (expectedLength == result) {
             const bool CalledFromAS3Setter = true;
             Grower grower(this, newLength);
-            bool reloadGlobals = !grower.RequestWillReallocBackingStore();
             grower.SetLengthCommon(newLength, CalledFromAS3Setter);
-            if (reloadGlobals  && IsShared()) {
-                AvmCore::getActiveCore()->getIsolate()->getAggregate()->reloadGlobalMemories();
-            }
         }
         return result;
     }
 
     void ByteArrayObject::set_shareable(bool val)
     {
         m_byteArray.setWorkerLocal(!val);
     }
--- a/core/Isolate.cpp
+++ b/core/Isolate.cpp
@@ -79,19 +79,16 @@ namespace avmplus
         // the object vtable will be that of the parent, so the overriden DestroyItem() won't 
         // be called! We'll call Clear() here first, the subsequent Clear() will have nothing to do. 
     }
 
 
     Aggregate::Aggregate()
         : m_primordialGiid(1) // eventually there will be many of those
         , m_activeIsolateCount(0)
-        , m_commInProgress(0)
-        , m_msgInTransit(0)
-        , m_blockedChannels(0)
         , m_inShutdown(false)
     {}
 
     Aggregate::~Aggregate()
     {
         m_threadCleanUps.deallocate();
     }
 
@@ -115,22 +112,16 @@ namespace avmplus
 
             if (isolate->m_state == Isolate::NEW || isolate->m_state > Isolate::FINISHING) {
                 return false; // not gonna wait 
             }
 
             if (isolate->m_state == Isolate::RUNNING || isolate->m_state == Isolate::STARTING) {
                 isolate->interrupt();
             }
-            // Ensure that the write to AvmCore::interrupted is visible to the other thread.
-            // First set interrupted status, then notify 
-            // there may be some isolates waiting for the call requests
-            SCOPE_LOCK_NAMED(lk, m_commlock) {
-                lk.notifyAll();
-            }
             result = true;
 
             SCOPE_LOCK(m_globals->m_isolateMap.m_lock) { 
                 m_globals->m_isolateMap.RemoveItem(desc);
             }
 
             AvmCore* core = isolate->m_core;
             if (core == NULL) {
@@ -161,21 +152,16 @@ namespace avmplus
         return false;
     }
 
 
     void Aggregate::requestAggregateExit()
     {
         SCOPE_LOCK(m_globals->m_lock) {
             m_inShutdown = true;
-            SCOPE_LOCK_NAMED(lk, m_commlock) {
-                // there may be some isolates waiting for the call requests
-                lk.notifyAll();
-            }
-
             // this iterator intterupts every isolate's core
             class IsolateCoreInterrupt: public Globals::IsolateMap::Iterator
             {
                 Aggregate* m_aggregate;
             public:
                 IsolateCoreInterrupt(Aggregate* aggregate): m_aggregate(aggregate) {}
                 virtual void each(int32_t giid, FixedHeapRef<Isolate> isolate) 
                 {
@@ -227,22 +213,16 @@ namespace avmplus
             //      when the condition hasn't been met.
             //  (2) needs to release resources (unlock any held Mutex or Condition, clear shared properties, etc)
             IsolateCoreInterrupt iInterrupt(this);
             ReleaseResources iRelease(this);
             SCOPE_LOCK(m_globals->m_isolateMap.m_lock) {
                 m_globals->m_isolateMap.ForEach(iInterrupt);
                 m_globals->m_isolateMap.ForEach(iRelease);
             }
-            SCOPE_LOCK_NAMED(lk, m_commlock) {
-                // Wake up isolates in waitForAnySend()
-                // Note: waitForAnySend() removed, remove notification?
-                lk.notifyAll();
-            }
-
         }
     }
 
     /* virtual */
     void Aggregate::throwWorkerTerminatedException(Toplevel* currentToplevel)
     {
         AvmCore* core = currentToplevel->core();
         AvmAssert(core->getIsolate()->getAggregate() == this);
@@ -265,17 +245,16 @@ namespace avmplus
         , m_state(Isolate::NEW)
         , m_failed(false)
         , m_interrupted(false)
     {
     }
 
     void Aggregate::stateTransition(Isolate* isolate, Isolate::State to)
     {
-        AvmAssert(!m_commlock.isLockedByCurrentThread());
         SCOPE_LOCK(m_globals->m_lock) {
             enum Isolate::State from = isolate->m_state;
             bool verbose = false;
             if (verbose) {
                 
                 static const char* state_names[] = {
                     "NONE",
                     "NEW",
@@ -716,21 +695,23 @@ namespace avmplus
         if (timeout == -1) {
             cond.wait();
         }
         else {
             result = cond.wait(timeout);
         }
         DEBUG_STATE(("thread %d is awake\n", VMPI_currentThread()));
 
-        if (isolate) {
+        if (isolate)
+        {
             isolate->invalidateActiveWaitRecord(cond.getMonitor());
+            // We want to interrupt if the isolate was interrupted OR if a script timeout has fired (only happens in the primordial).
+            interrupted = isolate->isInterrupted() || isolate->targetCore()->interruptCheckReason(AvmCore::ScriptTimeout);
+
         }
-        
-        interrupted = isolate ? isolate->isInterrupted() : false;
     }
 
     InterruptableState::InterruptableState()
     {
 #ifdef DEBUG
         gid = ++globalId;
 #endif // DEBUG
     }
@@ -1002,17 +983,17 @@ namespace avmplus
                 : m_aggregate(aggregate)
                 , m_workerVector(workerVector)
                 , m_toplevel(toplevel)
                 , m_index(0)
             {}
             virtual void each(int32_t, FixedHeapRef<Isolate> isolate) 
             {
                 //  Only list workers that are in the RUNNING state
-                if (isolate->m_state == Isolate::RUNNING)
+                if (isolate->m_aggregate->queryState(isolate) == Isolate::RUNNING)
                 {
                     GCRef<ScriptObject> interned = m_toplevel->getInternedObject(isolate);
                     if (interned == NULL) {
                         interned = isolate->workerObject(m_toplevel);
                     }
                     m_workerVector->setUintProperty(m_index++, interned->atom());
                 }
             }
--- a/core/Isolate.h
+++ b/core/Isolate.h
@@ -360,20 +360,16 @@ namespace avmplus
         {
             return m_inShutdown;
         }
 
     private:
         static Globals* m_globals;
         int32_t m_primordialGiid;
         int m_activeIsolateCount;
-        vmbase::WaitNotifyMonitor m_commlock; // protects all communication (ouch)
-        int32_t m_commInProgress;
-        int32_t m_msgInTransit; // sent but not received messages
-        int32_t m_blockedChannels; // channels waiting for resolution in waitForAnySend()
         vmbase::SafepointManager m_safepointMgr; // Currently for shared byte array only.
         bool m_inShutdown;
         
         FixedHeapArray<vmbase::VMThread*> m_threadCleanUps;
 
     };
 
     // Stack allocated, RAII pattern.
--- a/vmbase/Safepoint.cpp
+++ b/vmbase/Safepoint.cpp
@@ -59,19 +59,16 @@ namespace vmbase {
         , m_requester((vmpi_thread_t) 0)
         , m_hardwareConcurrency(VMPI_processorQtyAtBoot())
     {
     }
 
     SafepointManager::~SafepointManager()
     {
         assert(m_records == NULL);
-		// If a SafepointRecord is left on the list due to OOM not calling the leave() method, NULL it out
-        if(m_records != NULL)
-	        SafepointRecord::setCurrent(NULL);
     }
 
     void SafepointManager::requestSafepointTask(SafepointTask& task)
     {
         assert(SafepointRecord::hasCurrent());
         assert(SafepointRecord::current()->m_manager == this);
         assert(!inSafepointTask());