bug 714280 - Make gcMaxBytes a hard limit. r=anygregor
authorIgor Bukanov <igor@mir2.org>
Thu, 05 Jan 2012 09:54:37 +0100
changeset 85202 48928463f9785a3518a2936c313de4546857c980
parent 85201 82883346ddfea4842d0cfad311f95835e3bb245b
child 85203 ed2a79ca496fa83166452e03f4be9d2cdb86f4f6
push id: 805
push user: akeybl@mozilla.com
push date: Wed, 01 Feb 2012 18:17:35 +0000
treeherder: mozilla-aurora@6fb3bf232436 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: anygregor
bugs: 714280
milestone: 12.0a1
bug 714280 - Make gcMaxBytes a hard limit. r=anygregor
js/src/jsapi.cpp
js/src/jscntxt.cpp
js/src/jscntxt.h
js/src/jscompartment.h
js/src/jsgc.cpp
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -2890,19 +2890,22 @@ JS_IsAboutToBeFinalized(JSContext *cx, v
     JS_ASSERT(!cx->runtime->gcIncrementalTracer);
     return IsAboutToBeFinalized(cx, (gc::Cell *)thing);
 }
 
 JS_PUBLIC_API(void)
 JS_SetGCParameter(JSRuntime *rt, JSGCParamKey key, uint32_t value)
 {
     switch (key) {
-      case JSGC_MAX_BYTES:
+      case JSGC_MAX_BYTES: {
+        AutoLockGC lock(rt);
+        JS_ASSERT(value >= rt->gcBytes);
         rt->gcMaxBytes = value;
         break;
+      }
       case JSGC_MAX_MALLOC_BYTES:
         rt->setGCMaxMallocBytes(value);
         break;
       default:
         JS_ASSERT(key == JSGC_MODE);
         rt->gcMode = JSGCMode(value);
         JS_ASSERT(rt->gcMode == JSGC_MODE_GLOBAL ||
                   rt->gcMode == JSGC_MODE_COMPARTMENT);
@@ -2910,21 +2913,21 @@ JS_SetGCParameter(JSRuntime *rt, JSGCPar
     }
 }
 
 JS_PUBLIC_API(uint32_t)
 JS_GetGCParameter(JSRuntime *rt, JSGCParamKey key)
 {
     switch (key) {
       case JSGC_MAX_BYTES:
-        return rt->gcMaxBytes;
+        return uint32_t(rt->gcMaxBytes);
       case JSGC_MAX_MALLOC_BYTES:
         return rt->gcMaxMallocBytes;
       case JSGC_BYTES:
-        return rt->gcBytes;
+        return uint32_t(rt->gcBytes);
       case JSGC_MODE:
         return uint32_t(rt->gcMode);
       case JSGC_UNUSED_CHUNKS:
         return uint32_t(rt->gcChunkPool.getEmptyCount());
       case JSGC_TOTAL_CHUNKS:
         return uint32_t(rt->gcChunkSet.count() + rt->gcChunkPool.getEmptyCount());
       default:
         JS_ASSERT(key == JSGC_NUMBER);
@@ -5346,17 +5349,17 @@ JS_EvaluateUCScriptForPrincipalsVersion(
                                         const jschar *chars, uintN length,
                                         const char *filename, uintN lineno,
                                         jsval *rval, JSVersion version)
 {
     AutoVersionAPI avi(cx, version);
     return EvaluateUCScriptForPrincipalsCommon(cx, obj, principals, NULL, chars, length,
                                                filename, lineno, rval, avi.version());
 }
- 
+
 extern JS_PUBLIC_API(JSBool)
 JS_EvaluateUCScriptForPrincipalsVersionOrigin(JSContext *cx, JSObject *obj,
                                               JSPrincipals *principals,
                                               JSPrincipals *originPrincipals,
                                               const jschar *chars, uintN length,
                                               const char *filename, uintN lineno,
                                               jsval *rval, JSVersion version)
 {
--- a/js/src/jscntxt.cpp
+++ b/js/src/jscntxt.cpp
@@ -777,19 +777,19 @@ PopulateReportBlame(JSContext *cx, JSErr
  * Instead we just invoke the errorReporter with an "Out Of Memory"
  * type message, and then hope the process ends swiftly.
  */
 void
 js_ReportOutOfMemory(JSContext *cx)
 {
     cx->runtime->hadOutOfMemory = true;
 
-    /* AtomizeInline can cal this indirectly when it creates the string. */
+    /* AtomizeInline can call this indirectly when it creates the string. */
     AutoUnlockAtomsCompartmentWhenLocked unlockAtomsCompartment(cx);
-    
+
     JSErrorReport report;
     JSErrorReporter onError = cx->errorReporter;
 
     /* Get the message for this error, but we won't expand any arguments. */
     const JSErrorFormatString *efs =
         js_GetLocalizedErrorMessage(cx, NULL, NULL, JSMSG_OUT_OF_MEMORY);
     const char *msg = efs ? efs->format : "Out of memory";
 
@@ -1242,23 +1242,16 @@ JSErrorFormatString js_ErrorFormatString
 JS_FRIEND_API(const JSErrorFormatString *)
 js_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber)
 {
     if ((errorNumber > 0) && (errorNumber < JSErr_Limit))
         return &js_ErrorFormatString[errorNumber];
     return NULL;
 }
 
-bool
-checkOutOfMemory(JSRuntime *rt)
-{
-    AutoLockGC lock(rt);
-    return rt->gcBytes > rt->gcMaxBytes;
-}
-
 JSBool
 js_InvokeOperationCallback(JSContext *cx)
 {
     JSRuntime *rt = cx->runtime;
     ThreadData *td = JS_THREAD_DATA(cx);
 
     JS_ASSERT_REQUEST_DEPTH(cx);
     JS_ASSERT(td->interruptFlags != 0);
@@ -1270,44 +1263,19 @@ js_InvokeOperationCallback(JSContext *cx
      */
     JS_LOCK_GC(rt);
     td->interruptFlags = 0;
 #ifdef JS_THREADSAFE
     JS_ATOMIC_DECREMENT(&rt->interruptCounter);
 #endif
     JS_UNLOCK_GC(rt);
 
-    if (rt->gcIsNeeded) {
+    if (rt->gcIsNeeded)
         js_GC(cx, rt->gcTriggerCompartment, GC_NORMAL, rt->gcTriggerReason);
 
-        /*
-         * On trace we can exceed the GC quota, see comments in NewGCArena. So
-         * we check the quota and report OOM here when we are off trace.
-         */
-        if (checkOutOfMemory(rt)) {
-#ifdef JS_THREADSAFE
-            /*
-            * We have to wait until the background thread is done in order
-            * to get a correct answer.
-            */
-            {
-                AutoLockGC lock(rt);
-                rt->gcHelperThread.waitBackgroundSweepEnd();
-            }
-            if (checkOutOfMemory(rt)) {
-                js_ReportOutOfMemory(cx);
-                return false;
-            }
-#else
-            js_ReportOutOfMemory(cx);
-            return false;
-#endif
-        }
-    }
-
 #ifdef JS_THREADSAFE
     /*
      * We automatically yield the current context every time the operation
      * callback is hit since we might be called as a result of an impending
      * GC on another thread, which would deadlock if we do not yield.
      * Operation callbacks are supposed to happen rarely (seconds, not
      * milliseconds) so it is acceptable to yield at every callback.
      *
--- a/js/src/jscntxt.h
+++ b/js/src/jscntxt.h
@@ -409,18 +409,18 @@ struct JSRuntime
      */
     js::gc::Chunk       *gcSystemAvailableChunkListHead;
     js::gc::Chunk       *gcUserAvailableChunkListHead;
     js::gc::ChunkPool   gcChunkPool;
 
     js::RootedValueMap  gcRootsHash;
     js::GCLocks         gcLocksHash;
     jsrefcount          gcKeepAtoms;
-    uint32_t            gcBytes;
-    uint32_t            gcTriggerBytes;
+    size_t              gcBytes;
+    size_t              gcTriggerBytes;
     size_t              gcLastBytes;
     size_t              gcMaxBytes;
     size_t              gcMaxMallocBytes;
 
     /*
      * Number of the committed arenas in all GC chunks including empty chunks.
      * The counter is volatile as it is read without the GC lock, see comments
      * in MaybeGC.
@@ -672,17 +672,17 @@ struct JSRuntime
     JSRuntime();
     ~JSRuntime();
 
     bool init(uint32_t maxbytes);
 
     JSRuntime *thisFromCtor() { return this; }
 
     void setGCLastBytes(size_t lastBytes, JSGCInvocationKind gckind);
-    void reduceGCTriggerBytes(uint32_t amount);
+    void reduceGCTriggerBytes(size_t amount);
 
     /*
      * Call the system malloc while checking for GC memory pressure and
      * reporting OOM error when cx is not null. We will not GC from here.
      */
     void* malloc_(size_t bytes, JSContext *cx = NULL) {
         updateMallocCounter(bytes);
         void *p = ::js_malloc(bytes);
--- a/js/src/jscompartment.h
+++ b/js/src/jscompartment.h
@@ -179,18 +179,18 @@ struct JS_FRIEND_API(JSCompartment) {
 
     js::GCMarker *barrierTracer() {
         JS_ASSERT(needsBarrier_);
         if (gcIncrementalTracer)
             return gcIncrementalTracer;
         return createBarrierTracer();
     }
 
-    uint32_t                     gcBytes;
-    uint32_t                     gcTriggerBytes;
+    size_t                       gcBytes;
+    size_t                       gcTriggerBytes;
     size_t                       gcLastBytes;
 
     bool                         hold;
     bool                         isSystemCompartment;
 
     /*
      * Pool for analysis and intermediate type information in this compartment.
      * Cleared on every GC, unless the GC happens during analysis (indicated
@@ -305,17 +305,17 @@ struct JS_FRIEND_API(JSCompartment) {
     bool wrap(JSContext *cx, js::PropertyDescriptor *desc);
     bool wrap(JSContext *cx, js::AutoIdVector &props);
 
     void markTypes(JSTracer *trc);
     void sweep(JSContext *cx, bool releaseTypes);
     void purge(JSContext *cx);
 
     void setGCLastBytes(size_t lastBytes, JSGCInvocationKind gckind);
-    void reduceGCTriggerBytes(uint32_t amount);
+    void reduceGCTriggerBytes(size_t amount);
 
     js::DtoaCache dtoaCache;
 
   private:
     js::MathCache                *mathCache;
 
     js::MathCache *allocMathCache(JSContext *cx);
 
--- a/js/src/jsgc.cpp
+++ b/js/src/jsgc.cpp
@@ -716,27 +716,30 @@ Chunk::fetchNextFreeArena(JSRuntime *rt)
 }
 
 ArenaHeader *
 Chunk::allocateArena(JSCompartment *comp, AllocKind thingKind)
 {
     JS_ASSERT(hasAvailableArenas());
 
     JSRuntime *rt = comp->rt;
+    JS_ASSERT(rt->gcBytes <= rt->gcMaxBytes);
+    if (rt->gcMaxBytes - rt->gcBytes < ArenaSize)
+        return NULL;
 
     ArenaHeader *aheader = JS_LIKELY(info.numArenasFreeCommitted > 0)
                            ? fetchNextFreeArena(rt)
                            : fetchNextDecommittedArena();
     aheader->init(comp, thingKind);
     if (JS_UNLIKELY(!hasAvailableArenas()))
         removeFromAvailableList();
 
     Probes::resizeHeap(comp, rt->gcBytes, rt->gcBytes + ArenaSize);
-    JS_ATOMIC_ADD(&rt->gcBytes, ArenaSize);
-    JS_ATOMIC_ADD(&comp->gcBytes, ArenaSize);
+    rt->gcBytes += ArenaSize;
+    comp->gcBytes += ArenaSize;
     if (comp->gcBytes >= comp->gcTriggerBytes)
         TriggerCompartmentGC(comp, gcstats::ALLOCTRIGGER);
 
     return aheader;
 }
 
 inline void
 Chunk::addArenaToFreeList(JSRuntime *rt, ArenaHeader *aheader)
@@ -758,26 +761,26 @@ Chunk::releaseArena(ArenaHeader *aheader
     JSRuntime *rt = comp->rt;
 #ifdef JS_THREADSAFE
     AutoLockGC maybeLock;
     if (rt->gcHelperThread.sweeping())
         maybeLock.lock(rt);
 #endif
 
     Probes::resizeHeap(comp, rt->gcBytes, rt->gcBytes - ArenaSize);
-    JS_ASSERT(size_t(rt->gcBytes) >= ArenaSize);
-    JS_ASSERT(size_t(comp->gcBytes) >= ArenaSize);
+    JS_ASSERT(rt->gcBytes >= ArenaSize);
+    JS_ASSERT(comp->gcBytes >= ArenaSize);
 #ifdef JS_THREADSAFE
     if (rt->gcHelperThread.sweeping()) {
         rt->reduceGCTriggerBytes(GC_HEAP_GROWTH_FACTOR * ArenaSize);
         comp->reduceGCTriggerBytes(GC_HEAP_GROWTH_FACTOR * ArenaSize);
     }
 #endif
-    JS_ATOMIC_ADD(&rt->gcBytes, -int32_t(ArenaSize));
-    JS_ATOMIC_ADD(&comp->gcBytes, -int32_t(ArenaSize));
+    rt->gcBytes -= ArenaSize;
+    comp->gcBytes -= ArenaSize;
 
     aheader->setAsNotAllocated();
     addArenaToFreeList(rt, aheader);
 
     if (info.numArenasFree == 1) {
         JS_ASSERT(!info.prevp);
         JS_ASSERT(!info.next);
         addToAvailableList(comp);
@@ -1375,17 +1378,17 @@ JSRuntime::setGCLastBytes(size_t lastByt
     gcLastBytes = lastBytes;
 
     size_t base = gckind == GC_SHRINK ? lastBytes : Max(lastBytes, GC_ALLOCATION_THRESHOLD);
     float trigger = float(base) * GC_HEAP_GROWTH_FACTOR;
     gcTriggerBytes = size_t(Min(float(gcMaxBytes), trigger));
 }
 
 void
-JSRuntime::reduceGCTriggerBytes(uint32_t amount) {
+JSRuntime::reduceGCTriggerBytes(size_t amount) {
     JS_ASSERT(amount > 0);
     JS_ASSERT(gcTriggerBytes - amount >= 0);
     if (gcTriggerBytes - amount < GC_ALLOCATION_THRESHOLD * GC_HEAP_GROWTH_FACTOR)
         return;
     gcTriggerBytes -= amount;
 }
 
 void
@@ -1394,17 +1397,17 @@ JSCompartment::setGCLastBytes(size_t las
     gcLastBytes = lastBytes;
 
     size_t base = gckind == GC_SHRINK ? lastBytes : Max(lastBytes, GC_ALLOCATION_THRESHOLD);
     float trigger = float(base) * GC_HEAP_GROWTH_FACTOR;
     gcTriggerBytes = size_t(Min(float(rt->gcMaxBytes), trigger));
 }
 
 void
-JSCompartment::reduceGCTriggerBytes(uint32_t amount) {
+JSCompartment::reduceGCTriggerBytes(size_t amount) {
     JS_ASSERT(amount > 0);
     JS_ASSERT(gcTriggerBytes - amount >= 0);
     if (gcTriggerBytes - amount < GC_ALLOCATION_THRESHOLD * GC_HEAP_GROWTH_FACTOR)
         return;
     gcTriggerBytes -= amount;
 }
 
 namespace js {
@@ -1422,43 +1425,31 @@ ArenaLists::allocateFromArena(JSCompartm
     volatile uintptr_t *bfs = &backgroundFinalizeState[thingKind];
     if (*bfs != BFS_DONE) {
         /*
          * We cannot search the arena list for free things while the
          * background finalization runs and can modify head or cursor at any
          * moment. So we always allocate a new arena in that case.
          */
         maybeLock.lock(comp->rt);
-        for (;;) {
-            if (*bfs == BFS_DONE)
-                break;
-
-            if (*bfs == BFS_JUST_FINISHED) {
-                /*
-                 * Before we took the GC lock or while waiting for the
-                 * background finalization to finish the latter added new
-                 * arenas to the list.
-                 */
-                *bfs = BFS_DONE;
-                break;
-            }
-
+        if (*bfs == BFS_RUN) {
             JS_ASSERT(!*al->cursor);
             chunk = PickChunk(comp);
-            if (chunk)
-                break;
-
-            /*
-             * If the background finalization still runs, wait for it to
-             * finish and retry to check if it populated the arena list or
-             * added new empty arenas.
-             */
-            JS_ASSERT(*bfs == BFS_RUN);
-            comp->rt->gcHelperThread.waitBackgroundSweepEnd();
-            JS_ASSERT(*bfs == BFS_JUST_FINISHED || *bfs == BFS_DONE);
+            if (!chunk) {
+                /*
+                 * Let the caller wait for the background finalization to
+                 * finish and restart the allocation attempt.
+                 */
+                return NULL;
+            }
+        } else if (*bfs == BFS_JUST_FINISHED) {
+            /* See comments before BackgroundFinalizeState definition. */
+            *bfs = BFS_DONE;
+        } else {
+            JS_ASSERT(*bfs == BFS_DONE);
         }
     }
 #endif /* JS_THREADSAFE */
 
     if (!chunk) {
         if (ArenaHeader *aheader = *al->cursor) {
             JS_ASSERT(aheader->hasFreeThings());
 
@@ -1492,16 +1483,19 @@ ArenaLists::allocateFromArena(JSCompartm
      * it to the list as a fully allocated arena.
      *
      * We add the arena before the the head, not after the tail pointed by the
      * cursor, so after the GC the most recently added arena will be used first
      * for allocations improving cache locality.
      */
     JS_ASSERT(!*al->cursor);
     ArenaHeader *aheader = chunk->allocateArena(comp, thingKind);
+    if (!aheader)
+        return NULL;
+
     aheader->next = al->head;
     if (!al->head) {
         JS_ASSERT(al->cursor == &al->head);
         al->cursor = &aheader->next;
     }
     al->head = aheader;
 
     /* See comments before allocateFromNewArena about this assert. */
@@ -1667,55 +1661,62 @@ RunLastDitchGC(JSContext *cx)
     JSRuntime *rt = cx->runtime;
 
     /* The atoms are locked when we create a string in AtomizeInline. */
     AutoUnlockAtomsCompartmentWhenLocked unlockAtomsCompartment(cx);
 
     /* The last ditch GC preserves all atoms. */
     AutoKeepAtoms keep(rt);
     js_GC(cx, rt->gcTriggerCompartment, GC_NORMAL, gcstats::LASTDITCH);
-
-#ifdef JS_THREADSAFE
-    if (rt->gcBytes >= rt->gcMaxBytes) {
-        AutoLockGC lock(rt);
-        cx->runtime->gcHelperThread.waitBackgroundSweepEnd();
-    }
-#endif
 }
 
 /* static */ void *
 ArenaLists::refillFreeList(JSContext *cx, AllocKind thingKind)
 {
     JS_ASSERT(cx->compartment->arenas.freeLists[thingKind].isEmpty());
 
     JSCompartment *comp = cx->compartment;
     JSRuntime *rt = comp->rt;
     JS_ASSERT(!rt->gcRunning);
 
     bool runGC = !!rt->gcIsNeeded;
     for (;;) {
         if (JS_UNLIKELY(runGC)) {
             RunLastDitchGC(cx);
 
-            /* Report OOM of the GC failed to free enough memory. */
-            if (rt->gcBytes > rt->gcMaxBytes)
-                break;
-
             /*
              * The JSGC_END callback can legitimately allocate new GC
              * things and populate the free list. If that happens, just
              * return that list head.
              */
             size_t thingSize = Arena::thingSize(thingKind);
             if (void *thing = comp->arenas.allocateFromFreeList(thingKind, thingSize))
                 return thing;
         }
-        void *thing = comp->arenas.allocateFromArena(comp, thingKind);
-        if (JS_LIKELY(!!thing))
-            return thing;
+
+        /*
+         * allocateFromArena may fail while the background finalization still
+         * runs. In that case we want to wait for it to finish and restart.
+         * However, checking for that is racy as the background finalization
+         * could free some things after allocateFromArena decided to fail but
+         * at this point it may have already stopped. To avoid this race we
+         * always try to allocate twice.
+         */
+        for (bool secondAttempt = false; ; secondAttempt = true) {
+            void *thing = comp->arenas.allocateFromArena(comp, thingKind);
+            if (JS_LIKELY(!!thing))
+                return thing;
+            if (secondAttempt)
+                break;
+
+            AutoLockGC lock(rt);
+#ifdef JS_THREADSAFE
+            rt->gcHelperThread.waitBackgroundSweepEnd();
+#endif
+        }
 
         /*
          * We failed to allocate. Run the GC if we haven't done it already.
          * Otherwise report OOM.
          */
         if (runGC)
             break;
         runGC = true;