Bug 1575214 - remove gloda's manual calls to garbage collection, which were causing lockups at times. r=florian
author Magnus Melin <mkmelin+mozilla@iki.fi>
date Mon, 09 Mar 2020 21:11:50 +0200
changeset 29061 58f8cf2d80892b5dec578a574222107b78d2ecf6
parent 29060 5384fda06708564328b64ccf7daf55ab4753cd3e
child 29062 17149d0a4ffcaf84cfcdc7b6b69e0ea68af1f947
push id 17181
push user thunderbird@calypsoblue.org
push date Wed, 25 Mar 2020 01:59:23 +0000
reviewers florian
bugs 1575214
mailnews/db/gloda/modules/GlodaIndexer.jsm
mailnews/db/gloda/modules/GlodaUtils.jsm
mailnews/db/gloda/modules/IndexMsg.jsm
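
The helpers removed below funneled every collection through Cu.forceGC(), which runs a synchronous, full-heap collection on the main thread; during a long indexing run those pauses could stack up and lock the UI. A minimal sketch of the contrast, not part of this patch (Cu.schedulePreciseGC is mentioned only as the deferred alternative; the patch itself just drops manual hinting and relies on the JS engine's own GC scheduling):

const { utils: Cu } = Components;

// Removed pattern: a synchronous, full-heap collection. The calling script
// (and the UI, which shares the main thread) blocks until the sweep finishes.
function forcedGC() {
  Cu.forceGC();
}

// Deferred alternative: queue the collection for a moment when no JS is on
// the stack and get called back afterwards, so indexing never stalls mid-batch.
function deferredGC() {
  Cu.schedulePreciseGC(() => {
    // Collection has completed; resume any bookkeeping here.
  });
}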
--- a/mailnews/db/gloda/modules/GlodaIndexer.jsm
+++ b/mailnews/db/gloda/modules/GlodaIndexer.jsm
@@ -1276,19 +1276,16 @@ var GlodaIndexer = {
         // Set up an async notification to happen after the commit completes so
         //  that we can avoid the indexer doing something with the database that
         //  causes the main thread to block against the completion of the commit
         //  (which can be a while) on 1.9.1.
         GlodaDatastore.runPostCommit(this._callbackHandle.wrappedCallback);
         // kick off the commit
         GlodaDatastore._commitTransaction();
         yield this.kWorkAsync;
-        // Let's do the GC after the commit completes just so we can avoid
-        //  having any ugly interactions.
-        GlodaUtils.forceGarbageCollection(false);
         this._lastCommitTime = Date.now();
         // Restart the transaction if we still have work.
         if (haveMoreWork) {
           GlodaDatastore._beginTransaction();
         } else {
           transactionToCommit = false;
         }
       }
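
With the forced collection gone, the commit path reduces to: register a post-commit callback, commit, and yield until that callback resumes the generator. A simplified sketch of the surviving flow, using the names visible in the hunk above (the batch bookkeeping around it is elided):

// Runs inside GlodaIndexer's generator-based worker protocol.
function* commitAndResume(callbackHandle, haveMoreWork) {
  // Resume this generator only after the async commit completes, so the
  // indexer never touches the database while the commit is in flight.
  GlodaDatastore.runPostCommit(callbackHandle.wrappedCallback);
  GlodaDatastore._commitTransaction();
  yield GlodaIndexer.kWorkAsync; // woken by the post-commit callback
  GlodaIndexer._lastCommitTime = Date.now(); // formerly: forceGarbageCollection()
  if (haveMoreWork) {
    GlodaDatastore._beginTransaction();
  }
}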
--- a/mailnews/db/gloda/modules/GlodaUtils.jsm
+++ b/mailnews/db/gloda/modules/GlodaUtils.jsm
@@ -99,62 +99,9 @@ var GlodaUtils = {
         if (cardForEmailAddress) {
           return cardForEmailAddress;
         }
       } catch (ex) {}
     }
 
     return null;
   },
-
-  _FORCE_GC_AFTER_NUM_HEADERS: 4096,
-  _headersSeen: 0,
-  /**
-   * As |forceGarbageCollection| says, once XPConnect sees a header, it likes
-   *  to hold onto that reference.  This method is used to track the number of
-   *  headers we have seen and force a GC when we have to.
-   *
-   * Ideally the indexer's idle-biased GC mechanism would take care of all the
-   *  GC; we are just a failsafe to make sure that our memory usage is bounded
-   *  based on the number of headers we have seen rather than just time.
-   *  Since holding onto headers can keep databases around too, this also
-   *  helps avoid keeping file handles open, etc.
-   *
-   * |forceGarbageCollection| will zero our tracking variable when a GC happens
-   *  so we are informed by the indexer's GC triggering.
-   *
-   * And of course, we don't want to trigger collections willy nilly because
-   *  they have a cost even if there is no garbage.
-   *
-   * @param aNumHeadersSeen The number of headers code has seen.  A granularity
-   *     of hundreds of messages should be fine.
-   */
-  considerHeaderBasedGC(aNumHeadersSeen) {
-    this._headersSeen += aNumHeadersSeen;
-    if (this._headersSeen >= this._FORCE_GC_AFTER_NUM_HEADERS) {
-      this.forceGarbageCollection();
-    }
-  },
-
-  /**
-   * Force a garbage-collection sweep.  Gloda has to force garbage collection
-   *  periodically because XPConnect's XPCJSRuntime::DeferredRelease mechanism
-   *  can end up holding onto a ridiculously high number of XPConnect objects in
-   *  between normal garbage collections.  This has mainly posed a problem
-   *  because nsAutolock is a jerk in DEBUG builds in 1.9.1, but in theory this
-   *  also helps us even out our memory usage.
-   * We also are starting to do this more to try and keep the garbage collection
-   *  durations acceptable.  We intentionally avoid triggering the cycle
-   *  collector in those cases, as we do presume a non-trivial fixed cost for
-   *  cycle collection.  (And really all we want is XPConnect to not be a jerk.)
-   * This method exists mainly to centralize our GC activities and because if
-   *  we do start involving the cycle collector, that is a non-trivial block of
-   *  code to copy-and-paste all over the place (at least in a module).
-   *
-   * @param aCycleCollecting Do we need the cycle collector to run?  Currently
-   *     unused / unimplemented, but we would use
-   *     nsIDOMWindowUtils.garbageCollect() to do so.
-   */
-  forceGarbageCollection(aCycleCollecting) {
-    Cu.forceGC();
-    this._headersSeen = 0;
-  },
 };
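
On the consumer side (the next file), each removal leaves the same loop shape behind: the kWorkSync yield that keeps the UI responsive stays, and only the header-counting GC check disappears. A condensed sketch of that surviving pattern (enumerator details elided):

const HEADER_CHECK_SYNC_BLOCK_SIZE = 25;

function* sweepHeaders(headerEnumerator) {
  let count = 0;
  for (const msgHdr of headerEnumerator) {
    // Yield periodically so long header sweeps never monopolize the UI thread.
    if (++count % HEADER_CHECK_SYNC_BLOCK_SIZE == 0) {
      yield GlodaMsgIndexer.kWorkSync;
    }
    // ...per-header investigation continues as in the hunks below...
  }
}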
--- a/mailnews/db/gloda/modules/IndexMsg.jsm
+++ b/mailnews/db/gloda/modules/IndexMsg.jsm
@@ -1112,21 +1112,16 @@ var GlodaMsgIndexer = {
   },
 
   /**
    * The number of headers to look at before yielding with kWorkSync.  This
    *  is for time-slicing purposes so we still yield to the UI periodically.
    */
   HEADER_CHECK_SYNC_BLOCK_SIZE: 25,
 
-  /**
-   * The number of headers to look at before calling
-   */
-  HEADER_CHECK_GC_BLOCK_SIZE: 256,
-
   FOLDER_COMPACTION_PASS_BATCH_SIZE: 512,
   /**
   * Special indexing pass for (local) folders that have been compacted.  The
    *  compaction can cause message keys to change because message keys in local
    *  folders are simply offsets into the mbox file.  Accordingly, we need to
    *  update the gloda records/objects to point them at the new message key.
    *
    * Our general algorithm is to perform two traversals in parallel.  The first
@@ -1194,17 +1189,16 @@ var GlodaMsgIndexer = {
     if (!this._indexingGlodaFolder.compacted) {
       yield this.kWorkDone;
     }
 
     // this is a forward enumeration (sometimes we reverse enumerate; not here)
     this._indexerGetEnumerator(this.kEnumIndexedMsgs);
 
     const HEADER_CHECK_SYNC_BLOCK_SIZE = this.HEADER_CHECK_SYNC_BLOCK_SIZE;
-    const HEADER_CHECK_GC_BLOCK_SIZE = this.HEADER_CHECK_GC_BLOCK_SIZE;
     const FOLDER_COMPACTION_PASS_BATCH_SIZE = this
       .FOLDER_COMPACTION_PASS_BATCH_SIZE;
 
     // Tuples of [gloda id, message key, message-id header] from
     //  folderCompactionPassBlockFetch
     let glodaIdsMsgKeysHeaderIds = [];
     // Unpack each tuple from glodaIdsMsgKeysHeaderIds into these guys.
     // (Initialize oldMessageKey because we use it to kickstart our query.)
@@ -1245,20 +1239,16 @@ var GlodaMsgIndexer = {
       }
 
       if (msgHdr) {
         numHeadersSeen++;
         if (numHeadersSeen % HEADER_CHECK_SYNC_BLOCK_SIZE == 0) {
           yield this.kWorkSync;
         }
 
-        if (numHeadersSeen % HEADER_CHECK_GC_BLOCK_SIZE == 0) {
-          GlodaUtils.considerHeaderBasedGC(HEADER_CHECK_GC_BLOCK_SIZE);
-        }
-
         // There is no need to check with PendingCommitTracker.  If a message
         //  somehow got indexed between the time the compaction killed
         //  everything and the time we run, that is a bug.
         glodaId = msgHdr.getUint32Property(GLODA_MESSAGE_ID_PROPERTY);
         // (there is also no need to check for gloda dirty since the enumerator
         //  filtered that for us.)
       }
 
@@ -1412,17 +1402,16 @@ var GlodaMsgIndexer = {
     }
 
     // Make sure listeners get notified about this job.
     GlodaIndexer._notifyListeners();
 
     // there is of course a cost to all this header investigation even if we
     //  don't do something.  so we will yield with kWorkSync for every block.
     const HEADER_CHECK_SYNC_BLOCK_SIZE = this.HEADER_CHECK_SYNC_BLOCK_SIZE;
-    const HEADER_CHECK_GC_BLOCK_SIZE = this.HEADER_CHECK_GC_BLOCK_SIZE;
 
     // we can safely presume if we are here that this folder has been selected
     //  for offline processing...
 
     // -- Filthy Folder
     // A filthy folder may have misleading properties on the message that claim
     //  the message is indexed.  They are misleading because the database, for
     //  whatever reason, does not have the messages (accurately) indexed.
@@ -1441,20 +1430,16 @@ var GlodaMsgIndexer = {
         this._indexingEnumerator,
         Ci.nsIMsgDBHdr
       )) {
         // we still need to avoid locking up the UI, pause periodically...
         if (++count % HEADER_CHECK_SYNC_BLOCK_SIZE == 0) {
           yield this.kWorkSync;
         }
 
-        if (count % HEADER_CHECK_GC_BLOCK_SIZE == 0) {
-          GlodaUtils.considerHeaderBasedGC(HEADER_CHECK_GC_BLOCK_SIZE);
-        }
-
         let glodaMessageId = msgHdr.getUint32Property(
           GLODA_MESSAGE_ID_PROPERTY
         );
         // if it has a gloda message id, we need to mark it filthy
         if (glodaMessageId != 0) {
           msgHdr.setUint32Property(GLODA_DIRTY_PROPERTY, this.kMessageFilthy);
         }
         // if it doesn't have a gloda message id, we will definitely index it,
@@ -1510,20 +1495,16 @@ var GlodaMsgIndexer = {
         Ci.nsIMsgDBHdr
       )) {
         // per above, we want to periodically release control while doing all
         // this header traversal/investigation.
         if (++count % HEADER_CHECK_SYNC_BLOCK_SIZE == 0) {
           yield this.kWorkSync;
         }
 
-        if (count % HEADER_CHECK_GC_BLOCK_SIZE == 0) {
-          GlodaUtils.considerHeaderBasedGC(HEADER_CHECK_GC_BLOCK_SIZE);
-        }
-
         // To keep our counts more accurate, increment the offset before
         //  potentially skipping any messages.
         ++aJob.offset;
 
         // Skip messages that have not yet been reported to us as existing via
         //  msgsClassified.
         if (
           this._indexingFolder.getProcessingFlags(msgHdr.messageKey) &