Bug 470329 - gloda indexer throttling logic needs to be adaptive. r/sr=dmose.
author Andrew Sutherland <asutherland@asutherland.org>
Thu, 26 Feb 2009 13:15:30 -0800
changeset 2091 a44909e6dcadb545283815b0f47553de014681b0
parent 2090 81b62dba7723f5139682c9a2bc12b2871e60ec00
child 2092 7c41136fa251ba5dafd11a2b6a56fb0b8ca0e695
push id 1688
push user bugmail@asutherland.org
push date Thu, 26 Feb 2009 21:15:46 +0000
treeherder comm-central@a44909e6dcad
bugs 470329
mailnews/base/build/nsMsgFactory.cpp
mailnews/base/public/Makefile.in
mailnews/base/public/nsIStopwatch.idl
mailnews/base/util/Makefile.in
mailnews/base/util/nsStopwatch.cpp
mailnews/base/util/nsStopwatch.h
mailnews/db/gloda/modules/indexer.js
mailnews/db/gloda/modules/utils.js
mailnews/db/gloda/test/resources/glodaTestHelper.js
mailnews/db/gloda/test/resources/mockIndexer.js
mailnews/db/gloda/test/resources/mockTimer.js
mailnews/db/gloda/test/unit/test_index_adaptive.js
--- a/mailnews/base/build/nsMsgFactory.cpp
+++ b/mailnews/base/build/nsMsgFactory.cpp
@@ -118,16 +118,18 @@
 #endif
 #if defined(MOZ_WIDGET_GTK) || defined(MOZ_WIDGET_GTK2)
 #include "nsMessengerUnixIntegration.h"
 #endif
 
 #include "nsCURILoader.h"
 #include "nsMessengerContentHandler.h"
 
+#include "nsStopwatch.h"
+
 // private factory declarations for each component we know how to produce
 
 NS_GENERIC_FACTORY_CONSTRUCTOR(nsMessengerBootstrap)
 NS_GENERIC_FACTORY_CONSTRUCTOR_INIT(nsMsgMailSession, Init)
 NS_GENERIC_FACTORY_CONSTRUCTOR(nsMessenger)
 NS_GENERIC_FACTORY_CONSTRUCTOR_INIT(nsMsgAccountManager, Init)
 NS_GENERIC_FACTORY_CONSTRUCTOR(nsMsgAccount)
 NS_GENERIC_FACTORY_CONSTRUCTOR(nsMsgIdentity)
@@ -178,16 +180,17 @@ NS_GENERIC_FACTORY_CONSTRUCTOR_INIT(nsMe
 #endif
 #if defined(MOZ_WIDGET_GTK) || defined(MOZ_WIDGET_GTK2)
 NS_GENERIC_FACTORY_CONSTRUCTOR_INIT(nsMessengerUnixIntegration, Init)
 #endif
 NS_GENERIC_FACTORY_CONSTRUCTOR(nsMessengerContentHandler)
 NS_GENERIC_FACTORY_CONSTRUCTOR_INIT(nsMsgContentPolicy, Init)
 NS_GENERIC_FACTORY_CONSTRUCTOR(nsMailDirProvider)
 NS_GENERIC_FACTORY_CONSTRUCTOR(nsMsgShutdownService)
+NS_GENERIC_FACTORY_CONSTRUCTOR(nsStopwatch)
 
 static NS_METHOD
 RegisterMailnewsContentPolicy(nsIComponentManager *aCompMgr, nsIFile *aPath,
                               const char *registryLocation, const char *componentType,
                               const nsModuleComponentInfo *info)
 {
   nsresult rv;
   nsCOMPtr<nsICategoryManager> catman =
@@ -505,13 +508,18 @@ static const nsModuleComponentInfo gComp
     },
     {
       "mail director provider",
       MAILDIRPROVIDER_CID,
       NS_MAILDIRPROVIDER_CONTRACTID,
       nsMailDirProviderConstructor,
       nsMailDirProvider::Register,
       nsMailDirProvider::Unregister
+    },
+    {
+      "stopwatch", NS_STOPWATCH_CID,
+      NS_STOPWATCH_CONTRACTID,
+      nsStopwatchConstructor
     }
 };
 
 NS_IMPL_NSGETMODULE(nsMsgBaseModule, gComponents)
   
--- a/mailnews/base/public/Makefile.in
+++ b/mailnews/base/public/Makefile.in
@@ -96,12 +96,13 @@ XPIDLSRCS	= \
 		nsIMessengerOSIntegration.idl \
 		nsIMsgMdnGenerator.idl	        \
 		nsISpamSettings.idl	        \
 		nsIMapiRegistry.idl \
 		nsIMsgCustomColumnHandler.idl \
 		nsIMsgShutdown.idl \
 		nsMsgFolderFlags.idl \
 		nsMsgMessageFlags.idl \
+		nsIStopwatch.idl \
 		$(NULL)
 
 include $(topsrcdir)/config/rules.mk
 
new file mode 100644
--- /dev/null
+++ b/mailnews/base/public/nsIStopwatch.idl
@@ -0,0 +1,77 @@
+/* ***** BEGIN LICENSE BLOCK *****
+ * Version: MPL 1.1/GPL 2.0/LGPL 2.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is mozilla.org code.
+ *
+ * The Initial Developer of the Original Code is
+ * Mozilla Messaging, Inc.
+ * Portions created by the Initial Developer are Copyright (C) 1998
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ *   Andrew Sutherland <asutherland@asutherland.org>
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * either of the GNU General Public License Version 2 or later (the "GPL"),
+ * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
+ * in which case the provisions of the GPL or the LGPL are applicable instead
+ * of those above. If you wish to allow use of your version of this file only
+ * under the terms of either the GPL or the LGPL, and not to allow others to
+ * use your version of this file under the terms of the MPL, indicate your
+ * decision by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL or the LGPL. If you do not delete
+ * the provisions above, a recipient may use your version of this file under
+ * the terms of any one of the MPL, the GPL or the LGPL.
+ *
+ * ***** END LICENSE BLOCK ***** */
+#include "nsISupports.idl"
+
+/**
+ * Simple stopwatch mechanism for determining the amount of wall-clock time and
+ * CPU time (user + system) that has elapsed.  It is not fancy.  It is either
+ * running or it is not.  If you want coherent cpu and real time values, then
+ * you had better stop it first.  It does not keep counting when stopped,
+ * although one could add a resumeRetroactive or something to accomplish that.
+ */
+[scriptable, uuid(7a671d6e-d48f-4a4f-b87e-644815a5e381)]
+interface nsIStopwatch : nsISupports {
+  /**
+   * Start the stopwatch; all counters are reset to zero.  If you want to
+   * keep the already accumulated values, use resume instead.
+   */
+  void start();
+
+  /**
+   * Stop the stopwatch.
+   */
+  void stop();
+
+  /**
+   * Resume the stopwatch without clearing the existing counters.  Any time
+   * already accumulated on cpuTime/realTime will be kept.
+   */
+  void resume();
+
+  /**
+   * The total CPU time (user + system) in seconds accumulated between calls to
+   * start/resume and stop.  You have to stop the stopwatch to cause this value
+   * to update.
+   */
+  readonly attribute double cpuTimeSeconds;
+  /**
+   * The total wall clock time in seconds accumulated between calls to
+   * start/resume and stop.  You have to stop the stopwatch to cause this value
+   * to update.
+   */
+  readonly attribute double realTimeSeconds;
+};
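
The intended usage pattern is stop-then-read; a minimal consumer sketch, assuming chrome-privileged script where the usual Cc/Ci shorthands are in scope (as in the indexer change below) and where doSomeWork() is a hypothetical stand-in for real work:

    // Minimal nsIStopwatch consumer sketch.  Per the interface comment, stop
    //  the stopwatch before reading if you want coherent values.
    let stopwatch = Cc["@mozilla.org/stopwatch;1"]
                      .createInstance(Ci.nsIStopwatch);
    stopwatch.start();
    doSomeWork(); // hypothetical workload
    stopwatch.stop();
    // fraction of the wall-clock interval this process spent on the CPU
    let cpuFraction = stopwatch.cpuTimeSeconds / stopwatch.realTimeSeconds;
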
--- a/mailnews/base/util/Makefile.in
+++ b/mailnews/base/util/Makefile.in
@@ -97,16 +97,17 @@ CPPSRCS		= \
 		nsMsgUtils.cpp \
 		nsMsgProtocol.cpp \
 		nsMsgMailNewsUrl.cpp \
 		nsMsgTxn.cpp \
 		nsMsgI18N.cpp \
 		nsImapMoveCoalescer.cpp \
 		nsMsgFileStream.cpp \
 		nsMsgReadStateTxn.cpp \
+		nsStopwatch.cpp \
 		$(NULL)
 
 EXPORTS		= \
 		nsMsgLineBuffer.h \
 		nsMsgKeySet.h \
 		nsMsgDBFolder.h \
 		nsMsgIdentity.h \
 		nsMsgIncomingServer.h \
new file mode 100644
--- /dev/null
+++ b/mailnews/base/util/nsStopwatch.cpp
@@ -0,0 +1,170 @@
+#include <stdio.h>
+#include <time.h>
+#ifdef XP_UNIX
+#include <unistd.h>
+#include <sys/times.h>
+#include <errno.h>
+#endif
+#ifdef XP_WIN
+#include "windows.h"
+#endif
+
+#include "nsIClassInfoImpl.h"
+
+#include "nsStopwatch.h"
+
+/*
+ * The basis for the logic in this file comes from (or rather, used to come
+ *  from) (mozilla/)modules/libutil/public/stopwatch.cpp.
+ *
+ * It is no longer used in the mozilla tree, and is being migrated to
+ * comm-central where we actually have a need for it.  ("Being" in the sense
+ * that it will not be removed immediately from mozilla-central.)
+ *
+ * Simplification and general clean-up have been performed, and the fix for
+ * bug 96669 has been integrated.
+ */
+
+NS_DECL_CLASSINFO(nsStopwatch)
+NS_IMPL_ISUPPORTS1_CI(nsStopwatch, nsIStopwatch)
+
+#ifdef WINCE
+#error "WINCE apparently does not provide the clock support we require."
+#endif
+
+#ifdef XP_UNIX
+/** the number of ticks per second */
+static double gTicks = 0;
+#elif defined(WIN32)
+// a tick every 100ns, 10 per us, 10 * 1000 per ms, 10 * 1000 * 1000 per sec.
+#define TICKS_PER_SECOND 10000000.0
+// subtract off to get to the unix epoch
+#define UNIX_EPOCH_IN_FILE_TIME 116444736000000000LL
+#endif // XP_UNIX
+
+nsStopwatch::nsStopwatch()
+ : fTotalRealTimeSecs(0.0)
+ , fTotalCpuTimeSecs(0.0)
+ , fRunning(false)
+{
+#ifdef XP_UNIX
+  // idempotent in the event of a race under all coherency models
+  if (!gTicks)
+  {
+    gTicks = (double)sysconf(_SC_CLK_TCK);
+    // sysconf returns -1 on failure; in that event, pick an arbitrary value
+    //  so we don't divide by zero.
+    if (gTicks <= 0)
+      gTicks = 1000000L;
+  }
+#endif
+}
+
+nsStopwatch::~nsStopwatch()
+{
+}
+
+NS_IMETHODIMP nsStopwatch::Start()
+{
+  fTotalRealTimeSecs = 0.0;
+  fTotalCpuTimeSecs = 0.0;
+  return Resume();
+}
+
+NS_IMETHODIMP nsStopwatch::Stop()
+{
+  fStopRealTimeSecs = GetRealTime();
+  fStopCpuTimeSecs  = GetCPUTime();
+  if (fRunning)
+  {
+    fTotalCpuTimeSecs  += fStopCpuTimeSecs  - fStartCpuTimeSecs;
+    fTotalRealTimeSecs += fStopRealTimeSecs - fStartRealTimeSecs;
+  }
+  fRunning = false;
+  return NS_OK;
+}
+
+NS_IMETHODIMP nsStopwatch::Resume()
+{
+  if (!fRunning)
+  {
+    fStartRealTimeSecs = GetRealTime();
+    fStartCpuTimeSecs  = GetCPUTime();
+  }
+  fRunning = true;
+  return NS_OK;
+}
+
+NS_IMETHODIMP nsStopwatch::GetCpuTimeSeconds(double *result)
+{
+  NS_ENSURE_ARG_POINTER(result);
+  *result = fTotalCpuTimeSecs;
+  return NS_OK;
+}
+
+NS_IMETHODIMP nsStopwatch::GetRealTimeSeconds(double *result)
+{
+  NS_ENSURE_ARG_POINTER(result);
+  *result = fTotalRealTimeSecs;
+  return NS_OK;
+}
+
+double nsStopwatch::GetRealTime()
+{
+#if defined(XP_UNIX)
+  struct tms cpt;
+  return (double)times(&cpt) / gTicks;
+#elif defined(WIN32)
+  union     {FILETIME ftFileTime;
+             __int64  ftInt64;
+            } ftRealTime; // the current system time as a 64-bit FILETIME
+  SYSTEMTIME st;
+  GetSystemTime(&st);
+  SystemTimeToFileTime(&st,&ftRealTime.ftFileTime);
+  return (double)(ftRealTime.ftInt64 - UNIX_EPOCH_IN_FILE_TIME) /
+                 TICKS_PER_SECOND;
+#endif
+}
+
+double nsStopwatch::GetCPUTime()
+{
+#if defined(XP_UNIX)
+  struct tms cpt;
+  times(&cpt);
+  return (double)(cpt.tms_utime+cpt.tms_stime) / gTicks;
+#elif defined(WIN32)
+
+  DWORD       ret;
+  FILETIME    ftCreate,       // when the process was created
+              ftExit;         // when the process exited
+
+  union     {FILETIME ftFileTime;
+             __int64  ftInt64;
+            } ftKernel; // time the process has spent in kernel mode
+
+  union     {FILETIME ftFileTime;
+             __int64  ftInt64;
+            } ftUser;   // time the process has spent in user mode
+
+  HANDLE hProcess = GetCurrentProcess();
+  ret = GetProcessTimes (hProcess, &ftCreate, &ftExit,
+                                   &ftKernel.ftFileTime,
+                                   &ftUser.ftFileTime);
+  if (!ret)
+  {
+    ret = GetLastError();
+#ifdef DEBUG
+    printf("Error on GetProcessTimes 0x%lx\n", (unsigned long)ret);
+#endif
+  }
+
+  /*
+   * Process times are returned in a 64-bit structure, as the number of
+   * 100 nanosecond ticks since 1 January 1601.  User mode and kernel mode
+   * times for this process are in separate 64-bit structures.
+   * To convert to floating point seconds, we sum the two 64-bit tick counts
+   * and divide by the number of ticks per second.
+   */
+  return (double) (ftKernel.ftInt64 + ftUser.ftInt64) / TICKS_PER_SECOND;
+#endif
+}
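
For reference, the Win32 GetRealTime() path above is fixed-point arithmetic on 100-nanosecond ticks; a standalone sketch of the same conversion (JavaScript BigInt keeps the 64-bit math exact; the constants mirror TICKS_PER_SECOND and UNIX_EPOCH_IN_FILE_TIME above):

    // FILETIME counts 100ns ticks since 1 January 1601; subtracting the
    //  1601->1970 offset and dividing by the tick rate yields unix seconds.
    const TICKS_PER_SECOND = 10000000n;
    const UNIX_EPOCH_IN_FILE_TIME = 116444736000000000n;
    function fileTimeToUnixSeconds(ftInt64) {
      return Number(ftInt64 - UNIX_EPOCH_IN_FILE_TIME) /
             Number(TICKS_PER_SECOND);
    }
    // fileTimeToUnixSeconds(116444736000000000n) === 0, the unix epoch
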
new file mode 100644
--- /dev/null
+++ b/mailnews/base/util/nsStopwatch.h
@@ -0,0 +1,45 @@
+#ifndef _nsStopwatch_h_
+#define _nsStopwatch_h_
+
+#include "nsIStopwatch.h"
+
+#include "msgCore.h"
+
+#define NS_STOPWATCH_CID \
+{0x6ef7eafd, 0x72d0, 0x4c56, {0x94, 0x09, 0x67, 0xe1, 0x6d, 0x0f, 0x25, 0x5b}}
+
+#define NS_STOPWATCH_CONTRACTID "@mozilla.org/stopwatch;1"
+
+#undef  IMETHOD_VISIBILITY
+#define IMETHOD_VISIBILITY NS_VISIBILITY_DEFAULT
+
+class NS_MSG_BASE nsStopwatch : public nsIStopwatch
+{
+public:
+  NS_DECL_ISUPPORTS
+  NS_DECL_NSISTOPWATCH
+
+  nsStopwatch();
+  virtual ~nsStopwatch();
+private:
+  /// Wall-clock start time in seconds since unix epoch.
+  double fStartRealTimeSecs;
+  /// Wall-clock stop time in seconds since unix epoch.
+  double fStopRealTimeSecs;
+  /// CPU-clock start time in seconds (of CPU time used since app start)
+  double fStartCpuTimeSecs;
+  /// CPU-clock stop time in seconds (of CPU time used since app start)
+  double fStopCpuTimeSecs;
+  /// Total wall-clock time elapsed in seconds. 
+  double fTotalRealTimeSecs;
+  /// Total CPU time elapsed in seconds.
+  double fTotalCpuTimeSecs;
+
+  /// Is the timer running?
+  bool fRunning;
+  
+  static double GetRealTime();
+  static double GetCPUTime();
+};
+
+#endif // _nsStopwatch_h_
--- a/mailnews/db/gloda/modules/indexer.js
+++ b/mailnews/db/gloda/modules/indexer.js
@@ -291,16 +291,25 @@ var GlodaIndexer = {
    */
   _timer: null,
   /**
    * Our nsITimer that we use to schedule events in the "far" future.  For now,
    *  this means not compelling an initial indexing sweep until some number of
    *  seconds after startup. 
    */
   _longTimer: null,
+  /**
+   * Our performance stopwatch that helps us adapt our indexing constants so
+   *  as to not explode your computer.  Kind of us, no?
+   */
+  _perfStopwatch: null,
+  /**
+   * Of course, we need a timer to actually drive our stopwatch usage.
+   */
+  _perfTimer: null,
 
   _inited: false,
   /**
    * Initialize the indexer.
    */
   _init: function gloda_index_init() {
     if (this._inited)
       return;
@@ -312,16 +321,25 @@ var GlodaIndexer = {
     this._msgFolderListener.indexer = this;
     
     this._callbackHandle.init();
     
     // create the timer that drives our intermittent indexing
     this._timer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
     // create the timer for larger offsets independent of indexing
     this._longTimer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
+    
+    // create our performance stopwatch and timer
+    try {
+      this._perfStopwatch = Cc["@mozilla.org/stopwatch;1"]
+                              .createInstance(Ci.nsIStopwatch);
+    } catch (ex) {
+      this._log.error("problem creating stopwatch!: " + ex);
+    }
+    this._perfTimer = Cc["@mozilla.org/timer;1"].createInstance(Ci.nsITimer);
 
     // figure out if event-driven indexing should be enabled...
     let prefService = Cc["@mozilla.org/preferences-service;1"].
                         getService(Ci.nsIPrefService);
     let branch = prefService.getBranch("mailnews.database.global.indexer.");
     let eventDrivenEnabled = false; // default
     let performInitialSweep = true; // default
     try {
@@ -360,16 +378,22 @@ var GlodaIndexer = {
       this._timer.cancel();
     } catch (ex) {}
     this._timer = null;
     try {
       this._longTimer.cancel();
     } catch (ex) {}
     this._longTimer = null;
     
+    this._perfStopwatch = null;
+    try {
+      this._perfTimer.cancel();
+    } catch (ex) {}
+    this._perfTimer = null;
+    
     this._indexerIsShutdown = true;
     
     if (!this.enabled)
       return true;
     
     this._log.info("Shutting Down");
 
     this.suppressIndexing = true;
@@ -529,16 +553,20 @@ var GlodaIndexer = {
     if (!this._indexingDesired && aShouldIndex) {
       this._indexingDesired = true;
       if (this.enabled && !this._indexingActive && !this._suppressIndexing) {
         this._log.info("+++ Indexing Queue Processing Commencing");
         this._indexingActive = true;
         this._timer.initWithCallback(this._wrapCallbackDriver,
                                      this._indexInterval,
                                      Ci.nsITimer.TYPE_ONE_SHOT);
+        // Start the performance sampling timer since indexing is now active.
+        // (That's the dude who tracks processor utilization and adjusts our
+        // indexing constants.)
+        this.perfSampling = true;
       }
     }
   },
   
   _suppressIndexing: false,
   /**
    * Set whether or not indexing should be suppressed.  This is to allow us to
    *  avoid running down a laptop's battery when it is not on AC.  Only code
@@ -553,16 +581,23 @@ var GlodaIndexer = {
     //  to do, and the indexing process had actually stopped.
     if (!this._suppressIndexing && this._indexingDesired &&
         !this._indexingActive) {
         this._log.info("+++ Indexing Queue Processing Resuming");
         this._indexingActive = true;
         this._timer.initWithCallback(this._wrapCallbackDriver,
                                      this._indexInterval,
                                      Ci.nsITimer.TYPE_ONE_SHOT);
+        // Start the performance sampling clock now rather than in the timer
+        //  callbacks because it reduces the number of states the system can
+        //  be in.  If we are indexing and we are in control of utilization,
+        //  sampling is active.  If we are indexing but not in control, we
+        //  stop sampling (not ideal, but realistic).  If we are not indexing,
+        //  we are not performance sampling.
+        this.perfSampling = true;
     }
   },
 
   /**
    * Our timer-driven callback to schedule our first initial indexing sweep.
    *  Because it is invoked by an nsITimer it operates without the benefit of
    *  a 'this' context and must use GlodaIndexer instead of this.
    * Since an initial sweep could have been performed before we get invoked,
@@ -586,17 +621,152 @@ var GlodaIndexer = {
   set indexingSweepNeeded(aNeeded) {
     if (!this._indexingSweepActive && aNeeded) {
       this._indexQueue.push(new IndexingJob("sweep", 0, null));
       this._indexingJobGoal++;
       this._indexingSweepActive = true;
       this.indexing = true;
     }
   },
-
+  
+  /**
+   * Number of milliseconds between performance samples.
+   */
+  _PERF_SAMPLE_RATE_MS: 1000,
+  set perfSampling(aEnable) {
+    if (aEnable) {
+      this._perfSamples = [];
+      this._perfTimer.initWithCallback(this._perfTimerFire,
+                                       this._PERF_SAMPLE_RATE_MS,
+                                       Ci.nsITimer.TYPE_REPEATING_SLACK);
+      this._perfStopwatch.start();
+    }
+    else {
+      this._perfTimer.cancel();
+      // we stop the stopwatch mainly so our state makes sense to anyone
+      //  debugging and for our unit test.  In reality, the stopwatch only
+      //  does work on the calls to start and stop, and no expense is incurred
+      //  in the interim, so this is actually an expense with no benefit.  But
+      //  it's not much of an expense.
+      this._perfStopwatch.stop();
+    }
+  },
+  
+  /**
+   * Number of performance samples to average together.  We average to try and
+   *  stabilize our decision making in the face of transient thunderbird CPU
+   *  utilization spikes that are not our fault.  (User activity, garbage
+   *  collection, etc.)
+   */
+  _perfSamplePointCount: 2,
+  _perfSamples: [],
+  _perfTimerFire: function() {
+    GlodaIndexer.perfTimerFire();
+  },
+  /**
+   * Smallest allowable sleep time, in milliseconds.  This must be a multiple of
+   *  _TIMER_STEP_SIZE.  Keep in mind that we effectively run in a timer-with-
+   *  slack mode of operation.  This means that the time between our timer
+   *  firing is actually (_indexInterval + the time we actually spend
+   *  processing), so 1000/_indexInterval is really our maximum firing rate if
+   *  we did no work.
+   */
+  _MIN_TIMER_INTERVAL_MS: 20,
+  /**
+   * The timer interval adjustment size, in milliseconds.
+   */
+  _TIMER_STEP_SIZE: 10,
+  /**
+   * The maximum amount of time in milliseconds we will sleep between firings.
+   *  The reason we cap ourselves is that although we are aware of our cpu
+   *  utilization, the autosync logic is not.  The autosync logic can easily
+   *  drive thunderbird's utilization above our acceptable threshold for
+   *  extended periods of time, resulting in our logic deciding to back off
+   *  every time it makes a decision, even though it will have no meaningful
+   *  impact.  If we did not do this, it might be some time before indexing
+   *  would resume at any meaningful rate.
+   */
+  _MAX_TIMER_INTERVAL_MS: 400,
+  /**
+   * Periodic performance adjustment logic.  The overall goal is to adjust our
+   *  rate of work so that we don't interfere with the user's activities when
+   *  they are around (non-idle), and the system in general (when idle).  Being
+   *  nice when idle isn't quite as important, but is a good idea so that when
+   *  the user un-idles we are able to back off nicely.  Also, we give other
+   *  processes on the system a chance to do something.
+   * 
+   * The two knobs we have to play with are:
+   * - The amount of time we sleep between work batch processing.  Keep in mind
+   *   that many of our operations are actually asynchronous, so we aren't
+   *   entirely starving the event queue.  However, a lot of the async stuff
+   *   can end up not having any actual delay between events. For example, we
+   *   only index offline message bodies, so there's no network latency
+   *   involved, just disk IO; the only meaningful latency will be the initial
+   *   disk seek (if there is one... pre-fetching may seriously be our friend).
+   * - The amount of work we do between intentional sleeps (number of tokens).
+   * 
+   * In order to maintain responsiveness, I assert that we want to minimize the
+   *  length of time we are dominating the event queue.  This suggests
+   *  that we want to break up our blocks of work frequently.  But not so
+   *  frequently that there is a lot of waste.  Accordingly our algorithm is
+   *  basically:
+   *  
+   * Using too much cpu:
+   *  First, do less work per slice = reduce tokens.
+   *  Second, space our work batches out more = increase sleep time.
+   *  
+   * Using less cpu than budgeted:
+   *  First, reduce the spacing between our work batches = decrease sleep time.
+   *  Second, do more work per slice = increase tokens.
+   */
+  perfTimerFire: function perfTimerFire() {
+    let stopwatch = this._perfStopwatch;
+    stopwatch.stop();
+    
+    let realTime = stopwatch.realTimeSeconds;
+    let cpuTime = stopwatch.cpuTimeSeconds;
+    
+    let dir = "none", averagePercent = 0;
+    if (realTime) {
+      while (this._perfSamples.length >= this._perfSamplePointCount)
+        this._perfSamples.shift();
+      
+      let cpuPercent = cpuTime / realTime;
+      this._perfSamples.push(cpuPercent);
+      
+      if (this._perfSamples.length == this._perfSamplePointCount) { 
+        for (let i = 0; i < this._perfSamples.length; i++)
+          averagePercent += this._perfSamples[i];
+        averagePercent /= this._perfSamples.length;
+        
+        if (averagePercent > this._cpuTarget) {
+          dir = "down";
+          if (this._indexTokens > 1)
+            this._indexTokens--;
+          else if (this._indexInterval < this._MAX_TIMER_INTERVAL_MS)
+            this._indexInterval += this._TIMER_STEP_SIZE;
+        }
+        else if (averagePercent + 0.1 < this._cpuTarget) {
+          dir = "up";
+          if (this._indexInterval > this._MIN_TIMER_INTERVAL_MS)
+            this._indexInterval -= this._TIMER_STEP_SIZE;
+          else
+            this._indexTokens++;
+        }
+      }
+    
+      GlodaIndexer._log.debug("PERFORMANCE " + dir +
+                              " average: " + averagePercent +
+                              " interval: " + this._indexInterval +
+                              " tokens: " + this._indexTokens);
+    }
+    
+    stopwatch.start();
+  },
+  
   /**
    * Indicates that we have pending deletions to process, meaning that there
    *  are gloda message rows flagged for deletion.  If this value is a boolean,
    *  it means the value is known reliably.  If this value is null, it means
    *  that we don't know, likely because we have started up and have not checked
    *  the database.
    */
   pendingDeletions: null,
@@ -679,41 +849,45 @@ var GlodaIndexer = {
    */
   _indexIdleThresholdSecs: 15,
   
   /**
    * The time delay in milliseconds before we should schedule our initial sweep.
    */
   _initialSweepDelay: 10000,
   
+  _cpuTarget: 0.4,
+  _cpuTarget_whenActive: 0.4,
+  _cpuTarget_whenIdle: 0.8,
+  
   /**
    * The time interval, in milliseconds between performing indexing work.
    *  This may be altered by user session (in)activity.
    */ 
-  _indexInterval: 100,
-  _indexInterval_whenActive: 100,
+  _indexInterval: 60,
+  _indexInterval_whenActive: 60,
   _indexInterval_whenIdle: 20,
   /**
    * Number of indexing 'tokens' we are allowed to consume before yielding for
    *  each incremental pass.  Consider a single token equal to indexing a single
    *  medium-sized message.  This may be altered by user session (in)activity.
    * Because we fetch message bodies, which is potentially asynchronous, this
    *  is not a precise knob to twiddle.
    */
-  _indexTokens: 2,
-  _indexTokens_whenActive: 2,
+  _indexTokens: 5,
+  _indexTokens_whenActive: 5,
   _indexTokens_whenIdle: 10,
   
   /**
    * Number of indexing 'tokens' we consume before we issue a commit.  The
    *  goal is to de-couple our time scheduling from our commit schedule.  It's
    *  far better for user responsiveness to take lots of little bites instead
    *  of a few big ones, but bites that result in commits cannot be little... 
    */
-  _indexCommitTokens: 10,
+  _indexCommitTokens: 40,
   
   /**
    * The number of messages that we should queue for processing before letting
    *  them fall on the floor and relying on our folder-walking logic to ensure
    *  that the messages are indexed.
    * The reason we allow for queueing messages in an event-driven fashion is
    *  that once we have reached a steady-state, it is preferable to be able to
 *  deal with new messages and modified meta-data in a prompt fashion rather
@@ -833,17 +1007,21 @@ var GlodaIndexer = {
     // leave the folder if we haven't explicitly left it.
     if (this._indexingFolder !== null) {
       this._indexerLeaveFolder();
     }
     
     this._indexingGlodaFolder = GlodaDatastore._mapFolderID(aFolderID);
     this._indexingFolder = this._indexingGlodaFolder.getXPCOMFolder(
                              this._indexingGlodaFolder.kActivityIndexing);
-    
+
+    // The processor utilization required to enter a folder is not our
+    //  fault; don't sample this.  We turn it back on once we are in the folder.
+    this.perfSampling = false;
+
     if (this._indexingFolder)
       this._log.debug("Entering folder: " + this._indexingFolder.URI);
 
     try {
       // The msf may need to be created or otherwise updated for local folders.
       // This may require yielding until such time as the msf has been created.
       try {
         if (this._indexingFolder instanceof Ci.nsIMsgLocalMailFolder) {
@@ -870,16 +1048,18 @@ var GlodaIndexer = {
       }
       // we get an nsIMsgDatabase out of this (unsurprisingly) which
       //  explicitly inherits from nsIDBChangeAnnouncer, which has the
       //  AddListener call we want.
       if (this._indexingDatabase == null)
         this._indexingDatabase = this._indexingFolder.msgDatabase;
       if (aNeedIterator)
         this._indexerGetIterator();
+      // re-enable performance sampling; we're responsible for our actions again
+      this.perfSampling = true;
       this._indexingDatabase.AddListener(this._databaseAnnouncerListener);
     }
     catch (ex) {
       this._log.error("Problem entering folder: " +
                       (this._indexingFolder ?
                          this._indexingFolder.prettiestName : "unknown") + 
                       ", skipping. Error was: " + ex.fileName + ":" +
                       ex.lineNumber + ": " + ex);
@@ -905,16 +1085,18 @@ var GlodaIndexer = {
    */
   _indexerCompletePendingFolderEntry:
       function gloda_indexer_indexerCompletePendingFolderEntry() {
     this._indexingDatabase = this._indexingFolder.msgDatabase;
     if (this._pendingFolderWantsIterator)
       this._indexerGetIterator();
     this._indexingDatabase.AddListener(this._databaseAnnouncerListener);
     this._log.debug("...Folder Loaded!");
+    // re-enable performance sampling; we're responsible for our actions again
+    this.perfSampling = true;
 
     // the load is no longer pending; we certainly don't want more notifications 
     this._pendingFolderEntry = null;
     // indexerEnterFolder returned kWorkAsync, which means we need to notify
     //  the callback driver to get things going again.
     this.callbackDriver();
   },
   
@@ -1038,18 +1220,21 @@ var GlodaIndexer = {
           this._batch = null;
           // (intentional fall-through to re-scheduling logic) 
         // the batch wants to get re-scheduled, do so.
         case this.kWorkPause:
           if (this.indexing)
             this._timer.initWithCallback(this._wrapCallbackDriver,
                                          this._indexInterval,
                                          Ci.nsITimer.TYPE_ONE_SHOT);
-          else // it's important to indicate no more callbacks are in flight
+          else { // it's important to indicate no more callbacks are in flight
             this._indexingActive = false;
+            // we're not indexing anymore, so we're not sampling anymore.
+            this.perfSampling = false;
+          }
           break;
         case this.kWorkAsync:
           // there is nothing to do.  some other code is now responsible for
           //  calling us.
           break;
       }
     }
     finally {    
@@ -1108,42 +1293,46 @@ var GlodaIndexer = {
        last yield kWorkAsync */
     onItemsAdded: function() {},
     onItemsModified: function() {},
     onItemsRemoved: function() {},
     onQueryCompleted: function(aCollection) {
       GlodaIndexer.callbackDriver();
     }
   },
-  _forceGCCounter: 0,
-  FORCE_GC_THRESHOLD: 256,
   _workBatchData: undefined,
   /**
    * The workBatch generator handles a single 'batch' of processing, managing
    *  the database transaction and keeping track of "tokens".  It drives the
    *  _actualWorker generator which is doing the work.
    * workBatch will only produce kWorkAsync and kWorkDone notifications.
    *  If _actualWorker returns kWorkSync and there are still tokens available,
    *  workBatch will keep driving _actualWorker until it encounters a
    *  kWorkAsync (which workBatch will yield to callbackDriver), or it runs
    *  out of tokens and yields a kWorkDone. 
    */
   workBatch: function gloda_index_workBatch() {
     let commitTokens = this._indexCommitTokens;
     GlodaDatastore._beginTransaction();
 
     while (commitTokens > 0) {
+      // both explicit work activity points (sync + async) and transfers of
+      //  control back to us (via kWorkDone*) result in a token being eaten.  The
+      //  idea now is to make tokens less precious so that the adaptive logic
+      //  can adjust them with less impact.  (Before this change, doing 1
+      //  token's work per cycle ended up being an entire non-idle time-slice's
+      //  work.)
       for (let tokensLeft = this._indexTokens; tokensLeft > 0;
           tokensLeft--, commitTokens--) {
         // we need to periodically force a GC to avoid excessive process size
         //  and because nsAutoLock is a jerk on debug builds
-        if (++this._forceGCCounter >= this.FORCE_GC_THRESHOLD) {
-          Cu.forceGC();
-          this._forceGCCounter = 0;
-        }
+        // there is a constant in GlodaUtils that may need to be adjusted (and
+        //  potentially augmented with time-awareness) as token logic is
+        //  adjusted; or just for tuning purposes.
+        GlodaUtils.maybeGarbageCollect();
         
         if ((this._callbackHandle.activeIterator === null) &&
             !this._hireJobWorker()) {
           commitTokens = 0;
           break;
         }
       
         // XXX for performance, we may want to move the try outside the for loop
@@ -1156,21 +1345,19 @@ var GlodaIndexer = {
               this._workBatchData = undefined;
               break;
             case this.kWorkAsync:
               this._workBatchData = yield this.kWorkAsync;
               break;
             case this.kWorkDone:
               this._callbackHandle.pop();
               this._workBatchData = undefined;
-              tokensLeft++; // don't eat a token for this pass
               break;
             case this.kWorkDoneWithResult:
               this._workBatchData = this._callbackHandle.popWithResult();
-              tokensLeft++; // don't eat a token for this pass
               continue;
           }
         }
         catch (ex) {
           // Try and recover if the job is recoverable and the iterator that
           //  experienced the problem wasn't the job worker.  (If it was the
           //  job worker, we can't rely on its state to be intact.) 
           if (this._curIndexingJob.recoverable > 0 &&
@@ -1223,16 +1410,18 @@ var GlodaIndexer = {
       this._log.info("--- Done indexing, disabling timer renewal.");
       
       if (this._indexingFolder !== null) {
         this._indexerLeaveFolder(true);
       }
       
       this._curIndexingJob = null;
       this._indexingDesired = false;
+      // we're not indexing anymore, so we're not sampling anymore
+      this.perfSampling = false;
       this._indexingJobCount = 0;
       this._indexingJobGoal = 0;
       return false;
     }
 
     //this._log.debug("++ Pulling job from queue of size " +
     //                this._indexQueue.length);
     let job = this._curIndexingJob = this._indexQueue.shift();
@@ -1390,17 +1579,17 @@ var GlodaIndexer = {
     
     if (!this.shouldIndexFolder(this._indexingFolder))
       yield this.kWorkDone;
     
     aJob.goal = this._indexingFolder.getTotalMessages(false);
     
     // there is of course a cost to all this header investigation even if we
     //  don't do something.  so we will yield with kWorkSync for every block. 
-    const HEADER_CHECK_BLOCK_SIZE = 100;
+    const HEADER_CHECK_BLOCK_SIZE = 10;
     
     let isLocal = this._indexingFolder instanceof Ci.nsIMsgLocalMailFolder;
     // we can safely presume if we are here that this folder has been selected
     //  for offline processing...
 
     // Handle the filthy case.  A filthy folder may have misleading properties
     //  on the message that claim the message is indexed.  They are misleading
     //  because the database, for whatever reason, does not have the messages
@@ -1430,22 +1619,23 @@ var GlodaIndexer = {
         //  so no action is required.
       }
       // this will automatically persist to the database
       glodaFolder.dirtyStatus = glodaFolder.kFolderDirty;
       
       // We used up the iterator, get a new one.
       this._indexerGetIterator();
     }
-    
+
     for (let msgHdr in this._indexingIterator) {
       // per above, we want to periodically release control while doing all
       //  this header traversal/investigation.
-      if (++aJob.offset % HEADER_CHECK_BLOCK_SIZE == 0)
+      if (++aJob.offset % HEADER_CHECK_BLOCK_SIZE == 0) {
         yield this.kWorkSync;
+      }
       
       if ((isLocal || (msgHdr.flags & MSG_FLAG_OFFLINE)) &&
           !(msgHdr.flags & MSG_FLAG_EXPUNGED)) {
         // this returns 0 when missing
         let glodaMessageId = msgHdr.getUint32Property(
                              this.GLODA_MESSAGE_ID_PROPERTY);
         
         // if it has a gloda message id, it has been indexed, but it still
@@ -1652,24 +1842,34 @@ var GlodaIndexer = {
   },
   
   /* *********** Event Processing *********** */
   observe: function gloda_indexer_observe(aSubject, aTopic, aData) {
     // idle
     if (aTopic == "idle") {
       if (this.indexing)
         this._log.debug("Detected idle, throttling up.");
+      // save off our adapted active values
+      this._indexInterval_whenActive = this._indexInterval;
+      this._indexTokens_whenActive = this._indexTokens;
+      // start using our idle values
       this._indexInterval = this._indexInterval_whenIdle;
       this._indexTokens = this._indexTokens_whenIdle;
+      this._cpuTarget = this._cpuTarget_whenIdle; // (don't need to save)
     }
     else if (aTopic == "back") {
       if (this.indexing)
         this._log.debug("Detected un-idle, throttling down.");
+      // save off our idle values
+      this._indexInterval_whenIdle = this._indexInterval;
+      this._indexTokens_whenIdle = this._indexTokens;
+      // start using our active values
       this._indexInterval = this._indexInterval_whenActive;
       this._indexTokens = this._indexTokens_whenActive;
+      this._cpuTarget = this._cpuTarget_whenActive; // (don't need to save)
     }
     // offline status
     else if (aTopic == "network:offline-status-changed") {
       if (aData == "offline") {
         this.suppressIndexing = true;
       }
       else { // online
         this.suppressIndexing = false;
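
The doc comment on perfTimerFire spells out the adjustment policy; distilled to a standalone sketch (an illustration with the indexer's constants mirrored, not part of the patch), the rule is:

    // Sketch of the adaptive throttling rule from perfTimerFire.
    const MIN_TIMER_INTERVAL_MS = 20, MAX_TIMER_INTERVAL_MS = 400;
    const TIMER_STEP_SIZE = 10, SAMPLE_POINT_COUNT = 2;
    let indexTokens = 5, indexInterval = 60, cpuTarget = 0.4, samples = [];

    function onPerfSample(cpuTimeSecs, realTimeSecs) {
      if (!realTimeSecs)
        return;
      while (samples.length >= SAMPLE_POINT_COUNT)
        samples.shift();
      samples.push(cpuTimeSecs / realTimeSecs);
      if (samples.length < SAMPLE_POINT_COUNT)
        return; // not enough history to decide yet
      let avg = samples.reduce(function(a, b) { return a + b; }, 0) /
                samples.length;
      if (avg > cpuTarget) {              // too hot: less work, then more sleep
        if (indexTokens > 1)
          indexTokens--;
        else if (indexInterval < MAX_TIMER_INTERVAL_MS)
          indexInterval += TIMER_STEP_SIZE;
      } else if (avg + 0.1 < cpuTarget) { // head-room: less sleep, more work
        if (indexInterval > MIN_TIMER_INTERVAL_MS)
          indexInterval -= TIMER_STEP_SIZE;
        else
          indexTokens++;
      }
    }

    // Two 100%-cpu samples complete the average and drop indexTokens from 5
    //  to 4; repeated hot samples step down to 1 and then stretch
    //  indexInterval toward the 400ms cap.
    onPerfSample(1.0, 1.0);
    onPerfSample(1.0, 1.0);
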
--- a/mailnews/db/gloda/modules/utils.js
+++ b/mailnews/db/gloda/modules/utils.js
@@ -149,9 +149,54 @@ var GlodaUtils = {
       str = sstream.read(4096);
     }
 
     sstream.close();
     fstream.close();
 
     return data;
   },
+  
+  /**
+   * Force a garbage-collection sweep.  Gloda has to force garbage collection
+   *  periodically because XPConnect's XPCJSRuntime::DeferredRelease mechanism
+   *  can end up holding onto a ridiculously high number of XPConnect objects in
+   *  between normal garbage collections.  This has mainly posed a problem
+   *  because nsAutolock is a jerk in DEBUG builds, but in theory this also
+   *  helps us even out our memory usage.
+   * We also are starting to do this more to try and keep the garbage collection
+   *  durations acceptable.  We intentionally avoid triggering the cycle
+   *  collector in those cases, as we do presume a non-trivial fixed cost for
+   *  cycle collection.  (And really all we want is XPConnect to not be a jerk.)
+   * This method exists mainly to centralize our GC activities and because if
+   *  we do start involving the cycle collector, that is a non-trivial block of
+   *  code to copy-and-paste all over the place (at least in a module).
+   * 
+   * @param aCycleCollecting Do we need the cycle collector to run?  Currently
+   *     unused / unimplemented, but we would use
+   *     nsIDOMWindowUtils.garbageCollect() to do so.
+   */
+  forceGarbageCollection:
+    function gloda_utils_garbageCollection(aCycleCollecting) {
+    Cu.forceGC();
+  },
+  
+  _forceGCCounter: 0,
+  /**
+   * The question of when we should actually force the garbage collection is
+   *  tricky.  Right now, our only caller is from the indexer, and the indexer
+   *  issues its calls based on token consumption, which is already a fairly
+   *  nebulous sort of thing.  On the upside, tokens do correlate with
+   *  XPConnect activity fairly well, although just how much does vary a bit.
+   */
+  FORCE_GC_THRESHOLD: 64,
+  /**
+   * Along the lines of forceGarbageCollection, allow code to hint that it is
+   *  doing a fair bit of garbage generation as it relates to XPConnect and that
+   *  we should note it and consider garbage collecting.
+   */
+  maybeGarbageCollect: function gloda_utils_maybeGarbageCollect() {
+    if (++this._forceGCCounter >= this.FORCE_GC_THRESHOLD) {
+      GlodaUtils.forceGarbageCollection(false);
+      this._forceGCCounter = 0;
+    }
+  }
 };
--- a/mailnews/db/gloda/test/resources/glodaTestHelper.js
+++ b/mailnews/db/gloda/test/resources/glodaTestHelper.js
@@ -215,16 +215,18 @@ function imsInit() {
     prefSvc.setBoolPref("mail.biff.animate_dock_icon", false);
   
     Gloda.addIndexerListener(messageIndexerListener.onIndexNotification);
     ims.catchAllCollection = Gloda._wildcardCollection(Gloda.NOUN_MESSAGE);
     ims.catchAllCollection.listener = messageCollectionListener;
     
     // The indexer doesn't need to worry about load; zero his rescheduling time. 
     GlodaIndexer._indexInterval = 0;
+    // And it doesn't need to adjust its performance, either.
+    GlodaIndexer._PERF_SAMPLE_RATE_MS = 24 * 60 * 60 * 1000;
     
     if (ims.injectMechanism == INJECT_FAKE_SERVER) {
       // set up POP3 fakeserver to feed things in...
       [ims.daemon, ims.server] = setupServerDaemon();
       // (this will call loadLocalMailAccount())
       ims.incomingServer = createPop3ServerAndLocalFolders();
   
       ims.pop3Service = Cc["@mozilla.org/messenger/popservice;1"]
@@ -465,25 +467,40 @@ var messageCollectionListener = {
   },
   
   onItemsRemoved: function(aItems) {
     dump("!!! messageCollectionListener.onItemsRemoved\n");
   }
 };
 
 /**
+ * Allow tests to register a callback to be invoked when indexing completes.
+ *  Only one callback may be registered at a time.
+ */
+function runOnIndexingComplete(aCallback) {
+  messageIndexerListener.callbackOnDone = aCallback; 
+}
+
+/**
  * Gloda indexer listener, used to know when all active indexing jobs have
  *  completed so that we can try and process all the things that should have
  *  been processed.
  */
 var messageIndexerListener = {
+  callbackOnDone: null,
   onIndexNotification: function(aStatus, aPrettyName, aJobIndex, aJobTotal,
                                 aJobItemIndex, aJobItemGoal) {
     // we only care if indexing has just completed...
     if (!GlodaIndexer.indexing) {
+      if (messageIndexerListener.callbackOnDone) {
+        let callback = messageIndexerListener.callbackOnDone;
+        messageIndexerListener.callbackOnDone = null;
+        callback();
+      }
+      
       let ims = indexMessageState;
       
       // this is just the synthetic notification if inputMessages is null
       if (ims.inputMessages === null) {
         dump("((( ignoring indexing notification, assuming synthetic " +
              "notification.\n");
         return;
       }
@@ -885,17 +902,16 @@ function _gh_test_iterator() {
     killFakeServer();
   }
 
   do_test_finished();
   
   // once the control flow hits the root after do_test_finished, we're done,
   //  so let's just yield something to avoid callers having to deal with an
   //  exception indicating completion.
-  glodaHelperIterator = null;
   yield null;
 }
 
 var _next_test_currently_in_test = false;
 function next_test() {
   // to avoid crazy messed up stacks, use a time-out to get us to our next thing
   if (_next_test_currently_in_test) {
     do_timeout(0, "next_test()");
@@ -918,17 +934,17 @@ DEFAULT_LONGEST_TEST_RUN_CONCEIVABLE_SEC
 /**
  * Test driving logic that takes a list of tests to run.  Every completed test
  *  needs to call (or cause to be called) next_test.
  * 
  * @param aTests A list of test functions to call.
  * @param aLongestTestRunTimeConceivableInSecs Optional parameter 
  */
 function glodaHelperRunTests(aTests, aLongestTestRunTimeConceivableInSecs) {
-  if (aLongestTestRunTimeConceivableInSecs === undefined)
+  if (aLongestTestRunTimeConceivableInSecs == null)
     aLongestTestRunTimeConceivableInSecs =
         DEFAULT_LONGEST_TEST_RUN_CONCEIVABLE_SECS;
   do_timeout(aLongestTestRunTimeConceivableInSecs * 1000,
       "do_throw('Timeout running test, and we want you to have the log.');");
   
   imsInit();
   glodaHelperTests = aTests;
   glodaHelperIterator = _gh_test_iterator();
new file mode 100644
--- /dev/null
+++ b/mailnews/db/gloda/test/resources/mockIndexer.js
@@ -0,0 +1,38 @@
+/**
+ * A mock gloda indexer.  Right now it just exists to let us cause the indexer
+ *  to think it is indexing but really have nothing going on.
+ */
+var MockIndexer = {
+  /* public interface */
+  name: "mock_indexer",
+  enable: function() {
+    this.enabled = true;
+  },
+  disable: function() {
+    this.enabled = false;
+  },
+  get workers() {
+    return [["forever", this._worker_index_forever]];
+  },
+  initialSweep: function() {
+    this.initialSweepCalled = true;
+  },
+  /* mock interface */
+  enabled: false,
+  initialSweepCalled: false,
+  indexForever: function() {
+    GlodaIndexer.indexJob(new IndexingJob("forever", 0, null));
+  },
+  stopIndexingForever: function() {
+    GlodaIndexer.callbackDriver();
+  },
+  /* implementation */
+  _worker_index_forever: function(aJob, aCallbackHandle) {
+    // pretend that something async is happening, but nothing is really
+    //  happening!  muahahaha!
+    // 
+    yield GlodaIndexer.kWorkAsync;
+    yield GlodaIndexer.kWorkDone;
+  }
+};
+GlodaIndexer.registerIndexer(MockIndexer);
new file mode 100644
--- /dev/null
+++ b/mailnews/db/gloda/test/resources/mockTimer.js
@@ -0,0 +1,77 @@
+/**
+ * Mock nsITimer implementation.  Intended to be clobbered into place after the
+ *  actual timer would normally be constructed.  Has a helpful method to help
+ *  you do that, too!
+ */
+function MockTimer(aObj, aAttrName) {
+  if (aObj && aAttrName)
+    this.clobber(aObj, aAttrName);
+}
+MockTimer.prototype = {
+  /* public interface */
+  TYPE_ONE_SHOT: 0,
+  TYPE_REPEATING_SLACK: 1,
+  TYPE_REPEATING_PRECISE: 2,
+  initWithCallback: function(aCallback, aDelay, aType) {
+    if (aCallback instanceof Ci.nsITimerCallback)
+      this.callback = aCallback;
+    else // it was just a function that we need to dress up.
+      this.callback = {notify: function() {aCallback();}};
+    this.delay = aDelay;
+    this.type = aType;
+  },
+  init: function(aObserver, aDelay, aType) {
+    this.observer = aObserver;
+    this.delay = aDelay;
+    this.type = aType;
+    this.callback = null;
+  },
+  cancel: function() {
+    this.callback = null;
+    this.observer = null;
+  },
+  delay: 0,
+  type: 0,
+  _callback: null,
+  _activeCallback: null,
+  get callback() {
+    return this._callback || this._activeCallback;
+  },
+  set callback(aCallback) {
+    this._callback = aCallback;
+  },
+  get target() {
+    throw Error("Homey don't play that");
+  },
+  /* private */
+  observer: null,
+  /* mock interface */
+  get oneShot() {
+    return this.type == this.TYPE_ONE_SHOT;
+  },
+  clobber: function(aObj, aAttrName) {
+    let realTimer = aObj[aAttrName];
+    realTimer.cancel();
+    this.delay = realTimer.delay;
+    this.type = realTimer.type;
+    this.callback = realTimer.callback;
+    aObj[aAttrName] = this;
+  },
+  fireNow: function() {
+    if (this._callback) {
+      this._activeCallback = this._callback;
+      if (this.oneShot)
+        this._callback = null;
+      this._activeCallback.notify();
+      this._activeCallback = null;
+    }
+    else if (this.observer) {
+      let observer = this.observer;
+      if (this.oneShot)
+        this.observer = null;
+      observer.observe(this, "timer-callback", null);
+    }
+  },
+  get active() {
+    return (this.callback != null) || (this.observer != null);
+  }
+};
new file mode 100644
--- /dev/null
+++ b/mailnews/db/gloda/test/unit/test_index_adaptive.js
@@ -0,0 +1,246 @@
+/*
+ * Test our adaptive indexing logic; the thing that tries to adjust our
+ *  indexing constants based on perceived processor utilization.  We fake all
+ *  the load stuff, of course.
+ * 
+ * Out of necessity, this test knows about the internals of the adaptive
+ *  indexing logic.
+ */
+
+do_import_script("../mailnews/db/gloda/test/resources/glodaTestHelper.js");
+do_import_script("../mailnews/db/gloda/test/resources/mockIndexer.js");
+do_import_script("../mailnews/db/gloda/test/resources/mockTimer.js");
+
+/* ===== Mock Objects ==== */
+
+var FakeStopwatch = {
+  /* (fake) public interface */
+  start: function () {
+    this.running = true;
+    dump("stopwatch started\n");
+  },
+  stop: function() {
+    this.running = false;
+    dump("stopwatch stopped\n");
+  },
+  // just always claim we're 2 seconds...
+  realTimeSeconds: 2.0,
+  cpuTimeSeconds: 0.0,
+  /* mock support */
+  running: false,
+  
+  tooMuch: function() {
+    this.cpuTimeSeconds = this.realTimeSeconds;
+  },
+  tooLittle: function() {
+    this.cpuTimeSeconds = 0.0;
+  },
+  justRight: function() {
+    this.cpuTimeSeconds = this.realTimeSeconds * GlodaIndexer._cpuTarget - 0.05;
+  }
+};
+
+// hack in our stopwatch
+GlodaIndexer._perfStopwatch = FakeStopwatch;
+// hack in a timer for the stopwatch control
+var perfTimer = new MockTimer(GlodaIndexer, "_perfTimer");
+
+/* ===== Helpers ===== */
+function fireCleanStabilizeAverage() {
+  GlodaIndexer._perfSamples = [];
+  for (let iFire = 0; iFire < GlodaIndexer._perfSamplePointCount; iFire++)
+    perfTimer.fireNow();
+}
+
+/* ===== Tests ===== */
+
+function test_sample_when_you_should() {
+  // imsInit clobbered this, put it back.
+  GlodaIndexer._indexInterval = GlodaIndexer._indexInterval_whenActive;
+  
+  do_check_false(FakeStopwatch.running);
+  do_check_false(perfTimer.active);
+  
+  MockIndexer.indexForever();
+  
+  do_check_true(FakeStopwatch.running);
+  do_check_true(perfTimer.active);
+  
+  next_test();
+}
+
+function test_throttle_up() {
+  let preTokens = GlodaIndexer._indexTokens;
+  let preInterval =  GlodaIndexer._indexInterval;
+  
+  FakeStopwatch.tooLittle();
+  // fire one too few times, verify that nothing happens for those pre-firing
+  //  times... (this only matters for the first time we sample per the sampler
+  //  being active...)
+  for (let iFire = 1; iFire < GlodaIndexer._perfSamplePointCount; iFire++) {
+    perfTimer.fireNow();
+    do_check_eq(preTokens, GlodaIndexer._indexTokens);
+    do_check_eq(preInterval, GlodaIndexer._indexInterval);
+  }
+  // now fire with some actual effect
+  perfTimer.fireNow();
+  
+  // make sure everything went in the right direction
+  do_check_true(preTokens <= GlodaIndexer._indexTokens);
+  do_check_true(preInterval >= GlodaIndexer._indexInterval);
+  // make sure something actually happened
+  do_check_true(((GlodaIndexer._indexTokens - preTokens) > 0) ||
+                ((preInterval - GlodaIndexer._indexInterval) > 0));
+                
+  next_test();
+}
+
+function test_throttle_down() {
+  let preTokens = GlodaIndexer._indexTokens;
+  let preInterval =  GlodaIndexer._indexInterval;
+
+  FakeStopwatch.tooMuch();
+  fireCleanStabilizeAverage();
+
+  // make sure everything went in the right direction
+  do_check_true(preTokens >= GlodaIndexer._indexTokens);
+  do_check_true(preInterval <= GlodaIndexer._indexInterval);
+  // make sure something actually happened
+  do_check_true(((GlodaIndexer._indexTokens - preTokens) < 0) ||
+                ((preInterval - GlodaIndexer._indexInterval) < 0));
+  
+  next_test();
+}
+
+function test_nop_on_stable() {
+
+  let preTokens = GlodaIndexer._indexTokens;
+  let preInterval =  GlodaIndexer._indexInterval;
+
+  FakeStopwatch.justRight();
+  fireCleanStabilizeAverage();
+
+  // make sure nothing happened
+  do_check_eq(preTokens, GlodaIndexer._indexTokens);
+  do_check_eq(preInterval, GlodaIndexer._indexInterval);
+  
+  next_test();
+}
+
+var MAX_STEPS_TO_CAPS = 100;
+
+function test_cap_slowest() {
+  FakeStopwatch.tooMuch();
+
+  GlodaIndexer._perfSamples = [];
+  
+  let lastTokens = GlodaIndexer._indexTokens;
+  let lastInterval =  GlodaIndexer._indexInterval;
+  for (let steps = MAX_STEPS_TO_CAPS; steps; steps--) {
+    perfTimer.fireNow();
+    
+    // make sure we're always moving in the right directions
+    do_check_true(lastTokens >= GlodaIndexer._indexTokens);
+    do_check_true(lastInterval <= GlodaIndexer._indexInterval);
+    lastTokens = GlodaIndexer._indexTokens;
+    lastInterval = GlodaIndexer._indexInterval;
+    
+    // make sure we never go above the cap
+    do_check_true(GlodaIndexer._indexInterval <=
+                  GlodaIndexer._MAX_TIMER_INTERVAL_MS);
+    // if we have hit the cap, give it a few more spins 
+    if (GlodaIndexer._indexInterval == GlodaIndexer._MAX_TIMER_INTERVAL_MS &&
+        steps > 5)
+      steps = 5;
+  }
+  // make sure we actually did hit the cap
+  do_check_eq(GlodaIndexer._indexInterval, GlodaIndexer._MAX_TIMER_INTERVAL_MS);
+  
+  next_test();
+}
+
+function test_cap_fastest() {
+  FakeStopwatch.tooLittle();
+  
+  GlodaIndexer._perfSamples = [];
+  
+  let lastTokens = GlodaIndexer._indexTokens;
+  let lastInterval =  GlodaIndexer._indexInterval;
+  for (let steps = MAX_STEPS_TO_CAPS; steps; steps--) {
+    perfTimer.fireNow();
+    
+    // make sure we're always moving in the right directions
+    do_check_true(lastTokens <= GlodaIndexer._indexTokens);
+    do_check_true(lastInterval >= GlodaIndexer._indexInterval);
+    lastTokens = GlodaIndexer._indexTokens;
+    lastInterval = GlodaIndexer._indexInterval;
+    
+    // make sure we never go below the cap
+    do_check_true(GlodaIndexer._indexInterval >=
+                  GlodaIndexer._MIN_TIMER_INTERVAL_MS);
+    // if we have hit the cap, give it a few more spins 
+    if (GlodaIndexer._indexInterval == GlodaIndexer._MIN_TIMER_INTERVAL_MS &&
+        steps > 5)
+      steps = 5;
+  }
+  // make sure we actually did hit the cap
+  do_check_eq(GlodaIndexer._indexInterval, GlodaIndexer._MIN_TIMER_INTERVAL_MS);
+  
+  next_test();
+}
+
+function test_idle() {
+  let activeTokens = GlodaIndexer._indexTokens;
+  let activeInterval =  GlodaIndexer._indexInterval;
+  
+  // go idle, make sure we switch to the right set of constants
+  GlodaIndexer.observe(null, "idle", null);
+  do_check_eq(GlodaIndexer._cpuTarget, GlodaIndexer._cpuTarget_whenIdle);
+  do_check_eq(GlodaIndexer._indexInterval,
+              GlodaIndexer._indexInterval_whenIdle);
+  do_check_eq(GlodaIndexer._indexTokens, GlodaIndexer._indexTokens_whenIdle);
+  
+  // go active, make sure we switch back
+  GlodaIndexer.observe(null, "back", null);
+  do_check_eq(GlodaIndexer._cpuTarget, GlodaIndexer._cpuTarget_whenActive);
+  do_check_eq(GlodaIndexer._indexInterval,
+              GlodaIndexer._indexInterval_whenActive);
+  do_check_eq(GlodaIndexer._indexTokens, GlodaIndexer._indexTokens_whenActive);
+  
+  // also make sure that what we switched to was what we were using before idle
+  //  happened...
+  do_check_eq(activeTokens, GlodaIndexer._indexTokens);
+  do_check_eq(activeInterval, GlodaIndexer._indexInterval);
+  
+  next_test();
+}
+
+function test_stop_sampling_when_done() {
+  do_check_true(FakeStopwatch.running);
+
+  runOnIndexingComplete(function() {
+    do_check_false(FakeStopwatch.running);
+    do_check_false(perfTimer.active);
+  
+    next_test();
+  });
+  
+  MockIndexer.stopIndexingForever();
+}
+
+/* ===== Driver ====== */
+
+var tests = [
+  test_sample_when_you_should,
+  test_throttle_up,
+  test_throttle_down,
+  test_nop_on_stable,
+  test_cap_slowest,
+  test_cap_fastest,
+  test_idle,
+  test_stop_sampling_when_done
+];
+
+function run_test() {
+  glodaHelperRunTests(tests);
+}