Bug 638301: SyncEngine._processIncoming: ensure we don't hit URL length limit for fetchBatch on desktop. r=philiKON
author: Richard Newman <rnewman@mozilla.com>
Wed, 06 Apr 2011 17:18:22 -0700
changeset 67768 f5be32f40901a67806c9dcc23627a7735e56631c
parent 67767 7fa18567d4aa65fc66d40f6553b476d7d38eefde
child 67769 d08e1abcb4e564f8287f1786b6a5bb9c700c7df5
push id: 19427
push user: pweitershausen@mozilla.com
push date: Sun, 10 Apr 2011 18:54:44 +0000
treeherder: mozilla-central@21ce62e6aebe [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: philiKON
bugs: 638301
milestone: 2.2a1pre
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 638301: SyncEngine._processIncoming: ensure we don't hit URL length limit for fetchBatch on desktop. r=philiKON
services/sync/modules/constants.js
services/sync/modules/engines.js
services/sync/tests/unit/test_syncengine_sync.js
--- a/services/sync/modules/constants.js
+++ b/services/sync/modules/constants.js
@@ -83,19 +83,25 @@ MAX_IGNORE_ERROR_COUNT:                5
 // HMAC event handling timeout.
 // 10 minutes: a compromise between the multi-desktop sync interval
 // and the mobile sync interval.
 HMAC_EVENT_INTERVAL:                   600000,
 
 // How long to wait between sync attempts if the Master Password is locked.
 MASTER_PASSWORD_LOCKED_RETRY_INTERVAL: 15 * 60 * 1000,   // 15 minutes
 
+// Separate from the ID fetch batch size to allow tuning for mobile.
+MOBILE_BATCH_SIZE:                     50,
+
 // 50 is hardcoded here because of URL length restrictions.
-// (GUIDs can be up to 64 chars long)
-MOBILE_BATCH_SIZE:                     50,
+// (GUIDs can be up to 64 chars long.)
+// Individual engines can set different values for their limit if their
+// identifiers are shorter.
+DEFAULT_GUID_FETCH_BATCH_SIZE:         50,
+DEFAULT_MOBILE_GUID_FETCH_BATCH_SIZE:  50,
 
 // Default batch size for applying incoming records.
 DEFAULT_STORE_BATCH_SIZE:              1,
 HISTORY_STORE_BATCH_SIZE:              50, // same as MOBILE_BATCH_SIZE
 FORMS_STORE_BATCH_SIZE:                50, // same as MOBILE_BATCH_SIZE
 PASSWORDS_STORE_BATCH_SIZE:            50, // same as MOBILE_BATCH_SIZE
 
 // score thresholds for early syncs
--- a/services/sync/modules/engines.js
+++ b/services/sync/modules/engines.js
@@ -424,17 +424,27 @@ SyncEngine.kRecoveryStrategy = {
   retry:  "retry",
   error:  "error"
 };
 
 SyncEngine.prototype = {
   __proto__: Engine.prototype,
   _recordObj: CryptoWrapper,
   version: 1,
+  
+  // How many records to pull in a single sync. This is primarily to avoid very
+  // long first syncs against profiles with many history records.
   downloadLimit: null,
+  
+  // How many records to pull at one time when specifying IDs. This is to avoid
+  // URI length limitations.
+  guidFetchBatchSize: DEFAULT_GUID_FETCH_BATCH_SIZE,
+  mobileGUIDFetchBatchSize: DEFAULT_MOBILE_GUID_FETCH_BATCH_SIZE,
+  
+  // How many records to process in a single batch.
   applyIncomingBatchSize: DEFAULT_STORE_BATCH_SIZE,
 
   get storageURL() Svc.Prefs.get("clusterURL") + Svc.Prefs.get("storageAPI") +
     "/" + ID.get("WeaveID").username + "/storage/",
 
   get engineURL() this.storageURL + this.name,
 
   get cryptoKeysURL() this.storageURL + "crypto/keys",
@@ -593,17 +603,19 @@ SyncEngine.prototype = {
 
   // Process incoming records
   _processIncoming: function SyncEngine__processIncoming() {
     this._log.trace("Downloading & applying server changes");
 
     // Figure out how many total items to fetch this sync; do less on mobile.
     let batchSize = Infinity;
     let newitems = new Collection(this.engineURL, this._recordObj);
-    if (Svc.Prefs.get("client.type") == "mobile") {
+    let isMobile = (Svc.Prefs.get("client.type") == "mobile");
+
+    if (isMobile) {
       batchSize = MOBILE_BATCH_SIZE;
     }
     newitems.newer = this.lastSync;
     newitems.full = true;
     newitems.limit = batchSize;
 
     let count = {applied: 0, failed: 0, reconciled: 0};
     let handled = [];
@@ -745,17 +757,22 @@ SyncEngine.prototype = {
     }
 
     // Fast-foward the lastSync timestamp since we have stored the
     // remaining items in toFetch.
     if (this.lastSync < this.lastModified) {
       this.lastSync = this.lastModified;
     }
 
-    // Mobile: process any backlog of GUIDs
+    // Process any backlog of GUIDs.
+    // At this point we impose an upper limit on the number of items to fetch
+    // in a single request, even for desktop, to avoid hitting URI limits.
+    batchSize = isMobile ? this.mobileGUIDFetchBatchSize :
+                           this.guidFetchBatchSize;
+
     while (fetchBatch.length) {
       // Reuse the original query, but get rid of the restricting params
       // and batch remaining records.
       newitems.limit = 0;
       newitems.newer = 0;
       newitems.ids = fetchBatch.slice(0, batchSize);
 
       // Reuse the existing record handler set earlier
--- a/services/sync/tests/unit/test_syncengine_sync.js
+++ b/services/sync/tests/unit/test_syncengine_sync.js
@@ -819,18 +819,30 @@ function test_processIncoming_failed_rec
       throw "I don't like this record! Baaaaaah!";
     }
     return this._applyIncoming.apply(this, arguments);
   };
 
   let meta_global = Records.set(engine.metaURL, new WBORecord(engine.metaURL));
   meta_global.payload.engines = {steam: {version: engine.version,
                                          syncID: engine.syncID}};
+
+  // Keep track of requests made of a collection.
+  let count = 0;
+  let uris  = [];
+  function recording_handler(collection) {
+    let h = collection.handler();
+    return function(req, res) {
+      ++count;
+      uris.push(req.path + "?" + req.queryString);
+      return h(req, res);
+    };
+  }
   let server = sync_httpd_setup({
-      "/1.0/foo/storage/steam": collection.handler()
+      "/1.0/foo/storage/steam": recording_handler(collection)
   });
   do_test_pending();
 
   try {
 
     // Confirm initial environment
     do_check_eq(engine.lastSync, 0);
     do_check_eq(engine.toFetch.length, 0);
@@ -858,16 +870,43 @@ function test_processIncoming_failed_rec
     BOGUS_RECORDS.sort();
     for (let i = 0; i < engine.toFetch.length; i++) {
       do_check_eq(engine.toFetch[i], BOGUS_RECORDS[i]);
     }
 
     // Ensure the observer was notified
     do_check_eq(observerData, engine.name);
     do_check_eq(observerSubject.failed, BOGUS_RECORDS.length);
+
+    // Testing batching of failed item fetches.
+    // Try to sync again. Ensure that we split the request into chunks to avoid
+    // URI length limitations.
+    function batchDownload(batchSize) {
+      count = 0;
+      uris  = [];
+      engine.guidFetchBatchSize = batchSize;
+      engine._processIncoming();
+      _("Tried again. Requests: " + count + "; URIs: " + JSON.stringify(uris));
+      return count;
+    }
+    
+    // There are 8 bad records, so this needs 3 fetches.
+    _("Test batching with ID batch size 3, normal mobile batch size.");
+    do_check_eq(batchDownload(3), 3);
+
+    // Now see with a more realistic limit.
+    _("Test batching with sufficient ID batch size.");
+    do_check_eq(batchDownload(BOGUS_RECORDS.length), 1);
+
+    // If we're on mobile, that limit is used by default.
+    _("Test batching with tiny mobile batch size.");
+    Svc.Prefs.set("client.type", "mobile");
+    engine.mobileGUIDFetchBatchSize = 2;
+    do_check_eq(batchDownload(BOGUS_RECORDS.length), 4);
+
   } finally {
     server.stop(do_test_finished);
     Svc.Prefs.resetBranch("");
     Records.clearCache();
     syncTesting = new SyncTestingInfrastructure(makeSteamEngine);
   }
 }