Bug 1393659 - Fix inconsistent handling of max_post_bytes and max_request_bytes r=markh
author Thom Chiovoloni <tchiovoloni@mozilla.com>
Wed, 13 Sep 2017 18:02:41 -0700
changeset 433785 4c95e94b973b1b57965f23f3126119ce7d37671b
parent 433784 af468f61fe8a0ee33de3f4558e46217c1ee6d172
child 433786 9d24dfb51c504a3543b69007eae9924fffb9dd0f
push id 1567
push user jlorenzo@mozilla.com
push date Thu, 02 Nov 2017 12:36:05 +0000
treeherder mozilla-release@e512c14a0406
reviewers markh
bugs 1393659
milestone 57.0a1
Bug 1393659 - Fix inconsistent handling of max_post_bytes and max_request_bytes r=markh MozReview-Commit-ID: 4jwpAYNuoQj
services/sync/modules/constants.js
services/sync/modules/engines.js
services/sync/modules/engines/tabs.js
services/sync/modules/record.js
services/sync/modules/service.js
services/sync/tests/unit/test_postqueue.js
services/sync/tests/unit/test_syncengine_sync.js
services/sync/tests/unit/test_tab_engine.js
services/sync/tests/unit/test_tab_store.js
tools/lint/eslint/modules.json
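
For orientation before the per-file diffs: the heart of this patch is the limit resolution added to the PostQueue construction code in services/sync/modules/record.js. The following is a minimal standalone sketch of that fallback; the wrapper name resolveUploadLimits is illustrative only, but the key names and default values are taken directly from the diff below.

function resolveUploadLimits(serverConfiguration) {
  // serverConfiguration is allowed to be missing (e.g. during tests).
  let getConfig = (name, defaultVal) => {
    if (serverConfiguration && serverConfiguration.hasOwnProperty(name)) {
      return serverConfiguration[name];
    }
    return defaultVal;
  };
  return {
    // Prefer max_post_bytes (batch-aware servers); fall back to
    // max_request_bytes (pre-batch servers), then to a 260KiB default.
    max_post_bytes: getConfig("max_post_bytes",
      getConfig("max_request_bytes", 260 * 1024)),
    max_post_records: getConfig("max_post_records", Infinity),
    max_batch_bytes: getConfig("max_total_bytes", Infinity),
    max_batch_records: getConfig("max_total_records", Infinity),
    max_record_payload_bytes: getConfig("max_record_payload_bytes", 256 * 1024),
  };
}
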
--- a/services/sync/modules/constants.js
+++ b/services/sync/modules/constants.js
@@ -59,20 +59,16 @@ DEFAULT_STORE_BATCH_SIZE:              1
 HISTORY_STORE_BATCH_SIZE:              50,
 FORMS_STORE_BATCH_SIZE:                50,
 PASSWORDS_STORE_BATCH_SIZE:            50,
 
 // Default batch size for download batching
 // (how many records are fetched at a time from the server when batching is used).
 DEFAULT_DOWNLOAD_BATCH_SIZE:           1000,
 
-
-// Default maximum size for a record payload
-DEFAULT_MAX_RECORD_PAYLOAD_BYTES:      262144,  // 256KB
-
 // score thresholds for early syncs
 SINGLE_USER_THRESHOLD:                 1000,
 MULTI_DEVICE_THRESHOLD:                300,
 
 // Other score increment constants
 SCORE_INCREMENT_SMALL:                 1,
 SCORE_INCREMENT_MEDIUM:                10,
 
@@ -81,20 +77,20 @@ SCORE_INCREMENT_XLARGE:                3
 
 // Delay before incrementing global score
 SCORE_UPDATE_DELAY:                    100,
 
 // Delay for the back observer debouncer. This is chosen to be longer than any
 // observed spurious idle/back events and short enough to pre-empt user activity.
 IDLE_OBSERVER_BACK_DELAY:              100,
 
-// Max number of records or bytes to upload in a single POST - we'll do multiple POSTS if either
-// MAX_UPLOAD_RECORDS or MAX_UPLOAD_BYTES is hit)
-MAX_UPLOAD_RECORDS:                    100,
-MAX_UPLOAD_BYTES:                      1024 * 1023, // just under 1MB
+// Duplicate of URI_LENGTH_MAX from Places (nsNavHistory.h), used to discard
+// tabs with huge URIs during tab sync.
+URI_LENGTH_MAX:                        65536,
+
 MAX_HISTORY_UPLOAD:                    5000,
 MAX_HISTORY_DOWNLOAD:                  5000,
 
 // TTL of the message sent to another device when sending a tab
 NOTIFY_TAB_SENT_TTL_SECS:              1 * 3600, // 1 hour
 
 // Top-level statuses:
 STATUS_OK:                             "success.status_ok",
--- a/services/sync/modules/engines.js
+++ b/services/sync/modules/engines.js
@@ -940,24 +940,16 @@ SyncEngine.prototype = {
   get lastSyncLocal() {
     return parseInt(Svc.Prefs.get(this.name + ".lastSyncLocal", "0"), 10);
   },
   set lastSyncLocal(value) {
     // Store as a string because pref can only store C longs as numbers.
     Svc.Prefs.set(this.name + ".lastSyncLocal", value.toString());
   },
 
-  get maxRecordPayloadBytes() {
-    let serverConfiguration = this.service.serverConfiguration;
-    if (serverConfiguration && serverConfiguration.max_record_payload_bytes) {
-      return serverConfiguration.max_record_payload_bytes;
-    }
-    return DEFAULT_MAX_RECORD_PAYLOAD_BYTES;
-  },
-
   /*
    * Returns a changeset for this sync. Engine implementations can override this
    * method to bypass the tracker for certain or all changed items.
    */
   async getChangedIDs() {
     return this._tracker.changedIDs;
   },
 
@@ -1674,23 +1666,16 @@ SyncEngine.prototype = {
             out = await this._createTombstone(id);
           } else {
             out = await this._createRecord(id);
           }
           if (this._log.level <= Log.Level.Trace)
             this._log.trace("Outgoing: " + out);
 
           out.encrypt(this.service.collectionKeys.keyForCollection(this.name));
-          let payloadLength = JSON.stringify(out.payload).length;
-          if (payloadLength > this.maxRecordPayloadBytes) {
-            if (this.allowSkippedRecord) {
-              this._modified.delete(id); // Do not attempt to sync that record again
-            }
-            throw new Error(`Payload too big: ${payloadLength} bytes`);
-          }
           ok = true;
         } catch (ex) {
           this._log.warn("Error creating record", ex);
           ++counts.failed;
           if (Async.isShutdownException(ex) || !this.allowSkippedRecord) {
             if (!this.allowSkippedRecord) {
               // Don't bother for shutdown errors
               Observers.notify("weave:engine:sync:uploaded", counts, this.name);
@@ -1699,16 +1684,17 @@ SyncEngine.prototype = {
           }
         }
         if (ok) {
           let { enqueued, error } = await postQueue.enqueue(out);
           if (!enqueued) {
             ++counts.failed;
             if (!this.allowSkippedRecord) {
               Observers.notify("weave:engine:sync:uploaded", counts, this.name);
+              this._log.warn(`Failed to enqueue record "${id}" (aborting)`, error);
               throw error;
             }
             this._modified.delete(id);
             this._log.warn(`Failed to enqueue record "${id}" (skipping)`, error);
           }
         }
         await Async.promiseYield();
       }
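
The per-record size check removed from _uploadOutgoing above now lives in PostQueue.enqueue (see the record.js diff below). A sketch of the engine-side contract, with `engine` and `postQueue` standing in for the real instances; the actual loop above also handles telemetry counts and shutdown exceptions.

async function uploadOne(engine, postQueue, id, out) {
  let { enqueued, error } = await postQueue.enqueue(out);
  if (!enqueued) {
    // e.g. "Single record too large to submit to server"
    if (!engine.allowSkippedRecord) {
      throw error;                 // abort the whole upload
    }
    engine._modified.delete(id);   // otherwise drop just this record and move on
  }
}
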
--- a/services/sync/modules/engines/tabs.js
+++ b/services/sync/modules/engines/tabs.js
@@ -165,17 +165,17 @@ TabStore.prototype = {
         let current = entries[index - 1];
 
         // We ignore the tab completely if the current entry url is
         // not acceptable (we need something accurate to open).
         if (!acceptable(current.url)) {
           continue;
         }
 
-        if (current.url.length >= (MAX_UPLOAD_BYTES - 1000)) {
+        if (current.url.length > URI_LENGTH_MAX) {
           this._log.trace("Skipping over-long URL.");
           continue;
         }
 
         // The element at `index` is the current page. Previous URLs were
         // previously visited URLs; subsequent URLs are in the 'forward' stack,
         // which we can't represent in Sync, so we truncate here.
         let candidates = (entries.length == index) ?
@@ -213,18 +213,19 @@ TabStore.prototype = {
     let tabs = this.getAllTabs(true).sort(function(a, b) {
       return b.lastUsed - a.lastUsed;
     });
 
     // Figure out how many tabs we can pack into a payload.
     // We use byteLength here because the data is not encrypted in ascii yet.
     let size = new TextEncoder("utf-8").encode(JSON.stringify(tabs)).byteLength;
     let origLength = tabs.length;
+    const maxPayloadSize = this.engine.service.getMaxRecordPayloadSize();
     // See bug 535326 comment 8 for an explanation of the estimation
-    const MAX_TAB_SIZE = this.engine.maxRecordPayloadBytes / 4 * 3 - 1500;
+    const MAX_TAB_SIZE = maxPayloadSize / 4 * 3 - 1500;
     if (size > MAX_TAB_SIZE) {
       // Estimate a little more than the direct fraction to maximize packing
       let cutoff = Math.ceil(tabs.length * MAX_TAB_SIZE / size);
       tabs = tabs.slice(0, cutoff + 1);
 
       // Keep dropping off the last entry until the data fits
       while (JSON.stringify(tabs).length > MAX_TAB_SIZE)
         tabs.pop();
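
With the hard-coded 256KiB fallback from getMaxRecordPayloadSize (added in service.js below), the estimation above works out as:

// Worked example of the tab-payload budget under the default limit:
//   maxPayloadSize = 256 * 1024             = 262144 bytes
//   MAX_TAB_SIZE   = 262144 / 4 * 3 - 1500  = 196608 - 1500 = 195108 bytes
// i.e. roughly three quarters of the record payload limit is budgeted for the
// unencrypted tab JSON (see bug 535326 comment 8 for the estimation).
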
--- a/services/sync/modules/record.js
+++ b/services/sync/modules/record.js
@@ -786,45 +786,49 @@ Collection.prototype = {
       this.batch = batch;
       this.commit = commit;
       for (let [header, value] of headers) {
         this.setHeader(header, value);
       }
       return Resource.prototype.post.call(this, data);
     }
     let getConfig = (name, defaultVal) => {
+      // serverConfiguration is allowed to be missing during tests.
       if (this._service.serverConfiguration && this._service.serverConfiguration.hasOwnProperty(name)) {
         return this._service.serverConfiguration[name];
       }
       return defaultVal;
-    }
+    };
 
+    // On a server that does not support the batch API, we expect the /info/configuration
+    // endpoint to provide "max_record_payload_bytes" and "max_request_bytes" limits.
+    // On a server that supports the batching API, we expect "max_record_payload_bytes"
+    // (as before), as well as "max_post_bytes", "max_post_records", "max_total_bytes" and
+    // "max_total_records". Much of the complexity here and in enqueue is attempting to
+    // handle both these cases simultaneously.
     let config = {
-      max_post_bytes: getConfig("max_post_bytes", MAX_UPLOAD_BYTES),
-      max_post_records: getConfig("max_post_records", MAX_UPLOAD_RECORDS),
+      // Note that from the server's POV, max_post_bytes is the sum of payload
+      // lengths, but we treat it equivalently to max_request_bytes (which is
+      // payload + metadata lengths).
+      max_post_bytes: getConfig("max_post_bytes",
+        getConfig("max_request_bytes", 260 * 1024)),
+
+      max_post_records: getConfig("max_post_records", Infinity),
 
       max_batch_bytes: getConfig("max_total_bytes", Infinity),
       max_batch_records: getConfig("max_total_records", Infinity),
-    }
+      max_record_payload_bytes: getConfig("max_record_payload_bytes", 256 * 1024),
+    };
 
-    // Handle config edge cases
-    if (config.max_post_records <= 0) { config.max_post_records = MAX_UPLOAD_RECORDS; }
-    if (config.max_batch_records <= 0) { config.max_batch_records = Infinity; }
-    if (config.max_post_bytes <= 0) { config.max_post_bytes = MAX_UPLOAD_BYTES; }
-    if (config.max_batch_bytes <= 0) { config.max_batch_bytes = Infinity; }
-
-    // Max size of BSO payload is 256k. This assumes at most 4k of overhead,
-    // which sounds like plenty. If the server says it can't handle this, we
-    // might have valid records we can't sync, so we give up on syncing.
-    let requiredMax = 260 * 1024;
-    if (config.max_post_bytes < requiredMax) {
+    if (config.max_post_bytes <= config.max_record_payload_bytes) {
       this._log.error("Server configuration max_post_bytes is too low", config);
       throw new Error("Server configuration max_post_bytes is too low");
     }
 
+    this._log.trace("new PostQueue created with config", config);
     return new PostQueue(poster, timestamp, config, log, postCallback);
   },
 };
 
 /* A helper to manage the posting of records while respecting the various
    size limits.
 
    This supports the concept of a server-side "batch". The general idea is:
@@ -884,48 +888,52 @@ PostQueue.prototype = {
   async enqueue(record) {
     // We want to ensure the record has a .toJSON() method defined - even
     // though JSON.stringify() would implicitly call it, the stringify might
     // still work even if it isn't defined, which isn't what we want.
     let jsonRepr = record.toJSON();
     if (!jsonRepr) {
       throw new Error("You must only call this with objects that explicitly support JSON");
     }
+
     let bytes = JSON.stringify(jsonRepr);
 
-    // Do a flush if we can't add this record without exceeding our single-request
-    // limits, or without exceeding the total limit for a single batch.
-    let newLength = this.queued.length + bytes.length + 2; // extras for leading "[" / "," and trailing "]"
+    // Tests sometimes return objects without payloads, and we just use the
+    // byte length for those cases.
+    let payloadLength = jsonRepr.payload ? jsonRepr.payload.length : bytes.length;
+    if (payloadLength > this.config.max_record_payload_bytes) {
+      return { enqueued: false, error: new Error("Single record too large to submit to server") };
+    }
 
-    let maxAllowedBytes = Math.min(256 * 1024, this.config.max_post_bytes);
+    // The `+ 2` is to account for the 2-byte (maximum) overhead (one byte for
+    // the leading comma or "[", which all records will have, and the other for
+    // the final trailing "]", only present for the last record).
+    let newLength = this.queued.length + bytes.length + 2;
+    let newRecordCount = this.numQueued + 1;
 
-    let postSizeExceeded = this.numQueued >= this.config.max_post_records ||
-                           newLength >= maxAllowedBytes;
+    // Note that the max_post_records and max_batch_records server limits are
+    // inclusive (e.g. if the max_post_records == 100, it will allow a post with
+    // 100 records), but the byte limits are not. (See
+    // https://github.com/mozilla-services/server-syncstorage/issues/73)
 
-    let batchSizeExceeded = (this.numQueued + this.numAlreadyBatched) >= this.config.max_batch_records ||
+    // Have we exceeded the maximum size or record count for a single POST?
+    let postSizeExceeded = newRecordCount > this.config.max_post_records ||
+                           newLength >= this.config.max_post_bytes;
+
+    // Have we exceeded the maximum size or record count for the entire batch?
+    let batchSizeExceeded = (newRecordCount + this.numAlreadyBatched) > this.config.max_batch_records ||
                             (newLength + this.bytesAlreadyBatched) >= this.config.max_batch_bytes;
 
-    let singleRecordTooBig = bytes.length + 2 > maxAllowedBytes;
-
     if (postSizeExceeded || batchSizeExceeded) {
-      this.log.trace(`PostQueue flushing due to postSizeExceeded=${postSizeExceeded}, batchSizeExceeded=${batchSizeExceeded}` +
-                     `, max_batch_bytes: ${this.config.max_batch_bytes}, max_post_bytes: ${this.config.max_post_bytes}`);
-
-      if (singleRecordTooBig) {
-        return { enqueued: false, error: new Error("Single record too large to submit to server") };
-      }
-
+      this.log.trace("PostQueue flushing due to ", { postSizeExceeded, batchSizeExceeded });
       // We need to write the queue out before handling this one, but we only
       // commit the batch (and thus start a new one) if the batch is full.
-      // Note that if a single record is too big for the batch or post, then
-      // the batch may be empty, and so we don't flush in that case.
-      if (this.numQueued) {
-        await this.flush(batchSizeExceeded || singleRecordTooBig);
-      }
+      await this.flush(batchSizeExceeded);
     }
+
     // Either a ',' or a '[' depending on whether this is the first record.
     this.queued += this.numQueued ? "," : "[";
     this.queued += bytes;
     this.numQueued++;
     return { enqueued: true };
   },
 
   async flush(finalBatchPost) {
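
The inclusive/exclusive distinction called out in the enqueue comment above is easiest to see with small numbers, using the same byte accounting the tests below rely on (a record serializes to exactly its nominal size):

// Record-count limits are inclusive, byte limits are exclusive:
//   max_post_records = 2:  records #1 and #2 share a POST; enqueueing #3 makes
//                          newRecordCount = 3 > 2, so the queue flushes first.
//   max_post_bytes = 50:   two 20-byte records queue as "[" + 20 + "," + 20
//                          = 42 bytes; a third would make newLength
//                          42 + 20 + 2 = 64 >= 50, so the queue also flushes first.
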
--- a/services/sync/modules/service.js
+++ b/services/sync/modules/service.js
@@ -613,16 +613,26 @@ Sync11Service.prototype = {
     } catch (ex) {
       // This means no keys are present, or there's a network error.
       this._log.debug("Failed to fetch and verify keys", ex);
       this.errorHandler.checkServerError(ex);
       return false;
     }
   },
 
+  getMaxRecordPayloadSize() {
+    let config = this.serverConfiguration;
+    if (!config || !config.max_record_payload_bytes) {
+      this._log.warn("No config or incomplete config in getMaxRecordPayloadSize."
+                     + " Are we running tests?");
+      return 256 * 1024;
+    }
+    return config.max_record_payload_bytes;
+  },
+
   async verifyLogin(allow40XRecovery = true) {
     if (!this.identity.username) {
       this._log.warn("No username in verifyLogin.");
       this.status.login = LOGIN_FAILED_NO_USERNAME;
       return false;
     }
 
     // Attaching auth credentials to a request requires access to
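
Engines and stores now obtain the payload limit from the service rather than reading serverConfiguration themselves; the tabs.js hunk earlier in this patch is the canonical consumer. A small usage sketch (the helper name tabPayloadBudget is illustrative; the 3/4 factor and 1500-byte slack come from the existing estimation in tabs.js):

function tabPayloadBudget(service) {
  // max_record_payload_bytes from /info/configuration, or the 256KiB fallback.
  const maxPayloadSize = service.getMaxRecordPayloadSize();
  return maxPayloadSize / 4 * 3 - 1500;
}
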
--- a/services/sync/tests/unit/test_postqueue.js
+++ b/services/sync/tests/unit/test_postqueue.js
@@ -31,16 +31,17 @@ function makePostQueue(config, lastModTi
 }
 
 add_task(async function test_simple() {
   let config = {
     max_post_bytes: 1000,
     max_post_records: 100,
     max_batch_bytes: Infinity,
     max_batch_records: Infinity,
+    max_record_payload_bytes: 1000,
   }
 
   const time = 11111111;
 
   function* responseGenerator() {
     yield { success: true, status: 200, headers: { "x-weave-timestamp": time + 100, "x-last-modified": time + 100 } };
   }
 
@@ -55,96 +56,92 @@ add_task(async function test_simple() {
     batch: "true"}]);
 });
 
 // Test we do the right thing when we need to make multiple posts when there
 // are no batch semantics
 add_task(async function test_max_post_bytes_no_batch() {
   let config = {
     max_post_bytes: 50,
-    max_post_records: 4,
+    max_post_records: Infinity,
     max_batch_bytes: Infinity,
     max_batch_records: Infinity,
+    max_record_payload_bytes: 50,
   }
 
   const time = 11111111;
   function* responseGenerator() {
     yield { success: true, status: 200, headers: { "x-weave-timestamp": time + 100, "x-last-modified": time + 100 } };
     yield { success: true, status: 200, headers: { "x-weave-timestamp": time + 200, "x-last-modified": time + 200 } };
   }
 
   let { pq, stats } = makePostQueue(config, time, responseGenerator());
   await pq.enqueue(makeRecord(20)); // total size now 22 bytes - "[" + record + "]"
   await pq.enqueue(makeRecord(20)); // total size now 43 bytes - "[" + record + "," + record + "]"
   await pq.enqueue(makeRecord(20)); // this will exceed our byte limit, so will be in the 2nd POST.
   await pq.flush(true);
-
   deepEqual(stats.posts, [
     {
       nbytes: 43, // 43 for the first post
       commit: false,
       headers: [["x-if-unmodified-since", time]],
       batch: "true",
     }, {
       nbytes: 22,
       commit: false, // we know we aren't in a batch, so never commit.
       headers: [["x-if-unmodified-since", time + 100]],
       batch: null,
     }
   ]);
   equal(pq.lastModified, time + 200);
 });
 
-// Similar to the above, but we've hit max_records instead of max_bytes.
-add_task(async function test_max_post_records_no_batch() {
+
+add_task(async function test_max_record_payload_bytes_no_batch() {
   let config = {
     max_post_bytes: 100,
-    max_post_records: 2,
+    max_post_records: Infinity,
     max_batch_bytes: Infinity,
     max_batch_records: Infinity,
+    max_record_payload_bytes: 50,
   }
 
   const time = 11111111;
 
   function* responseGenerator() {
     yield { success: true, status: 200, headers: { "x-weave-timestamp": time + 100, "x-last-modified": time + 100 } };
-    yield { success: true, status: 200, headers: { "x-weave-timestamp": time + 200, "x-last-modified": time + 200 } };
   }
 
   let { pq, stats } = makePostQueue(config, time, responseGenerator());
-  await pq.enqueue(makeRecord(20)); // total size now 22 bytes - "[" + record + "]"
-  await pq.enqueue(makeRecord(20)); // total size now 43 bytes - "[" + record + "," + record + "]"
-  await pq.enqueue(makeRecord(20)); // this will exceed our records limit, so will be in the 2nd POST.
+  await pq.enqueue(makeRecord(50)); // total size now 52 bytes - "[" + record + "]"
+  await pq.enqueue(makeRecord(46)); // total size now 99 bytes - "[" + record0 + "," + record1 + "]"
+
   await pq.flush(true);
 
   deepEqual(stats.posts, [
     {
-      nbytes: 43, // 43 for the first post
-      commit: false,
+      nbytes: 99,
+      commit: true, // the only POST, flushed with flush(true), so a commit is requested.
       batch: "true",
       headers: [["x-if-unmodified-since", time]],
-    }, {
-      nbytes: 22,
-      commit: false, // we know we aren't in a batch, so never commit.
-      batch: null,
-      headers: [["x-if-unmodified-since", time + 100]],
     }
   ]);
-  equal(pq.lastModified, time + 200);
+  equal(pq.lastModified, time + 100);
 });
 
 // Batch tests.
 
 // Test making a single post when batch semantics are in place.
 add_task(async function test_single_batch() {
   let config = {
     max_post_bytes: 1000,
     max_post_records: 100,
     max_batch_bytes: 2000,
     max_batch_records: 200,
+    max_record_payload_bytes: 1000,
   }
   const time = 11111111;
   function* responseGenerator() {
     yield { success: true, status: 202, obj: { batch: 1234 },
             headers: { "x-last-modified": time, "x-weave-timestamp": time + 100 },
     };
   }
 
@@ -165,16 +162,17 @@ add_task(async function test_single_batc
 // Test we do the right thing when we need to make multiple posts when there
 // are batch semantics in place.
 add_task(async function test_max_post_bytes_batch() {
   let config = {
     max_post_bytes: 50,
     max_post_records: 4,
     max_batch_bytes: 5000,
     max_batch_records: 100,
+    max_record_payload_bytes: 50,
   }
 
   const time = 11111111;
   function* responseGenerator() {
     yield { success: true, status: 202, obj: { batch: 1234 },
             headers: { "x-last-modified": time, "x-weave-timestamp": time + 100 },
     };
     yield { success: true, status: 202, obj: { batch: 1234 },
@@ -201,22 +199,23 @@ add_task(async function test_max_post_by
       headers: [["x-if-unmodified-since", time]],
     }
   ]);
 
   equal(pq.lastModified, time + 200);
 });
 
 // Test we do the right thing when the batch bytes limit is exceeded.
-add_task(async function test_max_post_bytes_batch() {
+add_task(async function test_max_batch_bytes_batch() {
   let config = {
     max_post_bytes: 50,
     max_post_records: 20,
     max_batch_bytes: 70,
     max_batch_records: 100,
+    max_record_payload_bytes: 50,
   }
 
   const time0 = 11111111;
   const time1 = 22222222;
   function* responseGenerator() {
     yield { success: true, status: 202, obj: { batch: 1234 },
             headers: { "x-last-modified": time0, "x-weave-timestamp": time0 + 100 },
     };
@@ -276,16 +275,17 @@ add_task(async function test_max_post_by
 // Test we split up the posts when we exceed the record limit when batch semantics
 // are in place.
 add_task(async function test_max_post_bytes_batch() {
   let config = {
     max_post_bytes: 1000,
     max_post_records: 2,
     max_batch_bytes: 5000,
     max_batch_records: 100,
+    max_record_payload_bytes: 1000,
   }
 
   const time = 11111111;
   function* responseGenerator() {
     yield { success: true, status: 202, obj: { batch: 1234 },
             headers: { "x-last-modified": time, "x-weave-timestamp": time + 100 },
     };
     yield { success: true, status: 202, obj: { batch: 1234 },
@@ -318,16 +318,17 @@ add_task(async function test_max_post_by
 
 // Test that a single huge record fails to enqueue
 add_task(async function test_huge_record() {
   let config = {
     max_post_bytes: 50,
     max_post_records: 100,
     max_batch_bytes: 5000,
     max_batch_records: 100,
+    max_record_payload_bytes: 50,
   }
 
   const time = 11111111;
   function* responseGenerator() {
     yield { success: true, status: 202, obj: { batch: 1234 },
             headers: { "x-last-modified": time, "x-weave-timestamp": time + 100 },
     };
     yield { success: true, status: 202, obj: { batch: 1234 },
@@ -368,16 +369,17 @@ add_task(async function test_huge_record
 
 // Test we do the right thing when the batch record limit is exceeded.
 add_task(async function test_max_records_batch() {
   let config = {
     max_post_bytes: 1000,
     max_post_records: 3,
     max_batch_bytes: 10000,
     max_batch_records: 5,
+    max_record_payload_bytes: 1000,
   }
 
   const time0 = 11111111;
   const time1 = 22222222;
   function* responseGenerator() {
     yield { success: true, status: 202, obj: { batch: 1234 },
             headers: { "x-last-modified": time0, "x-weave-timestamp": time0 + 100 },
     };
@@ -430,8 +432,52 @@ add_task(async function test_max_records
       commit: true,
       batch: 5678,
       headers: [["x-if-unmodified-since", time1]],
     },
   ]);
 
   equal(pq.lastModified, time1 + 200);
 });
+
+// Test we do the right thing when the batch byte limit is met but not exceeded.
+add_task(async function test_packed_batch() {
+  let config = {
+    max_post_bytes: 54,
+    max_post_records: 4,
+    max_batch_bytes: 107,
+    max_batch_records: 100,
+    max_record_payload_bytes: 25,
+  }
+
+  const time = 11111111;
+  function* responseGenerator() {
+    yield { success: true, status: 202, obj: { batch: 1234 },
+            headers: { "x-last-modified": time, "x-weave-timestamp": time + 100 },
+    };
+    yield { success: true, status: 202, obj: { batch: 1234 },
+            headers: { "x-last-modified": time + 200, "x-weave-timestamp": time + 200 },
+    };
+  }
+
+  let { pq, stats } = makePostQueue(config, time, responseGenerator());
+  ok((await pq.enqueue(makeRecord(25))).enqueued); // total size now 27 bytes - "[" + record + "]"
+  ok((await pq.enqueue(makeRecord(25))).enqueued); // total size now 53 bytes - "[" + record + "," + record + "]"
+  ok((await pq.enqueue(makeRecord(25))).enqueued); // this will exceed our byte limit, so will be in the 2nd POST.
+  ok((await pq.enqueue(makeRecord(25))).enqueued);
+  await pq.flush(true);
+
+  deepEqual(stats.posts, [
+    {
+      nbytes: 53,
+      commit: false,
+      batch: "true",
+      headers: [["x-if-unmodified-since", time]],
+    }, {
+      nbytes: 53,
+      commit: true,
+      batch: 1234,
+      headers: [["x-if-unmodified-since", time]],
+    }
+  ]);
+
+  equal(pq.lastModified, time + 200);
+});
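
The byte accounting in the new test_packed_batch task works out as follows (per the test comments, makeRecord(n) serializes to exactly n bytes):

//   record #1: queued = "[" + 25                = 26 bytes
//   record #2: queued = "[" + 25 + "," + 25     = 52 bytes
//   record #3: newLength = 52 + 25 + 2 = 79 >= max_post_bytes (54)
//              -> flush POST #1: 53 bytes ("[" + 25 + "," + 25 + "]"), no commit
//   record #4: joins record #3 in POST #2 (53 bytes again)
//   flush(true): POST #2 is committed; the batch totals 106 bytes, just under
//                max_batch_bytes (107), so the batch never needs to be split.
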
--- a/services/sync/tests/unit/test_syncengine_sync.js
+++ b/services/sync/tests/unit/test_syncengine_sync.js
@@ -1339,83 +1339,16 @@ add_task(async function test_uploadOutgo
     do_check_eq(engine._tracker.changedIDs.scotsman, SCOTSMAN_CHANGED);
     do_check_eq(engine._tracker.changedIDs.peppercorn, PEPPERCORN_CHANGED);
 
   } finally {
     await promiseClean(engine, server);
   }
 });
 
-/* A couple of "functional" tests to ensure we split records into appropriate
-   POST requests. More comprehensive unit-tests for this "batching" are in
-   test_postqueue.js.
-*/
-add_task(async function test_uploadOutgoing_MAX_UPLOAD_RECORDS() {
-  _("SyncEngine._uploadOutgoing uploads in batches of MAX_UPLOAD_RECORDS");
-
-  let collection = new ServerCollection();
-
-  // Let's count how many times the client posts to the server
-  var noOfUploads = 0;
-  collection.post = (function(orig) {
-    return function(data, request) {
-      // This test doesn't arrange for batch semantics - so we expect the
-      // first request to come in with batch=true and the others to have no
-      // batch related headers at all (as the first response did not provide
-      // a batch ID)
-      if (noOfUploads == 0) {
-        do_check_eq(request.queryString, "batch=true");
-      } else {
-        do_check_eq(request.queryString, "");
-      }
-      noOfUploads++;
-      return orig.call(this, data, request);
-    };
-  }(collection.post));
-
-  // Create a bunch of records (and server side handlers)
-  let engine = makeRotaryEngine();
-  for (var i = 0; i < 234; i++) {
-    let id = "record-no-" + i;
-    engine._store.items[id] = "Record No. " + i;
-    engine._tracker.addChangedID(id, 0);
-    collection.insert(id);
-  }
-
-  let meta_global = Service.recordManager.set(engine.metaURL,
-                                              new WBORecord(engine.metaURL));
-  meta_global.payload.engines = {rotary: {version: engine.version,
-                                         syncID: engine.syncID}};
-
-  let server = sync_httpd_setup({
-      "/1.1/foo/storage/rotary": collection.handler()
-  });
-
-  await SyncTestingInfrastructure(server);
-  try {
-
-    // Confirm initial environment.
-    do_check_eq(noOfUploads, 0);
-
-    await engine._syncStartup();
-    await engine._uploadOutgoing();
-
-    // Ensure all records have been uploaded.
-    for (i = 0; i < 234; i++) {
-      do_check_true(!!collection.payload("record-no-" + i));
-    }
-
-    // Ensure that the uploads were performed in batches of MAX_UPLOAD_RECORDS.
-    do_check_eq(noOfUploads, Math.ceil(234 / MAX_UPLOAD_RECORDS));
-
-  } finally {
-    await cleanAndGo(engine, server);
-  }
-});
-
 async function createRecordFailTelemetry(allowSkippedRecord) {
   Service.identity.username = "foo";
   let collection = new ServerCollection();
   collection._wbos.flying = new ServerWBO("flying");
   collection._wbos.scotsman = new ServerWBO("scotsman");
 
   let server = sync_httpd_setup({
       "/1.1/foo/storage/rotary": collection.handler()
@@ -1497,23 +1430,23 @@ add_task(async function test_uploadOutgo
 });
 
 add_task(async function test_uploadOutgoing_createRecord_throws_dontAllowSkipRecord() {
   _("SyncEngine._uploadOutgoing will throw if createRecord throws and allowSkipRecord is set to false");
   await createRecordFailTelemetry(false);
 });
 
 add_task(async function test_uploadOutgoing_largeRecords() {
-  _("SyncEngine._uploadOutgoing throws on records larger than MAX_UPLOAD_BYTES");
+  _("SyncEngine._uploadOutgoing throws on records larger than the max record payload size");
 
   let collection = new ServerCollection();
 
   let engine = makeRotaryEngine();
   engine.allowSkippedRecord = false;
-  engine._store.items["large-item"] = "Y".repeat(MAX_UPLOAD_BYTES * 2);
+  engine._store.items["large-item"] = "Y".repeat(Service.getMaxRecordPayloadSize() * 2);
   engine._tracker.addChangedID("large-item", 0);
   collection.insert("large-item");
 
 
   let meta_global = Service.recordManager.set(engine.metaURL,
                                               new WBORecord(engine.metaURL));
   meta_global.payload.engines = {rotary: {version: engine.version,
                                          syncID: engine.syncID}};
@@ -1650,16 +1583,20 @@ add_task(async function test_syncFinish_
 
 add_task(async function test_sync_partialUpload() {
   _("SyncEngine.sync() keeps changedIDs that couldn't be uploaded.");
 
   let collection = new ServerCollection();
   let server = sync_httpd_setup({
       "/1.1/foo/storage/rotary": collection.handler()
   });
+  let oldServerConfiguration = Service.serverConfiguration;
+  Service.serverConfiguration = {
+    max_post_records: 100
+  };
   await SyncTestingInfrastructure(server);
   generateNewKeys(Service.collectionKeys);
 
   let engine = makeRotaryEngine();
   engine.lastSync = 123; // needs to be non-zero so that tracker is queried
   engine.lastSyncLocal = 456;
 
   // Let the third upload fail completely
@@ -1703,25 +1640,26 @@ add_task(async function test_sync_partia
 
     // The timestamp has been updated.
     do_check_true(engine.lastSyncLocal > 456);
 
     for (let i = 0; i < 234; i++) {
       let id = "record-no-" + i;
       // Ensure failed records are back in the tracker:
       // * records no. 23 and 42 were rejected by the server,
-      // * records no. 200 and higher couldn't be uploaded because we failed
-      //   hard on the 3rd upload.
+      // * records no. 200 and higher (the third batch of 100) couldn't be
+      //   uploaded because we failed hard on the 3rd upload.
       if ((i == 23) || (i == 42) || (i >= 200))
         do_check_eq(engine._tracker.changedIDs[id], i);
       else
         do_check_false(id in engine._tracker.changedIDs);
     }
 
   } finally {
+    Service.serverConfiguration = oldServerConfiguration;
     await promiseClean(engine, server);
   }
 });
 
 add_task(async function test_canDecrypt_noCryptoKeys() {
   _("SyncEngine.canDecrypt returns false if the engine fails to decrypt items on the server, e.g. due to a missing crypto key collection.");
 
   // Wipe collection keys so we can test the desired scenario.
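
The serverConfiguration stub added to test_sync_partialUpload above pins max_post_records so the batching arithmetic stays deterministic:

// With max_post_records = 100 and 234 changed records, uploads split as:
//   POST #1: records   0- 99
//   POST #2: records 100-199
//   POST #3: records 200-233   <- the upload that is made to fail,
// which is why the test expects records no. 200 and higher (plus the two
// server-rejected ones, 23 and 42) to remain in the tracker.
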
--- a/services/sync/tests/unit/test_tab_engine.js
+++ b/services/sync/tests/unit/test_tab_engine.js
@@ -16,17 +16,17 @@ async function getMocks() {
   store.shouldSkipWindow = mockShouldSkipWindow;
   return [engine, store];
 }
 
 add_task(async function test_getOpenURLs() {
   _("Test getOpenURLs.");
   let [engine, store] = await getMocks();
 
-  let superLongURL = "http://" + (new Array(MAX_UPLOAD_BYTES).join("w")) + ".com/";
+  let superLongURL = "http://" + (new Array(URI_LENGTH_MAX).join("w")) + ".com/";
   let urls = ["http://bar.com", "http://foo.com", "http://foobar.com", superLongURL];
   function fourURLs() {
     return urls.pop();
   }
   store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, fourURLs, 1, 4);
 
   let matches;
 
--- a/services/sync/tests/unit/test_tab_store.js
+++ b/services/sync/tests/unit/test_tab_store.js
@@ -89,18 +89,19 @@ add_task(async function test_getAllTabs(
 
 add_task(async function test_createRecord() {
   let store = await getMockStore();
   let record;
 
   store.getTabState = mockGetTabState;
   store.shouldSkipWindow = mockShouldSkipWindow;
   store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://foo.com", 1, 1);
-
-  let numtabs = 2600; // Note: this number is connected to DEFAULT_MAX_RECORD_PAYLOAD_BYTES
+  // This number is sensitive to our hard-coded default max record payload size
+  // in service.js (256 * 1024)
+  let numtabs = 2600;
 
   store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://foo.com", 1, 1);
   record = await store.createRecord("fake-guid");
   ok(record instanceof TabSetRecord);
   equal(record.tabs.length, 1);
 
   _("create a big record");
   store.getWindowEnumerator = mockGetWindowEnumerator.bind(this, "http://foo.com", 1, numtabs);
--- a/tools/lint/eslint/modules.json
+++ b/tools/lint/eslint/modules.json
@@ -26,17 +26,17 @@
   "BootstrapMonitor.jsm": ["monitor"],
   "browser-loader.js": ["BrowserLoader"],
   "browserid_identity.js": ["BrowserIDManager", "AuthenticationError"],
   "CertUtils.jsm": ["BadCertHandler", "checkCert", "readCertPrefs", "validateCert"],
   "clients.js": ["ClientEngine", "ClientsRec"],
   "collection_repair.js": ["getRepairRequestor", "getAllRepairRequestors", "CollectionRepairRequestor", "getRepairResponder", "CollectionRepairResponder"],
   "collection_validator.js": ["CollectionValidator", "CollectionProblemData"],
   "Console.jsm": ["console", "ConsoleAPI"],
-  "constants.js": ["WEAVE_VERSION", "SYNC_API_VERSION", "USER_API_VERSION", "MISC_API_VERSION", "STORAGE_VERSION", "PREFS_BRANCH", "PWDMGR_HOST", "PWDMGR_PASSWORD_REALM", "PWDMGR_PASSPHRASE_REALM", "PWDMGR_KEYBUNDLE_REALM", "DEFAULT_KEYBUNDLE_NAME", "HMAC_INPUT", "SYNC_KEY_ENCODED_LENGTH", "SYNC_KEY_DECODED_LENGTH", "SYNC_KEY_HYPHENATED_LENGTH", "NO_SYNC_NODE_INTERVAL", "MAX_ERROR_COUNT_BEFORE_BACKOFF", "MAX_IGNORE_ERROR_COUNT", "MINIMUM_BACKOFF_INTERVAL", "MAXIMUM_BACKOFF_INTERVAL", "HMAC_EVENT_INTERVAL", "MASTER_PASSWORD_LOCKED_RETRY_INTERVAL", "DEFAULT_BLOCK_PERIOD", "DEFAULT_GUID_FETCH_BATCH_SIZE", "DEFAULT_MOBILE_GUID_FETCH_BATCH_SIZE", "DEFAULT_STORE_BATCH_SIZE", "HISTORY_STORE_BATCH_SIZE", "FORMS_STORE_BATCH_SIZE", "PASSWORDS_STORE_BATCH_SIZE", "ADDONS_STORE_BATCH_SIZE", "APPS_STORE_BATCH_SIZE", "DEFAULT_DOWNLOAD_BATCH_SIZE", "DEFAULT_MAX_RECORD_PAYLOAD_BYTES", "SINGLE_USER_THRESHOLD", "MULTI_DEVICE_THRESHOLD", "SCORE_INCREMENT_SMALL", "SCORE_INCREMENT_MEDIUM", "SCORE_INCREMENT_XLARGE", "SCORE_UPDATE_DELAY", "IDLE_OBSERVER_BACK_DELAY", "MAX_UPLOAD_RECORDS", "MAX_UPLOAD_BYTES", "MAX_HISTORY_UPLOAD", "MAX_HISTORY_DOWNLOAD", "NOTIFY_TAB_SENT_TTL_SECS", "STATUS_OK", "SYNC_FAILED", "LOGIN_FAILED", "SYNC_FAILED_PARTIAL", "CLIENT_NOT_CONFIGURED", "STATUS_DISABLED", "MASTER_PASSWORD_LOCKED", "LOGIN_SUCCEEDED", "SYNC_SUCCEEDED", "ENGINE_SUCCEEDED", "LOGIN_FAILED_NO_USERNAME", "LOGIN_FAILED_NO_PASSWORD", "LOGIN_FAILED_NO_PASSPHRASE", "LOGIN_FAILED_NETWORK_ERROR", "LOGIN_FAILED_SERVER_ERROR", "LOGIN_FAILED_INVALID_PASSPHRASE", "LOGIN_FAILED_LOGIN_REJECTED", "METARECORD_DOWNLOAD_FAIL", "VERSION_OUT_OF_DATE", "DESKTOP_VERSION_OUT_OF_DATE", "SETUP_FAILED_NO_PASSPHRASE", "CREDENTIALS_CHANGED", "ABORT_SYNC_COMMAND", "NO_SYNC_NODE_FOUND", "OVER_QUOTA", "PROLONGED_SYNC_FAILURE", "SERVER_MAINTENANCE", "RESPONSE_OVER_QUOTA", "ENGINE_UPLOAD_FAIL", "ENGINE_DOWNLOAD_FAIL", "ENGINE_UNKNOWN_FAIL", "ENGINE_APPLY_FAIL", "ENGINE_METARECORD_DOWNLOAD_FAIL", "ENGINE_METARECORD_UPLOAD_FAIL", "ENGINE_BATCH_INTERRUPTED", "JPAKE_ERROR_CHANNEL", "JPAKE_ERROR_NETWORK", "JPAKE_ERROR_SERVER", "JPAKE_ERROR_TIMEOUT", "JPAKE_ERROR_INTERNAL", "JPAKE_ERROR_INVALID", "JPAKE_ERROR_NODATA", "JPAKE_ERROR_KEYMISMATCH", "JPAKE_ERROR_WRONGMESSAGE", "JPAKE_ERROR_USERABORT", "JPAKE_ERROR_DELAYUNSUPPORTED", "INFO_COLLECTIONS", "INFO_COLLECTION_USAGE", "INFO_COLLECTION_COUNTS", "INFO_QUOTA", "kSyncNotConfigured", "kSyncMasterPasswordLocked", "kSyncWeaveDisabled", "kSyncNetworkOffline", "kSyncBackoffNotMet", "kFirstSyncChoiceNotMade", "FIREFOX_ID", "FENNEC_ID", "SEAMONKEY_ID", "TEST_HARNESS_ID", "MIN_PP_LENGTH", "MIN_PASS_LENGTH", "DEVICE_TYPE_DESKTOP", "DEVICE_TYPE_MOBILE", "SQLITE_MAX_VARIABLE_NUMBER"],
+  "constants.js": ["WEAVE_VERSION", "SYNC_API_VERSION", "USER_API_VERSION", "MISC_API_VERSION", "STORAGE_VERSION", "PREFS_BRANCH", "PWDMGR_HOST", "PWDMGR_PASSWORD_REALM", "PWDMGR_PASSPHRASE_REALM", "PWDMGR_KEYBUNDLE_REALM", "DEFAULT_KEYBUNDLE_NAME", "HMAC_INPUT", "SYNC_KEY_ENCODED_LENGTH", "SYNC_KEY_DECODED_LENGTH", "SYNC_KEY_HYPHENATED_LENGTH", "NO_SYNC_NODE_INTERVAL", "MAX_ERROR_COUNT_BEFORE_BACKOFF", "MAX_IGNORE_ERROR_COUNT", "MINIMUM_BACKOFF_INTERVAL", "MAXIMUM_BACKOFF_INTERVAL", "HMAC_EVENT_INTERVAL", "MASTER_PASSWORD_LOCKED_RETRY_INTERVAL", "DEFAULT_BLOCK_PERIOD", "DEFAULT_GUID_FETCH_BATCH_SIZE", "DEFAULT_MOBILE_GUID_FETCH_BATCH_SIZE", "DEFAULT_STORE_BATCH_SIZE", "HISTORY_STORE_BATCH_SIZE", "FORMS_STORE_BATCH_SIZE", "PASSWORDS_STORE_BATCH_SIZE", "ADDONS_STORE_BATCH_SIZE", "APPS_STORE_BATCH_SIZE", "DEFAULT_DOWNLOAD_BATCH_SIZE", "DEFAULT_MAX_RECORD_PAYLOAD_BYTES", "SINGLE_USER_THRESHOLD", "MULTI_DEVICE_THRESHOLD", "SCORE_INCREMENT_SMALL", "SCORE_INCREMENT_MEDIUM", "SCORE_INCREMENT_XLARGE", "SCORE_UPDATE_DELAY", "IDLE_OBSERVER_BACK_DELAY", "URI_LENGTH_MAX", "MAX_HISTORY_UPLOAD", "MAX_HISTORY_DOWNLOAD", "NOTIFY_TAB_SENT_TTL_SECS", "STATUS_OK", "SYNC_FAILED", "LOGIN_FAILED", "SYNC_FAILED_PARTIAL", "CLIENT_NOT_CONFIGURED", "STATUS_DISABLED", "MASTER_PASSWORD_LOCKED", "LOGIN_SUCCEEDED", "SYNC_SUCCEEDED", "ENGINE_SUCCEEDED", "LOGIN_FAILED_NO_USERNAME", "LOGIN_FAILED_NO_PASSWORD", "LOGIN_FAILED_NO_PASSPHRASE", "LOGIN_FAILED_NETWORK_ERROR", "LOGIN_FAILED_SERVER_ERROR", "LOGIN_FAILED_INVALID_PASSPHRASE", "LOGIN_FAILED_LOGIN_REJECTED", "METARECORD_DOWNLOAD_FAIL", "VERSION_OUT_OF_DATE", "DESKTOP_VERSION_OUT_OF_DATE", "SETUP_FAILED_NO_PASSPHRASE", "CREDENTIALS_CHANGED", "ABORT_SYNC_COMMAND", "NO_SYNC_NODE_FOUND", "OVER_QUOTA", "PROLONGED_SYNC_FAILURE", "SERVER_MAINTENANCE", "RESPONSE_OVER_QUOTA", "ENGINE_UPLOAD_FAIL", "ENGINE_DOWNLOAD_FAIL", "ENGINE_UNKNOWN_FAIL", "ENGINE_APPLY_FAIL", "ENGINE_METARECORD_DOWNLOAD_FAIL", "ENGINE_METARECORD_UPLOAD_FAIL", "ENGINE_BATCH_INTERRUPTED", "JPAKE_ERROR_CHANNEL", "JPAKE_ERROR_NETWORK", "JPAKE_ERROR_SERVER", "JPAKE_ERROR_TIMEOUT", "JPAKE_ERROR_INTERNAL", "JPAKE_ERROR_INVALID", "JPAKE_ERROR_NODATA", "JPAKE_ERROR_KEYMISMATCH", "JPAKE_ERROR_WRONGMESSAGE", "JPAKE_ERROR_USERABORT", "JPAKE_ERROR_DELAYUNSUPPORTED", "INFO_COLLECTIONS", "INFO_COLLECTION_USAGE", "INFO_COLLECTION_COUNTS", "INFO_QUOTA", "kSyncNotConfigured", "kSyncMasterPasswordLocked", "kSyncWeaveDisabled", "kSyncNetworkOffline", "kSyncBackoffNotMet", "kFirstSyncChoiceNotMade", "FIREFOX_ID", "FENNEC_ID", "SEAMONKEY_ID", "TEST_HARNESS_ID", "MIN_PP_LENGTH", "MIN_PASS_LENGTH", "DEVICE_TYPE_DESKTOP", "DEVICE_TYPE_MOBILE", "SQLITE_MAX_VARIABLE_NUMBER"],
   "Constants.jsm": ["Roles", "Events", "Relations", "Filters", "States", "Prefilters"],
   "ContactDB.jsm": ["ContactDB", "DB_NAME", "STORE_NAME", "SAVED_GETALL_STORE_NAME", "REVISION_STORE", "DB_VERSION"],
   "content-server.jsm": ["init"],
   "content.jsm": ["registerContentFrame"],
   "ContentCrashHandlers.jsm": ["TabCrashHandler", "PluginCrashReporter", "UnsubmittedCrashHandler"],
   "ContentObservers.js": [],
   "ContentPrefUtils.jsm": ["ContentPref", "cbHandleResult", "cbHandleError", "cbHandleCompletion", "safeCallback", "_methodsCallableFromChild"],
   "controller.js": ["MozMillController", "globalEventRegistry", "sleep", "windowMap"],