Bug 709424 - More robust handling of reconciling for duplicate records; r=rnewman
authorGregory Szorc <gps@mozilla.com>
Tue, 13 Dec 2011 15:46:54 -0800
changeset 82999 3969042ab0b0bbc4aa7bb40b5dddc86a96c0ea99
parent 82998 3bdbd5ea66175b5cc8bf8c20cbd4bf1061469c35
child 83000 80339f4dbaab84b960273e2b762ad79695551e3a
push id 21715
push user gszorc@mozilla.com
push date Mon, 19 Dec 2011 20:28:27 +0000
treeherder mozilla-central@e2194521b6c0 [default view] [failures only]
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers rnewman
bugs 709424
milestone11.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 709424 - More robust handling of reconciling for duplicate records; r=rnewman
services/sync/modules/engines.js
services/sync/tests/unit/head_helpers.js
services/sync/tests/unit/test_syncengine_sync.js
--- a/services/sync/modules/engines.js
+++ b/services/sync/modules/engines.js
@@ -1051,17 +1051,17 @@ SyncEngine.prototype = {
       this._log.trace("Switching local id to incoming: " + [item.id, dupeId]);
       this._store.changeItemID(dupeId, item.id);
       this._deleteId(dupeId);
     }
   },
 
   // Reconcile incoming and existing records.  Return true if server
   // data should be applied.
-  _reconcile: function SyncEngine__reconcile(item) {
+  _reconcile: function SyncEngine__reconcile(item, dupePerformed) {
     if (this._log.level <= Log4Moz.Level.Trace)
       this._log.trace("Incoming: " + item);
 
     this._log.trace("Reconcile step 1: Check for conflicts");
     if (item.id in this._modified) {
       // If the incoming and local changes are the same, skip
       if (this._isEqual(item)) {
         delete this._modified[item.id];
@@ -1080,22 +1080,38 @@ SyncEngine.prototype = {
     this._log.trace("Reconcile step 2: Check for updates");
     if (this._store.itemExists(item.id))
       return !this._isEqual(item);
 
     this._log.trace("Reconcile step 2.5: Don't dupe deletes");
     if (item.deleted)
       return true;
 
+    // This shouldn't happen, but we protect ourselves from infinite recursion.
+    if (dupePerformed) {
+      this._log.warn("Duplicate record not reconciled on second pass: " +
+                     item);
+      // We go ahead and apply it.
+      return true;
+    }
+
+    // When a dupe is detected, we feed the record (with a possibly changed ID)
+    // back through reconciling. If there are changes in both the local and
+    // incoming records, this should ensure that the proper record is used.
     this._log.trace("Reconcile step 3: Find dupes");
     let dupeId = this._findDupe(item);
-    if (dupeId)
+    if (dupeId) {
+      // _handleDupe() doesn't really handle anything. Instead, it just
+      // determines which GUID to use.
       this._handleDupe(item, dupeId);
+      this._log.debug("Reconciling de-duped record: " + item.id);
+      return this._reconcile(item, true);
+    }
 
-    // Apply the incoming item (now that the dupe is the right id)
+    // Apply the incoming item.
     return true;
   },
 
   // Upload outgoing records
   _uploadOutgoing: function SyncEngine__uploadOutgoing() {
     this._log.trace("Uploading local changes to server.");
     if (this._modifiedIDs.length) {
       this._log.trace("Preparing " + this._modifiedIDs.length +
--- a/services/sync/tests/unit/head_helpers.js
+++ b/services/sync/tests/unit/head_helpers.js
@@ -279,16 +279,22 @@ RotaryStore.prototype = {
   },
 
   itemExists: function Store_itemExists(id) {
     return (id in this.items);
   },
 
   createRecord: function(id, collection) {
     let record = new RotaryRecord(collection, id);
+
+    if (!(id in this.items)) {
+      record.deleted = true;
+      return record;
+    }
+
     record.denomination = this.items[id] || "Data for new record: " + id;
     return record;
   },
 
   changeItemID: function(oldID, newID) {
     this.items[newID] = this.items[oldID];
     delete this.items[oldID];
   },
@@ -322,15 +328,21 @@ function RotaryEngine() {
 }
 RotaryEngine.prototype = {
   __proto__: SyncEngine.prototype,
   _storeObj: RotaryStore,
   _trackerObj: RotaryTracker,
   _recordObj: RotaryRecord,
 
   _findDupe: function(item) {
+    // This is a sentinel value used for testing proper reconciling on dupe
+    // detection.
+    if (item.id == "DUPE_INCOMING") {
+      return "DUPE_LOCAL";
+    }
+
     for (let [id, value] in Iterator(this._store.items)) {
       if (item.denomination == value) {
         return id;
       }
     }
   }
 };
--- a/services/sync/tests/unit/test_syncengine_sync.js
+++ b/services/sync/tests/unit/test_syncengine_sync.js
@@ -8,22 +8,24 @@ Cu.import("resource://services-sync/poli
 Cu.import("resource://services-sync/service.js");
 
 function makeRotaryEngine() {
   return new RotaryEngine();
 }
 
 function cleanAndGo(server) {
   Svc.Prefs.resetBranch("");
+  Svc.Prefs.set("log.logger.engine.rotary", "Trace");
   Records.clearCache();
   server.stop(run_next_test);
 }
 
 function run_test() {
   generateNewKeys();
+  Svc.Prefs.set("log.logger.engine.rotary", "Trace");
   run_next_test();
 }
 
 /*
  * Tests
  * 
  * SyncEngine._sync() is divided into four rather independent steps:
  *
@@ -353,18 +355,122 @@ add_test(function test_processIncoming_r
     do_check_neq(engine._delete.ids.indexOf('duplication'), -1);
 
     // The 'nukeme' record marked as deleted is removed.
     do_check_eq(engine._store.items.nukeme, undefined);
 
   } finally {
     cleanAndGo(server);
   }
+});
+
+add_test(function test_processIncoming_reconcile_deleted_dupe() {
+  _("Ensure that locally deleted duplicate record is handled properly.");
+
+  let engine = new RotaryEngine();
+
+  let contents = {
+    meta: {global: {engines: {rotary: {version: engine.version,
+                                       syncID:  engine.syncID}}}},
+    crypto: {},
+    rotary: {}
+  };
+
+  const USER = "foo";
+
+  Svc.Prefs.set("clusterURL", "http://localhost:8080/");
+  Svc.Prefs.set("username", USER);
+
+  let now = Date.now() / 1000 - 10;
+  engine.lastSync = now;
+  engine.lastModified = now + 1;
+
+  let server = new SyncServer();
+  server.registerUser(USER, "password");
+  server.createContents(USER, contents);
+  server.start();
+
+  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "value"});
+  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
+  server.insertWBO(USER, "rotary", wbo);
+
+  // Simulate a locally-deleted item.
+  engine._store.items = {};
+  engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
+  do_check_false(engine._store.itemExists("DUPE_LOCAL"));
+  do_check_false(engine._store.itemExists("DUPE_INCOMING"));
+  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
+
+  engine._sync();
+
+  // After the sync, nothing should exist since the local record had been
+  // deleted after the incoming record was updated. The server should also have
+  // deleted the incoming record. Since the local record only existed on the
+  // client at the beginning of the sync, it shouldn't exist on the server
+  // after.
+  do_check_empty(engine._store.items);
+
+  let collection = server.getCollection(USER, "rotary");
+  do_check_eq(1, collection.count());
+  do_check_eq(undefined, collection.payload("DUPE_INCOMING"));
+
+  cleanAndGo(server);
 });
 
+add_test(function test_processIncoming_reconcile_changed_dupe() {
+  _("Ensure that locally changed duplicate record is handled properly.");
+
+  let engine = new RotaryEngine();
+  let contents = {
+    meta: {global: {engines: {rotary: {version: engine.version,
+                                       syncID:  engine.syncID}}}},
+    crypto: {},
+    rotary: {}
+  };
+
+  const USER = "foo";
+  Svc.Prefs.set("clusterURL", "http://localhost:8080/");
+  Svc.Prefs.set("username", USER);
+
+  let now = Date.now() / 1000 - 10;
+  engine.lastSync = now;
+  engine.lastModified = now + 1;
+
+  let server = new SyncServer();
+  server.registerUser(USER, "password");
+  server.createContents(USER, contents);
+  server.start();
+
+  let record = encryptPayload({id: "DUPE_INCOMING", denomination: "incoming"});
+  let wbo = new ServerWBO("DUPE_INCOMING", record, now + 2);
+  server.insertWBO(USER, "rotary", wbo);
+
+  engine._store.create({id: "DUPE_LOCAL", denomination: "local"});
+  engine._tracker.addChangedID("DUPE_LOCAL", now + 3);
+  do_check_true(engine._store.itemExists("DUPE_LOCAL"));
+  do_check_eq("DUPE_LOCAL", engine._findDupe({id: "DUPE_INCOMING"}));
+
+  engine._sync();
+
+  do_check_attribute_count(engine._store.items, 1);
+  do_check_true("DUPE_LOCAL" in engine._store.items);
+
+  let collection = server.getCollection(USER, "rotary");
+  wbo = collection.wbo("DUPE_INCOMING");
+  do_check_neq(undefined, wbo);
+  do_check_eq(undefined, wbo.payload);
+
+  wbo = collection.wbo("DUPE_LOCAL");
+  do_check_neq(undefined, wbo);
+  do_check_neq(undefined, wbo.payload);
+  let payload = JSON.parse(JSON.parse(wbo.payload).ciphertext);
+  do_check_eq("local", payload.denomination);
+
+  cleanAndGo(server);
+});
 
 add_test(function test_processIncoming_mobile_batchSize() {
   _("SyncEngine._processIncoming doesn't fetch everything at once on mobile clients");
 
   let syncTesting = new SyncTestingInfrastructure();
   Svc.Prefs.set("clusterURL", "http://localhost:8080/");
   Svc.Prefs.set("username", "foo");
   Svc.Prefs.set("client.type", "mobile");