Bug 692714 - Part 1: handle node reassignment on all storage requests. r=philikon
author Richard Newman <rnewman@mozilla.com>
Wed, 19 Oct 2011 17:54:55 -0700
changeset 79718 a8acc35de8a5a98beb830a497cd29986718a5a09
parent 79717 9982ff744731b4f8cd924af3a950345814342cc9
child 79719 a90cea9fee2113cf4ac0a849268a115fc287a07c
push id 506
push user clegnitto@mozilla.com
push date Wed, 09 Nov 2011 02:03:18 +0000
reviewers philikon
bugs 692714
milestone 10.0a1
Bug 692714 - Part 1: handle node reassignment on all storage requests. r=philikon
services/sync/modules/policies.js
services/sync/modules/service.js
services/sync/tests/unit/head_helpers.js
services/sync/tests/unit/test_errorhandler.js
services/sync/tests/unit/test_node_reassignment.js
services/sync/tests/unit/test_service_sync_401.js
services/sync/tests/unit/test_syncscheduler.js
services/sync/tests/unit/xpcshell.ini
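
In outline, the recovery flow this patch wires into ErrorHandler is: any 401 received mid-sync logs the client out, clears the clusterURL pref (forcing node re-detection), and schedules another sync. A first mid-sync 401 is noted in the lastSyncReassigned pref and retried promptly; a second consecutive one is treated as a server problem and backed off; a successful sync clears the note. The following is a simplified, self-contained sketch of that logic, not the module code itself: `prefs`, `scheduler`, and `logout` are hypothetical stand-ins for Svc.Prefs, SyncScheduler, and Weave.Service.logout, and the 15-minute MINIMUM_BACKOFF_INTERVAL value is assumed from constants.js.

// Simplified sketch of the mid-sync 401 handling added in policies.js.
// Parameter names are stand-ins, not Sync APIs.
const MINIMUM_BACKOFF_INTERVAL = 15 * 60 * 1000;  // assumed value from constants.js

function handle401MidSync(prefs, scheduler, logout) {
  logout();                        // as Weave.Service.logout() in the 401 case
  prefs.reset("clusterURL");       // force node re-detection on the next sync

  let delay = 0;
  if (prefs.get("lastSyncReassigned")) {
    // Second consecutive mid-sync 401: login succeeded, so the password is
    // fine. Treat it as a server problem and back off before retrying.
    delay = MINIMUM_BACKOFF_INTERVAL;
  } else {
    // First mid-sync 401: make a note and retry promptly.
    prefs.set("lastSyncReassigned", true);
  }
  scheduler.scheduleNextSync(delay);
}

// A sync that finishes successfully clears the note again, as the
// weave:service:sync:finish observer in the diff below does.
function clearReassignmentNote(prefs) {
  prefs.reset("lastSyncReassigned");
}
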
--- a/services/sync/modules/policies.js
+++ b/services/sync/modules/policies.js
@@ -17,16 +17,17 @@
  * the Mozilla Foundation.
  * Portions created by the Initial Developer are Copyright (C) 2011
  * the Initial Developer. All Rights Reserved.
  *
  * Contributor(s):
  *  Marina Samuel <msamuel@mozilla.com>
  *  Philipp von Weitershausen <philipp@weitershausen.de>
  *  Chenxia Liu <liuche@mozilla.com>
+ *  Richard Newman <rnewman@mozilla.com>
  *
  * Alternatively, the contents of this file may be used under the terms of
  * either the GNU General Public License Version 2 or later (the "GPL"), or
  * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
  * in which case the provisions of the GPL or the LGPL are applicable instead
  * of those above. If you wish to allow use of your version of this file only
  * under the terms of either the GPL or the LGPL, and not to allow others to
  * use your version of this file under the terms of the MPL, indicate your
@@ -580,16 +581,23 @@ let ErrorHandler = {
         } else {
           this.notifyOnNextTick("weave:ui:sync:finish");
         }
 
         this.dontIgnoreErrors = false;
         break;
       case "weave:service:sync:finish":
         this._log.trace("Status.service is " + Status.service);
+
+        if (Status.sync == SYNC_SUCCEEDED) {
+          // Great. Let's clear our mid-sync 401 note.
+          this._log.trace("Clearing lastSyncReassigned.");
+          Svc.Prefs.reset("lastSyncReassigned");
+        }
+
         if (Status.service == SYNC_FAILED_PARTIAL) {
           this._log.debug("Some engines did not sync correctly.");
           this.resetFileLog(Svc.Prefs.get("log.appender.file.logOnError"),
                             LOG_PREFIX_ERROR);
 
           if (this.shouldReportError()) {
             this.dontIgnoreErrors = false;
             this.notifyOnNextTick("weave:ui:sync:error");
@@ -741,16 +749,25 @@ let ErrorHandler = {
 
     let lastSync = Svc.Prefs.get("lastSync");
     if (lastSync && ((Date.now() - Date.parse(lastSync)) >
         Svc.Prefs.get("errorhandler.networkFailureReportTimeout") * 1000)) {
       Status.sync = PROLONGED_SYNC_FAILURE;
       this._log.trace("shouldReportError: true (prolonged sync failure).");
       return true;
     }
+
+    // We got a 401 mid-sync. Wait for the next sync before actually handling
+    // an error. This assumes that we'll get a 401 again on a login fetch in
+    // order to report the error.
+    if (!Weave.Service.clusterURL) {
+      this._log.trace("shouldReportError: false (no cluster URL; " +
+                      "possible node reassignment).");
+      return false;
+    }
 
     return ([Status.login, Status.sync].indexOf(SERVER_MAINTENANCE) == -1 &&
             [Status.login, Status.sync].indexOf(LOGIN_FAILED_NETWORK_ERROR) == -1);
   },
 
   /**
    * Handle HTTP response results or exceptions and set the appropriate
    * Status.* bits.
@@ -760,17 +777,34 @@ let ErrorHandler = {
       case 400:
         if (resp == RESPONSE_OVER_QUOTA) {
           Status.sync = OVER_QUOTA;
         }
         break;
 
       case 401:
         Weave.Service.logout();
-        Status.login = LOGIN_FAILED_LOGIN_REJECTED;
+        this._log.info("Got 401 response; resetting clusterURL.");
+        Svc.Prefs.reset("clusterURL");
+
+        let delay = 0;
+        if (Svc.Prefs.get("lastSyncReassigned")) {
+          // We got a 401 in the middle of the previous sync, and we just got
+          // another. Login must have succeeded in order for us to get here, so
+          // the password should be correct.
+          // This is likely to be an intermittent server issue, so back off and
+          // give it time to recover.
+          this._log.warn("Last sync also failed for 401. Delaying next sync.");
+          delay = MINIMUM_BACKOFF_INTERVAL;
+        } else {
+          this._log.debug("New mid-sync 401 failure. Making a note.");
+          Svc.Prefs.set("lastSyncReassigned", true);
+        }
+        this._log.info("Attempting to schedule another sync.");
+        SyncScheduler.scheduleNextSync(delay);
         break;
 
       case 500:
       case 502:
       case 503:
       case 504:
         Status.enforceBackoff = true;
         if (resp.status == 503 && resp.headers["retry-after"]) {
--- a/services/sync/modules/service.js
+++ b/services/sync/modules/service.js
@@ -916,16 +916,17 @@ WeaveSvc.prototype = {
       CollectionKeys.clear();
 
       /* Login and sync. This also generates new keys. */
       this.sync();
       return true;
     }))(),
 
   startOver: function() {
+    this._log.trace("Invoking Service.startOver.");
     Svc.Obs.notify("weave:engine:stop-tracking");
     Status.resetSync();
 
    // We want to let UI consumers of the following notification know as soon
    // as possible, so let's fake the CLIENT_NOT_CONFIGURED status for now
     // by emptying the passphrase (we still need the password).
     Service.passphrase = "";
     Status.login = LOGIN_FAILED_NO_PASSPHRASE;
@@ -1488,32 +1489,28 @@ WeaveSvc.prototype = {
     this._ignorePrefObserver = false;
   },
 
   // Returns true if sync should proceed.
   // false / no return value means sync should be aborted.
   _syncEngine: function WeaveSvc__syncEngine(engine) {
     try {
       engine.sync();
-      return true;
     }
     catch(e) {
-      // Maybe a 401, cluster update perhaps needed?
       if (e.status == 401) {
-        // Log out and clear the cluster URL pref. That will make us perform
-        // cluster detection and password check on next sync, which handles
-        // both causes of 401s; in either case, we won't proceed with this
-        // sync so return false, but kick off a sync for next time.
-        this.logout();
-        Svc.Prefs.reset("clusterURL");
-        Utils.nextTick(this.sync, this);
+        // A 401 mid-sync probably means a node reassignment or a password change.
+        // We rely on ErrorHandler observing the sync failure notification to
+        // schedule another sync and clear node assignment values.
+        // Here we simply want to muffle the exception and return an
+        // appropriate value.
         return false;
       }
-      return true;
     }
+    return true;
   },
 
   /**
    * Silently fixes case issues.
    */
   syncKeyNeedsUpgrade: function syncKeyNeedsUpgrade() {
     let p = this.passphrase;
 
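
For context on the `return false` in _syncEngine above: the engine loop in service.js is assumed to stop syncing the remaining engines for this run when _syncEngine returns false, roughly as in this hedged sketch (not the actual loop; `service` and `engines` are placeholders):

// Hedged sketch of how the caller consumes _syncEngine's return value.
function syncEnabledEngines(service, engines) {
  for (let i = 0; i < engines.length; i++) {
    if (!service._syncEngine(engines[i])) {
      // A 401 was muffled inside _syncEngine. ErrorHandler has already reset
      // the cluster URL and scheduled a follow-up sync, so stop syncing the
      // remaining engines for this run.
      break;
    }
  }
}
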
--- a/services/sync/tests/unit/head_helpers.js
+++ b/services/sync/tests/unit/head_helpers.js
@@ -12,16 +12,33 @@ let provider = {
       default:
         throw Cr.NS_ERROR_FAILURE;
     }
   },
   QueryInterface: XPCOMUtils.generateQI([Ci.nsIDirectoryServiceProvider])
 };
 Services.dirsvc.QueryInterface(Ci.nsIDirectoryService).registerProvider(provider);
 
+let timer;
+function waitForZeroTimer(callback) {
+  // First wait >100ms (nsITimers can take up to that much time to fire, so
+  // we can account for the timer in delayedAutoconnect) and then two event
+  // loop ticks (to account for the Utils.nextTick() in autoConnect).
+  let ticks = 2;
+  function wait() {
+    if (ticks) {
+      ticks -= 1;
+      Utils.nextTick(wait);
+      return;
+    }
+    callback();
+  }
+  timer = Utils.namedTimer(wait, 150, {}, "timer");
+}
+
 btoa = Cu.import("resource://services-sync/log4moz.js").btoa;
 function getTestLogger(component) {
   return Log4Moz.repository.getLogger("Testing");
 }
 
 function initTestLogging(level) {
   function LogStats() {
     this.errorsLogged = 0;
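
waitForZeroTimer now lives in head_helpers.js so that other suites (the new test_node_reassignment.js as well as test_syncscheduler.js) can share it. A typical use in an xpcshell test looks roughly like this hypothetical example; only add_test, do_check_true, run_next_test, and waitForZeroTimer itself are assumed from the harness:

// Hypothetical usage: let the >100ms timer and two event-loop ticks elapse so
// that observers scheduled via Utils.nextTick have run before we assert.
add_test(function test_settles_before_asserting() {
  // ... kick off some asynchronous Sync activity here ...

  waitForZeroTimer(function () {
    do_check_true(true);   // real assertions against the settled state go here
    run_next_test();
  });
});
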
--- a/services/sync/tests/unit/test_errorhandler.js
+++ b/services/sync/tests/unit/test_errorhandler.js
@@ -132,26 +132,44 @@ add_test(function test_401_logout() {
   let server = sync_httpd_setup();
   setUp();
 
   // By calling sync, we ensure we're logged in.
   Service.sync();
   do_check_eq(Status.sync, SYNC_SUCCEEDED);
   do_check_true(Service.isLoggedIn);
 
+  Svc.Obs.add("weave:service:sync:error", onSyncError);
+  function onSyncError() {
+    _("Got weave:service:sync:error in first sync.");
+    Svc.Obs.remove("weave:service:sync:error", onSyncError);
+
+    // Wait for the automatic next sync.
+    function onLoginError() {
+      _("Got weave:service:login:error in second sync.");
+      Svc.Obs.remove("weave:service:login:error", onLoginError);
+
+      do_check_eq(Status.login, LOGIN_FAILED_LOGIN_REJECTED);
+      do_check_false(Service.isLoggedIn);
+
+      // Clean up.
+      Utils.nextTick(function () {
+        Service.startOver();
+        server.stop(run_next_test);
+      });
+    }
+    Svc.Obs.add("weave:service:login:error", onLoginError);
+  }
+
   // Make sync fail due to login rejected.
   Service.username = "janedoe";
+
+  _("Starting first sync.");
   Service.sync();
-
-  do_check_eq(Status.login, LOGIN_FAILED_LOGIN_REJECTED);
-  do_check_false(Service.isLoggedIn);
-
-  // Clean up.
-  Service.startOver();
-  server.stop(run_next_test);
+  _("First sync done.");
 });
 
 add_test(function test_credentials_changed_logout() {
   let server = sync_httpd_setup();
   setUp();
 
   // By calling sync, we ensure we're logged in.
   Service.sync();
@@ -184,16 +202,20 @@ add_test(function test_no_lastSync_pref(
 
   run_next_test();
 });
 
 add_test(function test_shouldReportError() {
   Status.login = MASTER_PASSWORD_LOCKED;
   do_check_false(ErrorHandler.shouldReportError());
 
+  // Give ourselves a clusterURL so that the temporary 401 no-error situation
+  // doesn't come into play.
+  Service.clusterURL = "http://localhost:8080/";
+
   // Test dontIgnoreErrors, non-network, non-prolonged, login error reported
   Status.resetSync();
   setLastSync(NON_PROLONGED_ERROR_DURATION);
   ErrorHandler.dontIgnoreErrors = true;
   Status.login = LOGIN_FAILED_NO_PASSWORD;
   do_check_true(ErrorHandler.shouldReportError());
 
   // Test dontIgnoreErrors, non-network, non-prolonged, sync error reported
new file mode 100644
--- /dev/null
+++ b/services/sync/tests/unit/test_node_reassignment.js
@@ -0,0 +1,382 @@
+/* Any copyright is dedicated to the Public Domain.
+   http://creativecommons.org/publicdomain/zero/1.0/ */
+
+_("Test that node reassignment responses are respected on all kinds of " +
+  "requests.");
+
+// Don't sync any engines by default.
+Svc.DefaultPrefs.set("registerEngines", "");
+
+Cu.import("resource://services-sync/constants.js");
+Cu.import("resource://services-sync/policies.js");
+Cu.import("resource://services-sync/rest.js");
+Cu.import("resource://services-sync/service.js");
+Cu.import("resource://services-sync/status.js");
+Cu.import("resource://services-sync/log4moz.js");
+
+function run_test() {
+  Log4Moz.repository.getLogger("Sync.AsyncResource").level = Log4Moz.Level.Trace;
+  Log4Moz.repository.getLogger("Sync.ErrorHandler").level  = Log4Moz.Level.Trace;
+  Log4Moz.repository.getLogger("Sync.Resource").level      = Log4Moz.Level.Trace;
+  Log4Moz.repository.getLogger("Sync.RESTRequest").level   = Log4Moz.Level.Trace;
+  Log4Moz.repository.getLogger("Sync.Service").level       = Log4Moz.Level.Trace;
+  Log4Moz.repository.getLogger("Sync.SyncScheduler").level = Log4Moz.Level.Trace;
+  initTestLogging();
+
+  Engines.register(RotaryEngine);
+
+  // None of the failures in this file should result in a UI error.
+  function onUIError() {
+    do_throw("Errors should not be presented in the UI.");
+  }
+  Svc.Obs.add("weave:ui:login:error", onUIError);
+  Svc.Obs.add("weave:ui:sync:error", onUIError);
+
+  run_next_test();
+}
+
+/**
+ * Emulate the following Zeus config:
+ * $draining = data.get($prefix . $host . " draining");
+ * if ($draining == "drain.") {
+ *   log.warn($log_host_db_status . " migrating=1 (node-reassignment)" .
+ *            $log_suffix);
+ *   http.sendResponse("401 Node reassignment", $content_type,
+ *                     '"server request: node reassignment"', "");
+ * }
+ */
+const reassignBody = "\"server request: node reassignment\"";
+
+// API-compatible with SyncServer handler. Bind `handler` to something to use
+// as a ServerCollection handler.
+// We keep this format because in a patch or two we're going to switch to using
+// SyncServer.
+function handleReassign(handler, req, resp) {
+  resp.setStatusLine(req.httpVersion, 401, "Node reassignment");
+  resp.setHeader("Content-Type", "application/json");
+  resp.bodyOutputStream.write(reassignBody, reassignBody.length);
+}
+
+/**
+ * A node assignment handler.
+ */
+const newNodeBody = "http://localhost:8080/";
+function installNodeHandler(server, next) {
+  function handleNodeRequest(req, resp) {
+    _("Client made a request for a node reassignment.");
+    resp.setStatusLine(req.httpVersion, 200, "OK");
+    resp.setHeader("Content-Type", "text/plain");
+    resp.bodyOutputStream.write(newNodeBody, newNodeBody.length);
+    Utils.nextTick(next);
+  }
+  let nodePath = "/user/1.0/johndoe/node/weave";
+  server.registerPathHandler(nodePath, handleNodeRequest);
+  _("Registered node handler at " + nodePath);
+}
+
+/**
+ * Optionally return a 401 for the provided handler, if the value of `name` in
+ * the `reassignments` object is true.
+ */
+let reassignments = {
+  "crypto": false,
+  "info": false,
+  "meta": false,
+  "rotary": false
+};
+function maybeReassign(handler, name) {
+  return function (request, response) {
+    if (reassignments[name]) {
+      return handleReassign(null, request, response);
+    }
+    return handler(request, response);
+  };
+}
+
+function prepareServer() {
+  Service.username   = "johndoe";
+  Service.passphrase = "abcdeabcdeabcdeabcdeabcdea";
+  Service.password   = "ilovejane";
+  Service.serverURL  = "http://localhost:8080/";
+  Service.clusterURL = "http://localhost:8080/";
+
+  do_check_eq(Service.userAPI, "http://localhost:8080/user/1.0/");
+
+  let collectionsHelper = track_collections_helper();
+  let upd = collectionsHelper.with_updated_collection;
+  let collections = collectionsHelper.collections;
+
+  let engine  = Engines.get("rotary");
+  let engines = {rotary: {version: engine.version,
+                          syncID:  engine.syncID}};
+  let global  = new ServerWBO("global", {engines: engines});
+  let rotary  = new ServerCollection({}, true);
+  let clients = new ServerCollection({}, true);
+  let keys    = new ServerWBO("keys");
+
+  let rotaryHandler = maybeReassign(upd("rotary", rotary.handler()), "rotary");
+  let cryptoHandler = maybeReassign(upd("crypto", keys.handler()), "crypto");
+  let metaHandler   = maybeReassign(upd("meta", global.handler()), "meta");
+  let infoHandler   = maybeReassign(collectionsHelper.handler, "info");
+
+  let server = httpd_setup({
+    "/1.1/johndoe/storage/clients":     upd("clients", clients.handler()),
+    "/1.1/johndoe/storage/crypto/keys": cryptoHandler,
+    "/1.1/johndoe/storage/meta/global": metaHandler,
+    "/1.1/johndoe/storage/rotary":      rotaryHandler,
+    "/1.1/johndoe/info/collections":    infoHandler
+  });
+
+  return [server, global, rotary, collectionsHelper];
+}
+
+/**
+ * Make a test request to `url`, then watch the result of two syncs
+ * to ensure that a node request was made.
+ * Runs `undo` between the two.
+ */
+function syncAndExpectNodeReassignment(server, firstNotification, undo,
+                                       secondNotification, url) {
+  function onwards() {
+    let nodeFetched = false;
+    function onFirstSync() {
+      _("First sync completed.");
+      Svc.Obs.remove(firstNotification, onFirstSync);
+      Svc.Obs.add(secondNotification, onSecondSync);
+
+      do_check_eq(Service.clusterURL, "");
+
+      // Track whether we fetched node/weave. We want to wait for the second
+      // sync to finish so that we're cleaned up for the next test, so don't
+      // run_next_test in the node handler.
+      nodeFetched = false;
+
+      // Verify that the client requests a node reassignment.
+      // Install a node handler to watch for these requests.
+      installNodeHandler(server, function () {
+        nodeFetched = true;
+      });
+
+      // Allow for tests to clean up error conditions.
+      _("Undoing test changes.");
+      undo();
+    }
+    function onSecondSync() {
+      _("Second sync completed.");
+      Svc.Obs.remove(secondNotification, onSecondSync);
+      SyncScheduler.clearSyncTriggers();
+
+      // Make absolutely sure that any event listeners are done with their work
+      // before we proceed.
+      waitForZeroTimer(function () {
+        _("Second sync nextTick.");
+        do_check_true(nodeFetched);
+        Service.startOver();
+        server.stop(run_next_test);
+      });
+    }
+
+    Svc.Obs.add(firstNotification, onFirstSync);
+    Service.sync();
+  }
+
+  // Make sure that it works!
+  let request = new RESTRequest(url);
+  request.get(function () {
+    do_check_eq(request.response.status, 401);
+    Utils.nextTick(onwards);
+  });
+}
+
+add_test(function test_momentary_401_engine() {
+  _("Test a failure for engine URLs that's resolved by reassignment.");
+  let [server, global, rotary, collectionsHelper] = prepareServer();
+
+  _("Enabling the Rotary engine.");
+  let engine = Engines.get("rotary");
+  engine.enabled = true;
+
+  // We need the server to be correctly set up prior to experimenting. Do this
+  // through a sync.
+  let g = {syncID: Service.syncID,
+           storageVersion: STORAGE_VERSION,
+           rotary: {version: engine.version,
+                    syncID:  engine.syncID}};
+
+  global.payload = JSON.stringify(g);
+  global.modified = new_timestamp();
+  collectionsHelper.update_collection("meta", global.modified);
+
+  _("First sync to prepare server contents.");
+  Service.sync();
+
+  _("Setting up Rotary collection to 401.");
+  reassignments["rotary"] = true;
+
+  // We want to verify that the clusterURL pref has been cleared after a 401
+  // inside a sync. Flag the Rotary engine to need syncing.
+  rotary.modified = new_timestamp() + 10;
+  collectionsHelper.update_collection("rotary", rotary.modified);
+
+  function undo() {
+    reassignments["rotary"] = false;
+  }
+
+  syncAndExpectNodeReassignment(server,
+                                "weave:service:sync:finish",
+                                undo,
+                                "weave:service:sync:finish",
+                                Service.storageURL + "rotary");
+});
+
+// This test ends up being a failing fetch *after we're already logged in*.
+add_test(function test_momentary_401_info_collections() {
+  _("Test a failure for info/collections that's resolved by reassignment.");
+  let [server, global, rotary] = prepareServer();
+
+  _("First sync to prepare server contents.");
+  Service.sync();
+
+  // Return a 401 for info/collections requests.
+  reassignments["info"] = true;
+
+  function undo() {
+    reassignments["info"] = false;
+  }
+
+  syncAndExpectNodeReassignment(server,
+                                "weave:service:sync:error",
+                                undo,
+                                "weave:service:sync:finish",
+                                Service.infoURL);
+});
+
+add_test(function test_momentary_401_storage() {
+  _("Test a failure for any storage URL, not just engine parts. " +
+    "Resolved by reassignment.");
+  let [server, global, rotary] = prepareServer();
+
+  // Return a 401 for all storage requests.
+  reassignments["crypto"] = true;
+  reassignments["meta"]   = true;
+  reassignments["rotary"] = true;
+
+  function undo() {
+    reassignments["crypto"] = false;
+    reassignments["meta"]   = false;
+    reassignments["rotary"] = false;
+  }
+
+  syncAndExpectNodeReassignment(server,
+                                "weave:service:login:error",
+                                undo,
+                                "weave:service:sync:finish",
+                                Service.storageURL + "meta/global");
+});
+
+add_test(function test_loop_avoidance() {
+  _("Test that a repeated failure doesn't result in a sync loop " +
+    "if node reassignment cannot resolve the failure.");
+
+  let [server, global, rotary] = prepareServer();
+
+  // Return a 401 for all storage requests.
+  reassignments["crypto"] = true;
+  reassignments["meta"]   = true;
+  reassignments["rotary"] = true;
+
+  let firstNotification  = "weave:service:login:error";
+  let secondNotification = "weave:service:login:error";
+  let thirdNotification  = "weave:service:sync:finish";
+
+  let nodeFetched = false;
+
+  // Track the time. We want to make sure the duration between the first and
+  // second sync is small, and then that the duration between second and third
+  // is set to be large.
+  let now;
+
+  function getReassigned() {
+    return Services.prefs.getBoolPref("services.sync.lastSyncReassigned");
+  }
+
+  function onFirstSync() {
+    _("First sync completed.");
+    Svc.Obs.remove(firstNotification, onFirstSync);
+    Svc.Obs.add(secondNotification, onSecondSync);
+
+    do_check_eq(Service.clusterURL, "");
+
+    // We got a 401 mid-sync, and set the pref accordingly.
+    do_check_true(Services.prefs.getBoolPref("services.sync.lastSyncReassigned"));
+
+    // Track whether we fetched node/weave. We want to wait for the second
+    // sync to finish so that we're cleaned up for the next test, so don't
+    // run_next_test in the node handler.
+    nodeFetched = false;
+
+    // Verify that the client requests a node reassignment.
+    // Install a node handler to watch for these requests.
+    installNodeHandler(server, function () {
+      nodeFetched = true;
+    });
+
+    // Update the timestamp.
+    now = Date.now();
+  }
+
+  function onSecondSync() {
+    _("Second sync completed.");
+    Svc.Obs.remove(secondNotification, onSecondSync);
+    Svc.Obs.add(thirdNotification, onThirdSync);
+
+    // This sync occurred within the backoff interval.
+    let elapsedTime = Date.now() - now;
+    do_check_true(elapsedTime < MINIMUM_BACKOFF_INTERVAL);
+
+    // This pref will be true until a sync completes successfully.
+    do_check_true(getReassigned());
+
+    // The timer will be set for some distant time.
+    // We store nextSync in prefs, which offers us only limited resolution.
+    // Include that logic here.
+    let expectedNextSync = 1000 * Math.floor((now + MINIMUM_BACKOFF_INTERVAL) / 1000);
+    _("Next sync scheduled for " + SyncScheduler.nextSync);
+    _("Expected to be slightly greater than " + expectedNextSync);
+
+    do_check_true(SyncScheduler.nextSync >= expectedNextSync);
+    do_check_true(!!SyncScheduler.syncTimer);
+
+    // Undo our evil scheme.
+    reassignments["crypto"] = false;
+    reassignments["meta"]   = false;
+    reassignments["rotary"] = false;
+
+    // Bring the timer forward to kick off a successful sync, so we can watch
+    // the pref get cleared.
+    SyncScheduler.scheduleNextSync(0);
+  }
+  function onThirdSync() {
+    Svc.Obs.remove(thirdNotification, onThirdSync);
+
+    // That'll do for now; no more syncs.
+    SyncScheduler.clearSyncTriggers();
+
+    // Make absolutely sure that any event listeners are done with their work
+    // before we proceed.
+    waitForZeroTimer(function () {
+      _("Third sync nextTick.");
+
+      // A missing pref throws.
+      do_check_throws(getReassigned, Cr.NS_ERROR_UNEXPECTED);
+      do_check_true(nodeFetched);
+      Service.startOver();
+      server.stop(run_next_test);
+    });
+  }
+
+  Svc.Obs.add(firstNotification, onFirstSync);
+
+  now = Date.now();
+  Service.sync();
+});
--- a/services/sync/tests/unit/test_service_sync_401.js
+++ b/services/sync/tests/unit/test_service_sync_401.js
@@ -46,31 +46,37 @@ function run_test() {
       threw = true;
     });
 
     _("Initial state: We're successfully logged in.");
     Weave.Service.login();
     do_check_true(Weave.Service.isLoggedIn);
     do_check_eq(Weave.Status.login, Weave.LOGIN_SUCCEEDED);
 
-    _("Simulate having changed the password somehwere else.");
+    _("Simulate having changed the password somewhere else.");
     Weave.Service.password = "ilovejosephine";
 
     _("Let's try to sync.");
     Weave.Service.sync();
 
     _("Verify that sync() threw an exception.");
     do_check_true(threw);
 
     _("We're no longer logged in.");
     do_check_false(Weave.Service.isLoggedIn);
 
-    _("Sync status.");
-    do_check_eq(Weave.Status.login, Weave.LOGIN_FAILED_LOGIN_REJECTED);
+    _("Sync status won't have changed yet, because we haven't tried again.");
 
     _("globalScore is reset upon starting a sync.");
     do_check_eq(SyncScheduler.globalScore, 0);
 
+    _("Our next sync will fail appropriately.");
+    try {
+      Weave.Service.sync();
+    } catch (ex) {
+    }
+    do_check_eq(Weave.Status.login, Weave.LOGIN_FAILED_LOGIN_REJECTED);
+
   } finally {
     Weave.Svc.Prefs.resetBranch("");
     server.stop(do_test_finished);
   }
 }
--- a/services/sync/tests/unit/test_syncscheduler.js
+++ b/services/sync/tests/unit/test_syncscheduler.js
@@ -66,33 +66,16 @@ function cleanUpAndGo(server) {
     if (server) {
       server.stop(run_next_test);
     } else {
       run_next_test();
     }
   });
 }
 
-let timer;
-function waitForZeroTimer(callback) {
-  // First wait >100ms (nsITimers can take up to that much time to fire, so
-  // we can account for the timer in delayedAutoconnect) and then two event
-  // loop ticks (to account for the Utils.nextTick() in autoConnect).
-  let ticks = 2;
-  function wait() {
-    if (ticks) {
-      ticks -= 1;
-      Utils.nextTick(wait);
-      return;
-    }
-    callback();
-  }
-  timer = Utils.namedTimer(wait, 150, {}, "timer");
-}
-
 function run_test() {
   initTestLogging("Trace");
 
   Log4Moz.repository.getLogger("Sync.Service").level = Log4Moz.Level.Trace;
   Log4Moz.repository.getLogger("Sync.SyncScheduler").level = Log4Moz.Level.Trace;
 
   run_next_test();
 }
--- a/services/sync/tests/unit/xpcshell.ini
+++ b/services/sync/tests/unit/xpcshell.ini
@@ -43,16 +43,17 @@ skip-if = (os == "mac" && debug) || os =
 [test_interval_triggers.js]
 [test_jpakeclient.js]
 # Bug 618233: this test produces random failures on Windows 7.
 # Bug 676978: test hangs on Android (see also testing/xpcshell/xpcshell.ini)
 skip-if = os == "win" || os == "android"
 [test_keys.js]
 [test_load_modules.js]
 [test_log4moz.js]
+[test_node_reassignment.js]
 [test_notifications.js]
 [test_password_store.js]
 [test_password_tracker.js]
 [test_places_guid_downgrade.js]
 [test_prefs_store.js]
 [test_prefs_tracker.js]
 [test_records_crypto.js]
 [test_records_crypto_generateEntry.js]