Bug 1033126 - Convert xpcshell tests to use mozlog.structured format for logging.;r=ted
author Chris Manchester <cmanchester@mozilla.com>
Wed, 22 Oct 2014 15:53:42 -0400
changeset 212460 b2dc4f1dc9b38b9de7dbf3cad503f31dce50b04c
parent 212459 532df34a8482d12b362da4efd0e57fe716f02e60
child 212461 bbc43d1d869178462dcdceb21bd5c79d3ea8516a
push id 50976
push user cmanchester@mozilla.com
push date Mon, 27 Oct 2014 19:29:40 +0000
treeherder mozilla-inbound@b2dc4f1dc9b3
reviewers ted
bugs 1033126
milestone 36.0a1
dom/encoding/test/unit/head.js
dom/network/tests/unit/test_multisend.js
dom/network/tests/unit/test_tcpsocket.js
netwerk/test/unit/test_net_addr.js
services/sync/modules-testing/utils.js
testing/modules/StructuredLog.jsm
testing/xpcshell/head.js
testing/xpcshell/mach_commands.py
testing/xpcshell/remotexpcshelltests.py
testing/xpcshell/runtestsb2g.py
testing/xpcshell/runxpcshelltests.py
testing/xpcshell/selftest.py
toolkit/components/feeds/test/test_xml.js
--- a/dom/encoding/test/unit/head.js
+++ b/dom/encoding/test/unit/head.js
@@ -83,13 +83,13 @@ let tests = [];
 
 function test(func, msg) {
   tests.push({msg: msg, func: func,
               filename: Components.stack.caller.filename });
 }
 
 function run_test() {
   tests.forEach(function(t) {
-    _log("test_info", {source_file: t.filename,
-                       diagnostic: "test group: " + t.msg});
+    do_print("test group: " + t.msg,
+             {source_file: t.filename});
     t.func();
   });
 };
--- a/dom/network/tests/unit/test_multisend.js
+++ b/dom/network/tests/unit/test_multisend.js
@@ -24,17 +24,17 @@ const ServerSocket = CC("@mozilla.org/ne
 var server = null, sock = null;
 
 /**
  * Spin up a listening socket and associate at most one live, accepted socket
  * with ourselves.
  */
 function TestServer() {
   this.listener = ServerSocket(-1, true, -1);
-  do_print('server: listening on', this.listener.port);
+  do_print('server: listening on ' + this.listener.port);
   this.listener.asyncListen(this);
 
   this.binaryInput = null;
   this.input = null;
   this.binaryOutput = null;
   this.output = null;
 
   this.onaccept = null;
@@ -144,9 +144,9 @@ function run_test() {
     }
   };
   server.onclose = function() {};
 
   sock.onopen = function() {
     sock.send(ok.buffer, 0, 2);
     sock.send(ok.buffer, 2, 3);
   };
-}
\ No newline at end of file
+}
--- a/dom/network/tests/unit/test_tcpsocket.js
+++ b/dom/network/tests/unit/test_tcpsocket.js
@@ -86,17 +86,17 @@ function is_content() {
 }
 
 /**
  * Spin up a listening socket and associate at most one live, accepted socket
  * with ourselves.
  */
 function TestServer() {
   this.listener = ServerSocket(-1, true, -1);
-  do_print('server: listening on', this.listener.port);
+  do_print('server: listening on ' + this.listener.port);
   this.listener.asyncListen(this);
 
   this.binaryInput = null;
   this.input = null;
   this.binaryOutput = null;
   this.output = null;
 
   this.onconnect = null;
--- a/netwerk/test/unit/test_net_addr.js
+++ b/netwerk/test/unit/test_net_addr.js
@@ -20,17 +20,17 @@ const ServerSocket = CC("@mozilla.org/ne
  */
 function TestServer() {
   this.reset();
 
   // start server.
   // any port (-1), loopback only (true), default backlog (-1)
   this.listener = ServerSocket(-1, true, -1);
   this.port = this.listener.port;
-  do_print('server: listening on', this.port);
+  do_print('server: listening on ' + this.port);
   this.listener.asyncListen(this);
 }
 
 TestServer.prototype = {
   onSocketAccepted: function(socket, trans) {
     do_print('server: got client connection');
 
     // one connection at a time.
--- a/services/sync/modules-testing/utils.js
+++ b/services/sync/modules-testing/utils.js
@@ -231,18 +231,17 @@ this.encryptPayload = function encryptPa
 // * The test itself should be passed as 'test' - ie, test code will generally
 //   pass |this|.
 // * The test function is a regular test function - although note that it must
 //   be a generator - async operations should yield them, and run_next_test
 //   mustn't be called.
 this.add_identity_test = function(test, testFunction) {
   function note(what) {
     let msg = "running test " + testFunction.name + " with " + what + " identity manager";
-    test._log("test_info",
-              {_message: "TEST-INFO | | " + msg + "\n"});
+    test.do_print(msg);
   }
   let ns = {};
   Cu.import("resource://services-sync/service.js", ns);
   // one task for the "old" identity manager.
   test.add_task(function() {
     note("sync");
     let oldIdentity = Status._authManager;
     ensureLegacyIdentityManager();
--- a/testing/modules/StructuredLog.jsm
+++ b/testing/modules/StructuredLog.jsm
@@ -100,26 +100,31 @@ StructuredLogger.prototype.suiteStart = 
   this._logData("suite_start", data);
 };
 
 StructuredLogger.prototype.suiteEnd = function () {
   this._logData("suite_end");
 };
 
 /**
- * Unstructured logging functions
+ * Unstructured logging functions. The "extra" parameter can always be used to
+ * log suite-specific data. If a "stack" field is provided, it is logged at the
+ * top level of the data object for the benefit of mozlog's formatters.
  */
 StructuredLogger.prototype.log = function (level, message, extra=null) {
   let data = {
     level: level,
     message: message,
   };
 
   if (extra !== null) {
     data.extra = extra;
+    if ("stack" in extra) {
+      data.stack = extra.stack;
+    }
   }
 
   this._logData("log", data);
 }
 
 StructuredLogger.prototype.debug = function (message, extra=null) {
   this.log("DEBUG", message, extra);
 }
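As a minimal sketch (not part of the patch), this is roughly how a Python consumer such as the xpcshell harness might read one of the newline-delimited JSON records the logger emits through its dump function. The sample record and its values are hypothetical; the field names (action, level, message, extra, stack) follow the JSM code above.

import json

# Hypothetical record shaped like the output of StructuredLogger.error(msg, extra)
# when "extra" carries a "stack" field (which is also hoisted to the top level).
line = ('{"action": "log", "level": "ERROR", "message": "unexpected exception", '
        '"stack": "frameA\\nframeB", '
        '"extra": {"source_file": "test_example.js", "stack": "frameA\\nframeB"}}')

record = json.loads(line)
assert record["action"] == "log"
print(record["level"], record["message"])
if "stack" in record:
    # Formatters can render the stack without knowing about suite-specific "extra".
    print(record["stack"])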
--- a/testing/xpcshell/head.js
+++ b/testing/xpcshell/head.js
@@ -8,18 +8,16 @@
  * This file contains common code that is loaded before each test file(s).
  * See http://developer.mozilla.org/en/docs/Writing_xpcshell-based_unit_tests
  * for more information.
  */
 
 var _quit = false;
 var _passed = true;
 var _tests_pending = 0;
-var _passedChecks = 0, _falsePassedChecks = 0;
-var _todoChecks = 0;
 var _cleanupFunctions = [];
 var _pendingTimers = [];
 var _profileInitialized = false;
 
 // Register the testing-common resource protocol early, to have access to its
 // modules.
 _register_modules_protocol_handler();
 
@@ -31,33 +29,29 @@ let AssertCls = Components.utils.import(
 let Assert = new AssertCls(function(err, message, stack) {
   if (err) {
     do_report_result(false, err.message, err.stack);
   } else {
     do_report_result(true, message, stack);
   }
 });
 
-let _log = function (action, params) {
+
+let _add_params = function (params) {
   if (typeof _XPCSHELL_PROCESS != "undefined") {
-    params.process = _XPCSHELL_PROCESS;
+    params.xpcshell_process = _XPCSHELL_PROCESS;
   }
-  params.action = action;
-  params._time = Date.now();
-  dump("\n" + JSON.stringify(params) + "\n");
+};
+
+let _dumpLog = function (raw_msg) {
+  dump("\n" + raw_msg + "\n");
 }
 
-function _dump(str) {
-  let start = /^TEST-/.test(str) ? "\n" : "";
-  if (typeof _XPCSHELL_PROCESS == "undefined") {
-    dump(start + str);
-  } else {
-    dump(start + _XPCSHELL_PROCESS + ": " + str);
-  }
-}
+let _LoggerClass = Components.utils.import("resource://testing-common/StructuredLog.jsm", null).StructuredLogger;
+let _testLogger = new _LoggerClass("xpcshell/head.js", _dumpLog, [_add_params]);
 
 // Disable automatic network detection, so tests work correctly when
 // not connected to a network.
 let (ios = Components.classes["@mozilla.org/network/io-service;1"]
            .getService(Components.interfaces.nsIIOService2)) {
   ios.manageOfflineStatus = false;
   ios.offline = false;
 }
@@ -176,64 +170,34 @@ function _Timer(func, delay) {
     do_timeout(newDelay, this._func);
   }
 };
 
 function _do_main() {
   if (_quit)
     return;
 
-  _log("test_info",
-       {_message: "TEST-INFO | (xpcshell/head.js) | running event loop\n"});
+  _testLogger.info("running event loop");
 
   var thr = Components.classes["@mozilla.org/thread-manager;1"]
                       .getService().currentThread;
 
   while (!_quit)
     thr.processNextEvent(true);
 
   while (thr.hasPendingEvents())
     thr.processNextEvent(true);
 }
 
 function _do_quit() {
-  _log("test_info",
-       {_message: "TEST-INFO | (xpcshell/head.js) | exiting test\n"});
+  _testLogger.info("exiting test");
   _Promise.Debugging.flushUncaughtErrors();
   _quit = true;
 }
 
-function _format_exception_stack(stack) {
-  if (typeof stack == "object" && stack.caller) {
-    let frame = stack;
-    let strStack = "";
-    while (frame != null) {
-      strStack += frame + "\n";
-      frame = frame.caller;
-    }
-    stack = strStack;
-  }
-  // frame is of the form "fname@file:line"
-  let frame_regexp = new RegExp("(.*)@(.*):(\\d*)", "g");
-  return stack.split("\n").reduce(function(stack_msg, frame) {
-    if (frame) {
-      let parts = frame_regexp.exec(frame);
-      if (parts) {
-        let [ _, func, file, line ] = parts;
-        return stack_msg + "JS frame :: " + file + " :: " +
-          (func || "anonymous") + " :: line " + line + "\n";
-      }
-      else { /* Could be a -e (command line string) style location. */
-        return stack_msg + "JS frame :: " + frame + "\n";
-      }
-    }
-    return stack_msg;
-  }, "");
-}
-
 /**
  * Overrides idleService with a mock.  Idle is commonly used for maintenance
  * tasks, thus if a test uses a service that requires the idle service, it will
  * start handling them.
  * This behaviour would cause random failures and slowdown tests execution,
  * for example by running database vacuum or cleanups for each test.
  *
  * @note Idle service is overridden by default.  If a test requires it, it will
@@ -378,18 +342,21 @@ function _execute_test() {
   // Override idle service by default.
   // Call do_get_idle() to restore the factory and get the service.
   _fakeIdleService.activate();
 
   _Promise.Debugging.clearUncaughtErrorObservers();
   _Promise.Debugging.addUncaughtErrorObserver(function observer({message, date, fileName, stack, lineNumber}) {
     let text = " A promise chain failed to handle a rejection: " +
         message + " - rejection date: " + date;
-    _log_message_with_stack("test_unexpected_fail",
-                            text, stack, fileName);
+    _testLogger.error(text,
+                      {
+                        stack: _format_stack(stack),
+                        source_file: fileName
+                      });
   });
 
   // _HEAD_FILES is dynamically defined by <runxpcshelltests.py>.
   _load_files(_HEAD_FILES);
   // _TEST_FILE is dynamically defined by <runxpcshelltests.py>.
   _load_files(_TEST_FILE);
 
   // Tack Assert.jsm methods to the current scope.
@@ -406,31 +373,30 @@ function _execute_test() {
   } catch (e) {
     _passed = false;
     // do_check failures are already logged and set _quit to true and throw
     // NS_ERROR_ABORT. If both of those are true it is likely this exception
     // has already been logged so there is no need to log it again. It's
     // possible that this will mask an NS_ERROR_ABORT that happens after a
     // do_check failure though.
     if (!_quit || e != Components.results.NS_ERROR_ABORT) {
-      let msgObject = {};
+      let extra = {};
       if (e.fileName) {
-        msgObject.source_file = e.fileName;
+        extra.source_file = e.fileName;
         if (e.lineNumber) {
-          msgObject.line_number = e.lineNumber;
+          extra.line_number = e.lineNumber;
         }
       } else {
-        msgObject.source_file = "xpcshell/head.js";
+        extra.source_file = "xpcshell/head.js";
       }
-      msgObject.diagnostic = _exception_message(e);
+      let message = _exception_message(e);
       if (e.stack) {
-        msgObject.diagnostic += " - See following stack:\n";
-        msgObject.stack = _format_exception_stack(e.stack);
+        extra.stack = _format_stack(e.stack);
       }
-      _log("test_unexpected_fail", msgObject);
+      _testLogger.error(message, extra);
     }
   }
 
   // _TAIL_FILES is dynamically defined by <runxpcshelltests.py>.
   _load_files(_TAIL_FILES);
 
   // Execute all of our cleanup functions.
   let reportCleanupError = function(ex) {
@@ -440,18 +406,21 @@ function _execute_test() {
     } else {
       stack = Components.stack.caller;
     }
     if (stack instanceof Components.interfaces.nsIStackFrame) {
       filename = stack.filename;
     } else if (ex.fileName) {
       filename = ex.fileName;
     }
-    _log_message_with_stack("test_unexpected_fail",
-                            ex, stack, filename);
+    _testLogger.error(_exception_message(ex),
+                      {
+                        stack: _format_stack(stack),
+                        source_file: filename
+                      });
   };
 
   let func;
   while ((func = _cleanupFunctions.pop())) {
     let result;
     try {
       result = func();
     } catch (ex) {
@@ -472,81 +441,54 @@ function _execute_test() {
     }
   }
 
   // Restore idle service to avoid leaks.
   _fakeIdleService.deactivate();
 
   if (!_passed)
     return;
-
-  var truePassedChecks = _passedChecks - _falsePassedChecks;
-  if (truePassedChecks > 0) {
-    _log("test_pass",
-         {_message: "TEST-PASS | (xpcshell/head.js) | " + truePassedChecks + " (+ " +
-                    _falsePassedChecks + ") check(s) passed\n",
-          source_file: _TEST_FILE,
-          passed_checks: truePassedChecks});
-    _log("test_info",
-         {_message: "TEST-INFO | (xpcshell/head.js) | " + _todoChecks +
-                    " check(s) todo\n",
-          source_file: _TEST_FILE,
-          todo_checks: _todoChecks});
-  } else {
-    // ToDo: switch to TEST-UNEXPECTED-FAIL when all tests have been updated. (Bug 496443)
-    _log("test_info",
-         {_message: "TEST-INFO | (xpcshell/head.js) | No (+ " + _falsePassedChecks +
-                    ") checks actually run\n",
-         source_file: _TEST_FILE});
-  }
 }
 
 /**
  * Loads files.
  *
  * @param aFiles Array of files to load.
  */
 function _load_files(aFiles) {
-  function loadTailFile(element, index, array) {
+  function load_file(element, index, array) {
     try {
       load(element);
-    } catch (e if e instanceof SyntaxError) {
-      _log("javascript_error",
-           {_message: "TEST-UNEXPECTED-FAIL | (xpcshell/head.js) | Source file " + element + " contains SyntaxError",
-            diagnostic: _exception_message(e),
-            source_file: element,
-            stack: _format_exception_stack(e.stack)});
     } catch (e) {
-      _log("javascript_error",
-           {_message: "TEST-UNEXPECTED-FAIL | (xpcshell/head.js) | Source file " + element + " contains an error",
-            diagnostic: _exception_message(e),
-            source_file: element,
-            stack: e.stack ? _format_exception_stack(e.stack) : null});
+      let extra = {
+        source_file: element
+      }
+      if (e.stack) {
+        extra.stack = _format_stack(e.stack);
+      }
+      _testLogger.error(_exception_message(e), extra);
     }
   }
 
-  aFiles.forEach(loadTailFile);
+  aFiles.forEach(load_file);
 }
 
 function _wrap_with_quotes_if_necessary(val) {
   return typeof val == "string" ? '"' + val + '"' : val;
 }
 
 /************** Functions to be used from the tests **************/
 
 /**
  * Prints a message to the output log.
  */
-function do_print(msg) {
-  var caller_stack = Components.stack.caller;
+function do_print(msg, data) {
   msg = _wrap_with_quotes_if_necessary(msg);
-  _log("test_info",
-       {source_file: caller_stack.filename,
-        diagnostic: msg});
-
+  data = data ? data : null;
+  _testLogger.info(msg, data);
 }
 
 /**
  * Calls the given function at least the specified number of milliseconds later.
  * The callback will not undershoot the given time, but it might overshoot --
  * don't expect precision!
  *
  * @param delay : uint
@@ -570,26 +512,23 @@ function do_execute_soon(callback, aName
         callback();
       } catch (e) {
         // do_check failures are already logged and set _quit to true and throw
         // NS_ERROR_ABORT. If both of those are true it is likely this exception
         // has already been logged so there is no need to log it again. It's
         // possible that this will mask an NS_ERROR_ABORT that happens after a
         // do_check failure though.
         if (!_quit || e != Components.results.NS_ERROR_ABORT) {
-          if (e.stack) {
-            _log("javascript_error",
-                 {source_file: "xpcshell/head.js",
-                  diagnostic: _exception_message(e) + " - See following stack:",
-                  stack: _format_exception_stack(e.stack)});
-          } else {
-            _log("javascript_error",
-                 {source_file: "xpcshell/head.js",
-                  diagnostic: _exception_message(e)});
-          }
+          let stack = e.stack ? _format_stack(e.stack) : null;
+          _testLogger.testStatus(_TEST_NAME,
+                                 funcName,
+                                 'ERROR',
+                                 'OK',
+                                 _exception_message(e),
+                                 stack);
           _do_quit();
         }
       }
       finally {
         do_test_finished(funcName);
       }
     }
   }, Components.interfaces.nsIThread.DISPATCH_NORMAL);
@@ -608,19 +547,26 @@ function do_throw(error, stack) {
   // otherwise get it from our call context
   stack = stack || error.stack || Components.stack.caller;
 
   if (stack instanceof Components.interfaces.nsIStackFrame)
     filename = stack.filename;
   else if (error.fileName)
     filename = error.fileName;
 
-  _log_message_with_stack("test_unexpected_fail",
-                          error, stack, filename);
+  _testLogger.error(_exception_message(error),
+                    {
+                      source_file: filename,
+                      stack: _format_stack(stack)
+                    });
+  _abort_failed_test();
+}
 
+function _abort_failed_test() {
+  // Called to abort the test run after all failures are logged.
   _passed = false;
   _do_quit();
   throw Components.results.NS_ERROR_ABORT;
 }
 
 function _format_stack(stack) {
   let normalized;
   if (stack instanceof Components.interfaces.nsIStackFrame) {
@@ -630,27 +576,16 @@ function _format_stack(stack) {
     }
     normalized = frames.join("\n");
   } else {
     normalized = "" + stack;
   }
   return _Task.Debugging.generateReadableStack(normalized, "    ");
 }
 
-function do_throw_todo(text, stack) {
-  if (!stack)
-    stack = Components.stack.caller;
-
-  _passed = false;
-  _log_message_with_stack("test_unexpected_pass",
-                          text, stack, stack.filename);
-  _do_quit();
-  throw Components.results.NS_ERROR_ABORT;
-}
-
 // Make a nice display string from an object that behaves
 // like Error
 function _exception_message(ex) {
   let message = "";
   if (ex.name) {
     message = ex.name + ": ";
   }
   if (ex.message) {
@@ -664,49 +599,37 @@ function _exception_message(ex) {
   }
   if (message !== "") {
     return message;
   }
   // Force ex to be stringified
   return "" + ex;
 }
 
-function _log_message_with_stack(action, ex, stack, filename, text) {
-  if (stack) {
-    _log(action,
-         {diagnostic: (text ? text : "") +
-                      _exception_message(ex) +
-                      " - See following stack:",
-          source_file: filename,
-          stack: _format_stack(stack)});
-  } else {
-    _log(action,
-         {diagnostic: (text ? text : "") +
-                      _exception_message(ex),
-          source_file: filename});
-  }
-}
-
 function do_report_unexpected_exception(ex, text) {
-  var caller_stack = Components.stack.caller;
+  let filename = Components.stack.caller.filename;
   text = text ? text + " - " : "";
 
   _passed = false;
-  _log_message_with_stack("test_unexpected_fail", ex, ex.stack || "",
-                          caller_stack.filename, text + "Unexpected exception ");
+  _testLogger.error(text + "Unexpected exception " + _exception_message(ex),
+                    {
+                      source_file: filename,
+                      stack: _format_stack(ex.stack)
+                    });
   _do_quit();
   throw Components.results.NS_ERROR_ABORT;
 }
 
 function do_note_exception(ex, text) {
-  var caller_stack = Components.stack.caller;
-  text = text ? text + " - " : "";
-
-  _log_message_with_stack("test_info", ex, ex.stack,
-                          caller_stack.filename, text + "Swallowed exception ");
+  let filename = Components.stack.caller.filename;
+  _testLogger.info(text + "Swallowed exception " + _exception_message(ex),
+                   {
+                     source_file: filename,
+                     stack: _format_stack(ex.stack)
+                   });
 }
 
 function _do_check_neq(left, right, stack, todo) {
   Assert.notEqual(left, right);
 }
 
 function do_check_neq(left, right, stack) {
   if (!stack)
@@ -721,39 +644,56 @@ function todo_check_neq(left, right, sta
 
   _do_check_neq(left, right, stack, true);
 }
 
 function do_report_result(passed, text, stack, todo) {
   while (stack.filename.contains("head.js") && stack.caller) {
     stack = stack.caller;
   }
+
+  let name = _gRunningTest ? _gRunningTest.name : stack.name;
+  let message;
+  if (name) {
+     message = "[" + name + " : " + stack.lineNumber + "] " + text;
+  } else {
+    message = text;
+  }
+
   if (passed) {
     if (todo) {
-      do_throw_todo(text, stack);
+      _testLogger.testStatus(_TEST_NAME,
+                             name,
+                             "PASS",
+                             "FAIL",
+                             message,
+                             _format_stack(stack));
+      _abort_failed_test();
     } else {
-      ++_passedChecks;
-      _log("test_pass",
-           {source_file: stack.filename,
-            test_name: stack.name,
-            line_number: stack.lineNumber,
-            diagnostic: "[" + stack.name + " : " + stack.lineNumber + "] " +
-                        text + "\n"});
+      _testLogger.testStatus(_TEST_NAME,
+                             name,
+                             "PASS",
+                             "PASS",
+                             message);
     }
   } else {
     if (todo) {
-      ++_todoChecks;
-      _log("test_known_fail",
-           {source_file: stack.filename,
-            test_name: stack.name,
-            line_number: stack.lineNumber,
-            diagnostic: "[" + stack.name + " : " + stack.lineNumber + "] " +
-                        text + "\n"});
+      _testLogger.testStatus(_TEST_NAME,
+                             name,
+                             "FAIL",
+                             "FAIL",
+                             message);
     } else {
-      do_throw(text, stack);
+      _testLogger.testStatus(_TEST_NAME,
+                             name,
+                             "FAIL",
+                             "PASS",
+                             message,
+                             _format_stack(stack));
+      _abort_failed_test();
     }
   }
 }
 
 function _do_check_eq(left, right, stack, todo) {
   if (!stack)
     stack = Components.stack.caller;
 
@@ -873,27 +813,25 @@ function do_check_instanceof(value, cons
 function todo_check_instanceof(value, constructor,
                              stack=Components.stack.caller) {
   do_check_instanceof(value, constructor, stack, true);
 }
 
 function do_test_pending(aName) {
   ++_tests_pending;
 
-  _log("test_pending",
-       {_message: "TEST-INFO | (xpcshell/head.js) | test" +
-                  (aName ? " " + aName : "") +
-                  " pending (" + _tests_pending + ")\n"});
+  _testLogger.info("(xpcshell/head.js) | test" +
+                   (aName ? " " + aName : "") +
+                   " pending (" + _tests_pending + ")");
 }
 
 function do_test_finished(aName) {
-  _log("test_finish",
-       {_message: "TEST-INFO | (xpcshell/head.js) | test" +
-                  (aName ? " " + aName : "") +
-                  " finished (" + _tests_pending + ")\n"});
+  _testLogger.info("(xpcshell/head.js) | test" +
+                   (aName ? " " + aName : "") +
+                   " finished (" + _tests_pending + ")");
   if (--_tests_pending == 0)
     _do_quit();
 }
 
 function do_get_file(path, allowNonexistent) {
   try {
     let lf = Components.classes["@mozilla.org/file/directory_service;1"]
       .getService(Components.interfaces.nsIProperties)
@@ -908,22 +846,18 @@ function do_get_file(path, allowNonexist
           lf.append(bits[i]);
       }
     }
 
     if (!allowNonexistent && !lf.exists()) {
       // Not using do_throw(): caller will continue.
       _passed = false;
       var stack = Components.stack.caller;
-      _log("test_unexpected_fail",
-           {source_file: stack.filename,
-            test_name: stack.name,
-            line_number: stack.lineNumber,
-            diagnostic: "[" + stack.name + " : " + stack.lineNumber + "] " +
-                        lf.path + " does not exist\n"});
+      _testLogger.error("[" + stack.name + " : " + stack.lineNumber + "] " +
+                        lf.path + " does not exist");
     }
 
     return lf;
   }
   catch (ex) {
     do_throw(ex.toString(), Components.stack.caller);
   }
 
@@ -935,17 +869,16 @@ function do_get_cwd() {
   return do_get_file("");
 }
 
 function do_load_manifest(path) {
   var lf = do_get_file(path);
   const nsIComponentRegistrar = Components.interfaces.nsIComponentRegistrar;
   do_check_true(Components.manager instanceof nsIComponentRegistrar);
   // Previous do_check_true() is not a test check.
-  ++_falsePassedChecks;
   Components.manager.autoRegister(lf);
 }
 
 /**
  * Parse a DOM document.
  *
  * @param aPath File path to the document.
  * @param aType Content type to use in DOMParser.
@@ -1033,19 +966,17 @@ function do_get_minidumpdir() {
 /**
  * Registers a directory with the profile service,
  * and return the directory as an nsILocalFile.
  *
  * @return nsILocalFile of the profile directory.
  */
 function do_get_profile() {
   if (!runningInParent) {
-    _log("test_info",
-         {_message: "TEST-INFO | (xpcshell/head.js) | Ignoring profile creation from child process.\n"});
-
+    _testLogger.info("Ignoring profile creation from child process.");
     return null;
   }
 
   if (!_profileInitialized) {
     // Since we have a profile, we will notify profile shutdown topics at
     // the end of the current test, to ensure correct cleanup on shutdown.
     do_register_cleanup(function() {
       let obsSvc = Components.classes["@mozilla.org/observer-service;1"].
@@ -1166,19 +1097,21 @@ function run_test_in_child(testFile, opt
 {
   var callback = (typeof optionalCallback == 'undefined') ? 
                     do_test_finished : optionalCallback;
 
   do_load_child_test_harness();
 
   var testPath = do_get_file(testFile).path.replace(/\\/g, "/");
   do_test_pending("run in child");
-  sendCommand("_log('child_test_start', {_message: 'CHILD-TEST-STARTED'}); "
-              + "const _TEST_FILE=['" + testPath + "']; _execute_test(); "
-              + "_log('child_test_end', {_message: 'CHILD-TEST-COMPLETED'});",
+  sendCommand("_testLogger.info('CHILD-TEST-STARTED'); "
+              + "const _TEST_FILE=['" + testPath + "']; "
+              + "const _TEST_NAME=" + uneval(_TEST_NAME) + "; "
+              + "_execute_test(); "
+              + "_testLogger.info('CHILD-TEST-COMPLETED');",
               callback);
 }
 
 /**
  * Execute a given function as soon as a particular cross-process message is received.
  * Must be paired with do_send_remote_message or equivalent ProcessMessageManager calls.
  */
 function do_await_remote_message(name, callback)
@@ -1295,17 +1228,17 @@ function run_next_test()
  
   function _run_next_test()
   {
     if (_gTestIndex < _gTests.length) {
       // Flush uncaught errors as early and often as possible.
       _Promise.Debugging.flushUncaughtErrors();
       let _isTask;
       [_isTask, _gRunningTest] = _gTests[_gTestIndex++];
-      print("TEST-INFO | " + _TEST_FILE + " | Starting " + _gRunningTest.name);
+      _testLogger.info(_TEST_NAME + " | Starting " + _gRunningTest.name);
       do_test_pending(_gRunningTest.name);
 
       if (_isTask) {
         _gTaskRunning = true;
         _Task.spawn(_gRunningTest).then(
           () => { _gTaskRunning = false; run_next_test(); },
           (ex) => { _gTaskRunning = false; do_report_unexpected_exception(ex); }
         );
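As a minimal sketch (not part of the patch), this is how a harness-side consumer might classify the test_status records that do_report_result() and do_execute_soon() now emit: a status that differs from the expected status is an unexpected result. The sample record is hypothetical; the status/expected pairs mirror the head.js code above.

def is_unexpected(record):
    # mozlog formatters treat a status differing from "expected" as unexpected;
    # when the field is absent, the status is assumed to be the expected one.
    if record.get("action") != "test_status":
        return False
    expected = record.get("expected", record["status"])
    return record["status"] != expected

# A todo check that unexpectedly passed, as do_report_result(true, ..., todo) would log it.
sample = {"action": "test_status", "test": "test_example.js", "subtest": "run_test",
          "status": "PASS", "expected": "FAIL", "message": "[run_test : 12] check passed"}
print(is_unexpected(sample))  # True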
--- a/testing/xpcshell/mach_commands.py
+++ b/testing/xpcshell/mach_commands.py
@@ -1,37 +1,39 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 # Integrates the xpcshell test runner with mach.
 
 from __future__ import unicode_literals, print_function
 
-import mozpack.path
-import logging
+import argparse
 import os
 import shutil
 import sys
 import urllib2
 
-from StringIO import StringIO
+from mozlog import structured
 
 from mozbuild.base import (
     MachCommandBase,
     MozbuildObject,
     MachCommandConditions as conditions,
 )
 
 from mach.decorators import (
     CommandArgument,
     CommandProvider,
     Command,
 )
 
+_parser = argparse.ArgumentParser()
+structured.commandline.add_logging_group(_parser)
+
 ADB_NOT_FOUND = '''
 The %s command requires the adb binary to be on your path.
 
 If you have a B2G build, this can be found in
 '%s/out/host/<platform>/bin'.
 '''.lstrip()
 
 BUSYBOX_URLS = {
@@ -40,21 +42,16 @@ BUSYBOX_URLS = {
 }
 
 
 if sys.version_info[0] < 3:
     unicode_type = unicode
 else:
     unicode_type = str
 
-# Simple filter to omit the message emitted as a test file begins.
-class TestStartFilter(logging.Filter):
-    def filter(self, record):
-        return not record.params['msg'].endswith("running test ...")
-
 # This should probably be consolidated with similar classes in other test
 # runners.
 class InvalidTestPathError(Exception):
     """Exception raised when the test path is not valid."""
 
 
 class XPCShellRunner(MozbuildObject):
     """Run xpcshell tests."""
@@ -64,16 +61,17 @@ class XPCShellRunner(MozbuildObject):
             '_tests', 'xpcshell', 'xpcshell.ini')])
 
         return self._run_xpcshell_harness(manifest=manifest, **kwargs)
 
     def run_test(self, test_paths, interactive=False,
                  keep_going=False, sequential=False, shuffle=False,
                  debugger=None, debuggerArgs=None, debuggerInteractive=None,
                  rerun_failures=False, test_objects=None, verbose=False,
+                 log=None,
                  # ignore parameters from other platforms' options
                  **kwargs):
         """Runs an individual xpcshell test."""
         from mozbuild.testing import TestResolver
         from manifestparser import TestManifest
 
         # TODO Bug 794506 remove once mach integrates with virtualenv.
         build_path = os.path.join(self.topobjdir, 'build')
@@ -81,17 +79,17 @@ class XPCShellRunner(MozbuildObject):
             sys.path.append(build_path)
 
         if test_paths == ['all']:
             self.run_suite(interactive=interactive,
                            keep_going=keep_going, shuffle=shuffle, sequential=sequential,
                            debugger=debugger, debuggerArgs=debuggerArgs,
                            debuggerInteractive=debuggerInteractive,
                            rerun_failures=rerun_failures,
-                           verbose=verbose)
+                           verbose=verbose, log=log)
             return
         elif test_paths:
             test_paths = [self._wrap_path_argument(p).relpath() for p in test_paths]
 
         if test_objects:
             tests = test_objects
         else:
             resolver = self._spawn(TestResolver)
@@ -113,67 +111,61 @@ class XPCShellRunner(MozbuildObject):
             'shuffle': shuffle,
             'sequential': sequential,
             'debugger': debugger,
             'debuggerArgs': debuggerArgs,
             'debuggerInteractive': debuggerInteractive,
             'rerun_failures': rerun_failures,
             'manifest': manifest,
             'verbose': verbose,
+            'log': log,
         }
 
         return self._run_xpcshell_harness(**args)
 
     def _run_xpcshell_harness(self, manifest,
                               test_path=None, shuffle=False, interactive=False,
                               keep_going=False, sequential=False,
                               debugger=None, debuggerArgs=None, debuggerInteractive=None,
-                              rerun_failures=False, verbose=False):
+                              rerun_failures=False, verbose=False, log=None):
 
         # Obtain a reference to the xpcshell test runner.
         import runxpcshelltests
 
-        dummy_log = StringIO()
-        xpcshell = runxpcshelltests.XPCShellTests(log=dummy_log)
+        xpcshell = runxpcshelltests.XPCShellTests(log=log)
         self.log_manager.enable_unstructured()
 
-        xpcshell_filter = TestStartFilter()
-        self.log_manager.terminal_handler.addFilter(xpcshell_filter)
-
         tests_dir = os.path.join(self.topobjdir, '_tests', 'xpcshell')
         modules_dir = os.path.join(self.topobjdir, '_tests', 'modules')
         # We want output from the test to be written immediately if we are only
         # running a single test.
         single_test = (test_path is not None or
                        (manifest and len(manifest.test_paths())==1))
-        verbose_output = verbose or single_test
         sequential = sequential or single_test
 
         args = {
             'manifest': manifest,
             'xpcshell': self.get_binary_path('xpcshell'),
             'mozInfo': os.path.join(self.topobjdir, 'mozinfo.json'),
             'symbolsPath': os.path.join(self.distdir, 'crashreporter-symbols'),
             'interactive': interactive,
             'keepGoing': keep_going,
             'logfiles': False,
             'sequential': sequential,
             'shuffle': shuffle,
             'testsRootDir': tests_dir,
             'testingModulesDir': modules_dir,
             'profileName': 'firefox',
-            'verbose': test_path is not None,
+            'verbose': verbose or single_test,
             'xunitFilename': os.path.join(self.statedir, 'xpchsell.xunit.xml'),
             'xunitName': 'xpcshell',
             'pluginsPath': os.path.join(self.distdir, 'plugins'),
             'debugger': debugger,
             'debuggerArgs': debuggerArgs,
             'debuggerInteractive': debuggerInteractive,
-            'on_message': (lambda obj, msg: xpcshell.log.info(msg.decode('utf-8', 'replace'))) \
-                            if verbose_output else None,
         }
 
         if test_path is not None:
             args['testPath'] = test_path
 
         # A failure manifest is written by default. If --rerun-failures is
         # specified and a prior failure manifest is found, the prior manifest
         # will be run. A new failure manifest is always written over any
@@ -199,17 +191,16 @@ class XPCShellRunner(MozbuildObject):
 
             if isinstance(k, unicode_type):
                 k = k.encode('utf-8')
 
             filtered_args[k] = v
 
         result = xpcshell.runTests(**filtered_args)
 
-        self.log_manager.terminal_handler.removeFilter(xpcshell_filter)
         self.log_manager.disable_unstructured()
 
         if not result and not xpcshell.sequential:
             print("Tests were run in parallel. Try running with --sequential "
                   "to make sure the failures were not caused by this.")
         return int(not result)
 
 class AndroidXPCShellRunner(MozbuildObject):
@@ -228,17 +219,17 @@ class AndroidXPCShellRunner(MozbuildObje
             else:
                 raise Exception("You must provide a device IP to connect to via the --ip option")
         return dm
 
     """Run Android xpcshell tests."""
     def run_test(self,
                  test_paths, keep_going,
                  devicemanager, ip, port, remote_test_root, no_setup, local_apk,
-                 test_objects=None,
+                 test_objects=None, log=None,
                  # ignore parameters from other platforms' options
                  **kwargs):
         # TODO Bug 794506 remove once mach integrates with virtualenv.
         build_path = os.path.join(self.topobjdir, 'build')
         if build_path not in sys.path:
             sys.path.append(build_path)
 
         import remotexpcshelltests
@@ -283,31 +274,25 @@ class AndroidXPCShellRunner(MozbuildObje
             options.testPath = test_objects[0]['path']
             options.verbose = True
         else:
             if len(test_paths) > 1:
                 print('Warning: only the first test path argument will be used.')
             testdirs = test_paths[0]
             options.testPath = test_paths[0]
             options.verbose = True
-        dummy_log = StringIO()
-        xpcshell = remotexpcshelltests.XPCShellRemote(dm, options, args=testdirs, log=dummy_log)
-        self.log_manager.enable_unstructured()
 
-        xpcshell_filter = TestStartFilter()
-        self.log_manager.terminal_handler.addFilter(xpcshell_filter)
+        xpcshell = remotexpcshelltests.XPCShellRemote(dm, options, testdirs, log)
 
         result = xpcshell.runTests(xpcshell='xpcshell',
                       testClass=remotexpcshelltests.RemoteXPCShellTestThread,
                       testdirs=testdirs,
                       mobileArgs=xpcshell.mobileArgs,
                       **options.__dict__)
 
-        self.log_manager.terminal_handler.removeFilter(xpcshell_filter)
-        self.log_manager.disable_unstructured()
 
         return int(not result)
 
 class B2GXPCShellRunner(MozbuildObject):
     def __init__(self, *args, **kwargs):
         MozbuildObject.__init__(self, *args, **kwargs)
 
         # TODO Bug 794506 remove once mach integrates with virtualenv.
@@ -343,17 +328,17 @@ class B2GXPCShellRunner(MozbuildObject):
                   'initial setup will be slow.')
             return
 
         with open(busybox_path, 'wb') as f:
             f.write(data.read())
         return busybox_path
 
     def run_test(self, test_paths, b2g_home=None, busybox=None, device_name=None,
-                 test_objects=None,
+                 test_objects=None, log=None,
                  # ignore parameters from other platforms' options
                  **kwargs):
         try:
             import which
             which.which('adb')
         except which.WhichError:
             # TODO Find adb automatically if it isn't on the path
             print(ADB_NOT_FOUND % ('mochitest-remote', b2g_home))
@@ -392,17 +377,17 @@ class B2GXPCShellRunner(MozbuildObject):
         options.emulator = 'arm'
         if device_name.startswith('emulator'):
             if 'x86' in device_name:
                 options.emulator = 'x86'
 
         if not options.busybox:
             options.busybox = self._download_busybox(b2g_home, options.emulator)
 
-        return runtestsb2g.run_remote_xpcshell(parser, options, args)
+        return runtestsb2g.run_remote_xpcshell(parser, options, args, log)
 
 def is_platform_supported(cls):
     """Must have a Firefox, Android or B2G build."""
     return conditions.is_android(cls) or \
            conditions.is_b2g(cls) or \
            conditions.is_firefox(cls)
 
 @CommandProvider
@@ -410,17 +395,18 @@ class MachCommands(MachCommandBase):
     def __init__(self, context):
         MachCommandBase.__init__(self, context)
 
         for attr in ('b2g_home', 'device_name'):
             setattr(self, attr, getattr(context, attr, None))
 
     @Command('xpcshell-test', category='testing',
         conditions=[is_platform_supported],
-        description='Run XPCOM Shell tests (API direct unit testing)')
+        description='Run XPCOM Shell tests (API direct unit testing)',
+        parser=_parser)
     @CommandArgument('test_paths', default='all', nargs='*', metavar='TEST',
         help='Test to run. Can be specified as a single JS file, a directory, '
              'or omitted. If omitted, the entire test suite is executed.')
     @CommandArgument('--verbose', '-v', action='store_true',
         help='Provide full output from each test process.')
     @CommandArgument("--debugger", default=None, metavar='DEBUGGER',
                      help = "Run xpcshell under the given debugger.")
     @CommandArgument("--debugger-args", default=None, metavar='ARGS', type=str,
@@ -461,16 +447,21 @@ class MachCommands(MachCommandBase):
         # We should probably have a utility function to ensure the tree is
         # ready to run tests. Until then, we just create the state dir (in
         # case the tree wasn't built with mach).
         self._ensure_state_subdir_exists('.')
 
         driver = self._spawn(BuildDriver)
         driver.install_tests(remove=False)
 
+        structured.commandline.formatter_option_defaults['verbose'] = True
+        params['log'] = structured.commandline.setup_logging("XPCShellTests",
+                                                             params,
+                                                             {"mach": sys.stdout})
+
         if conditions.is_android(self):
             xpcshell = self._spawn(AndroidXPCShellRunner)
         elif conditions.is_b2g(self):
             xpcshell = self._spawn(B2GXPCShellRunner)
             params['b2g_home'] = self.b2g_home
             params['device_name'] = self.device_name
         else:
             xpcshell = self._spawn(XPCShellRunner)
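As a minimal, self-contained sketch (not part of the patch), this is the mozlog.structured wiring pattern the mach command now follows: register mozlog's --log-* options on a parser, then build a logger from the parsed arguments, defaulting to mach-formatted output on stdout. The suite name and test id here are illustrative.

import argparse
import sys

from mozlog import structured

parser = argparse.ArgumentParser()
structured.commandline.add_logging_group(parser)
args = vars(parser.parse_args())

# Fall back to the human-readable "mach" formatter when no --log-* option is passed.
logger = structured.commandline.setup_logging("example-suite", args, {"mach": sys.stdout})

logger.suite_start(["test_example.js"])
logger.test_start("test_example.js")
logger.test_status("test_example.js", "run_test", "PASS")
logger.test_end("test_example.js", "OK")
logger.suite_end()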
--- a/testing/xpcshell/remotexpcshelltests.py
+++ b/testing/xpcshell/remotexpcshelltests.py
@@ -6,16 +6,18 @@
 
 import posixpath
 import sys, os
 import subprocess
 import runxpcshelltests as xpcshell
 import tempfile
 from automationutils import replaceBackSlashes
 from zipfile import ZipFile
+from mozlog import structured
+from mozlog.structured import commandline
 import shutil
 import mozdevice
 import mozfile
 import mozinfo
 
 here = os.path.dirname(os.path.abspath(__file__))
 
 def remoteJoin(path1, path2):
@@ -58,30 +60,30 @@ class RemoteXPCShellTestThread(xpcshell.
             return None
 
         # making sure tmp dir is set up
         self.setupTempDir()
 
         pluginsDir = remoteJoin(self.remoteTmpDir, "plugins")
         self.device.pushDir(self.pluginsPath, pluginsDir)
         if self.interactive:
-            self.log.info("TEST-INFO | plugins dir is %s" % pluginsDir)
+            self.log.info("plugins dir is %s" % pluginsDir)
         return pluginsDir
 
     def setupProfileDir(self):
         self.device.removeDir(self.profileDir)
         self.device.mkDir(self.profileDir)
         if self.interactive or self.singleFile:
-            self.log.info("TEST-INFO | profile dir is %s" % self.profileDir)
+            self.log.info("profile dir is %s" % self.profileDir)
         return self.profileDir
 
     def logCommand(self, name, completeCmd, testdir):
-        self.log.info("TEST-INFO | %s | full command: %r" % (name, completeCmd))
-        self.log.info("TEST-INFO | %s | current directory: %r" % (name, self.remoteHere))
-        self.log.info("TEST-INFO | %s | environment: %s" % (name, self.env))
+        self.log.info("%s | full command: %r" % (name, completeCmd))
+        self.log.info("%s | current directory: %r" % (name, self.remoteHere))
+        self.log.info("%s | environment: %s" % (name, self.env))
 
     def getHeadAndTailFiles(self, test):
         """Override parent method to find files on remote device."""
         def sanitize_list(s, kind):
             for f in s.strip().split(' '):
                 f = f.strip()
                 if len(f) < 1:
                     continue
@@ -115,20 +117,17 @@ class RemoteXPCShellTestThread(xpcshell.
 
         if self.remoteDebugger:
             # for example, "/data/local/gdbserver" "localhost:12345"
             self.xpcsCmd = [
               self.remoteDebugger,
               self.remoteDebuggerArgs,
               self.xpcsCmd]
 
-    def testTimeout(self, test_file, proc):
-        self.timedout = True
-        if not self.retry:
-            self.log.error("TEST-UNEXPECTED-FAIL | %s | Test timed out" % test_file)
+    def killTimeout(self, proc):
         self.kill(proc)
 
     def launchProcess(self, cmd, stdout, stderr, env, cwd, timeout=None):
         self.timedout = False
         cmd.insert(1, self.remoteHere)
         outputFile = "xpcshelloutput"
         with open(outputFile, 'w+') as f:
             try:
@@ -206,17 +205,17 @@ class RemoteXPCShellTestThread(xpcshell.
             if f is not None:
                 f.close()
 
 
 # A specialization of XPCShellTests that runs tests on an Android device
 # via devicemanager.
 class XPCShellRemote(xpcshell.XPCShellTests, object):
 
-    def __init__(self, devmgr, options, args, log=None):
+    def __init__(self, devmgr, options, args, log):
         xpcshell.XPCShellTests.__init__(self, log)
 
         # Add Android version (SDK level) to mozinfo so that manifest entries
         # can be conditional on android_version.
         androidVersion = devmgr.shellCheckOutput(['getprop', 'ro.build.version.sdk'])
         mozinfo.info['android_version'] = androidVersion
 
         self.localLib = options.localLib
@@ -570,29 +569,33 @@ class PathMapping:
 
 def main():
 
     if sys.version_info < (2,7):
         print >>sys.stderr, "Error: You must use python version 2.7 or newer but less than 3.0"
         sys.exit(1)
 
     parser = RemoteXPCShellOptions()
+    structured.commandline.add_logging_group(parser)
     options, args = parser.parse_args()
     if not options.localAPK:
         for file in os.listdir(os.path.join(options.objdir, "dist")):
             if (file.endswith(".apk") and file.startswith("fennec")):
                 options.localAPK = os.path.join(options.objdir, "dist")
                 options.localAPK = os.path.join(options.localAPK, file)
                 print >>sys.stderr, "using APK: " + options.localAPK
                 break
         else:
             print >>sys.stderr, "Error: please specify an APK"
             sys.exit(1)
 
     options = parser.verifyRemoteOptions(options)
+    log = commandline.setup_logging("Remote XPCShell",
+                                    options,
+                                    {"tbpl": sys.stdout})
 
     if len(args) < 1 and options.manifest is None:
         print >>sys.stderr, """Usage: %s <test dirs>
              or: %s --manifest=test.manifest """ % (sys.argv[0], sys.argv[0])
         sys.exit(1)
 
     if options.dm_trans == "adb":
         if options.deviceIP:
@@ -604,17 +607,17 @@ def main():
             print "Error: you must provide a device IP to connect to via the --device option"
             sys.exit(1)
         dm = mozdevice.DroidSUT(options.deviceIP, options.devicePort, deviceRoot=options.remoteTestRoot)
 
     if options.interactive and not options.testPath:
         print >>sys.stderr, "Error: You must specify a test filename in interactive mode!"
         sys.exit(1)
 
-    xpcsh = XPCShellRemote(dm, options, args)
+    xpcsh = XPCShellRemote(dm, options, args, log)
 
     # we don't run concurrent tests on mobile
     options.sequential = True
 
     if not xpcsh.runTests(xpcshell='xpcshell',
                           testClass=RemoteXPCShellTestThread,
                           testdirs=args[0:],
                           mobileArgs=xpcsh.mobileArgs,
--- a/testing/xpcshell/runtestsb2g.py
+++ b/testing/xpcshell/runtestsb2g.py
@@ -6,16 +6,18 @@
 
 import sys
 import os
 sys.path.insert(0, os.path.abspath(os.path.realpath(os.path.dirname(sys.argv[0]))))
 
 import traceback
 from remotexpcshelltests import RemoteXPCShellTestThread, XPCShellRemote, RemoteXPCShellOptions
 from mozdevice import devicemanagerADB, DMError
+from mozlog import structured
+from mozlog.structured import commandline
 
 DEVICE_TEST_ROOT = '/data/local/tests'
 
 
 from marionette import Marionette
 
 class B2GXPCShellTestThread(RemoteXPCShellTestThread):
     # Overridden
@@ -148,17 +150,17 @@ class B2GOptions(RemoteXPCShellOptions):
 
         if options.geckoPath and not options.emulator:
             self.error("You must specify --emulator if you specify --gecko-path")
 
         if options.logdir and not options.emulator:
             self.error("You must specify --emulator if you specify --logdir")
         return RemoteXPCShellOptions.verifyRemoteOptions(self, options)
 
-def run_remote_xpcshell(parser, options, args):
+def run_remote_xpcshell(parser, options, args, log):
     options = parser.verifyRemoteOptions(options)
 
     # Create the Marionette instance
     kwargs = {}
     if options.emulator:
         kwargs['emulator'] = options.emulator
         if options.no_window:
             kwargs['noWindow'] = True
@@ -189,17 +191,17 @@ def run_remote_xpcshell(parser, options,
         if options.deviceIP:
             kwargs['host'] = options.deviceIP
             kwargs['port'] = options.devicePort
         kwargs['deviceRoot'] = options.remoteTestRoot
         dm = devicemanagerADB.DeviceManagerADB(**kwargs)
 
     if not options.remoteTestRoot:
         options.remoteTestRoot = dm.deviceRoot
-    xpcsh = B2GXPCShellRemote(dm, options, args)
+    xpcsh = B2GXPCShellRemote(dm, options, args, log)
 
     # we don't run concurrent tests on mobile
     options.sequential = True
 
     try:
         if not xpcsh.runTests(xpcshell='xpcshell', testdirs=args[0:],
                                  testClass=B2GXPCShellTestThread,
                                  mobileArgs=xpcsh.mobileArgs,
@@ -207,22 +209,21 @@ def run_remote_xpcshell(parser, options,
             sys.exit(1)
     except:
         print "Automation Error: Exception caught while running tests"
         traceback.print_exc()
         sys.exit(1)
 
 def main():
     parser = B2GOptions()
+    structured.commandline.add_logging_group(parser)
     options, args = parser.parse_args()
-
-    run_remote_xpcshell(parser, options, args)
+    log = commandline.setup_logging("Remote XPCShell",
+                                    options,
+                                    {"tbpl": sys.stdout})
+    run_remote_xpcshell(parser, options, args, log)
 
 # You usually run this like :
 # python runtestsb2g.py --emulator arm --b2gpath $B2GPATH --manifest $MANIFEST [--xre-path $MOZ_HOST_BIN
 #                                                                               --adbpath $ADB_PATH
 #                                                                               ...]
-#
-# For xUnit output you should also pass in --tests-root-dir ..objdir-gecko/_tests
-#                                          --xunit-file ..objdir_gecko/_tests/results.xml
-#                                          --xunit-suite-name xpcshell-tests
 if __name__ == '__main__':
     main()
--- a/testing/xpcshell/runxpcshelltests.py
+++ b/testing/xpcshell/runxpcshelltests.py
@@ -10,102 +10,90 @@ import math
 import mozdebug
 import mozinfo
 import os
 import os.path
 import random
 import re
 import shutil
 import signal
-import socket
 import sys
 import time
 import traceback
-import xml.dom.minidom
+
 from collections import deque
 from distutils import dir_util
 from multiprocessing import cpu_count
 from optparse import OptionParser
 from subprocess import Popen, PIPE, STDOUT
 from tempfile import mkdtemp, gettempdir
-from threading import Timer, Thread, Event, RLock
+from threading import (
+    Timer,
+    Thread,
+    Event,
+    current_thread,
+)
 
 try:
     import psutil
     HAVE_PSUTIL = True
 except ImportError:
     HAVE_PSUTIL = False
 
-from automation import Automation, getGlobalLog, resetGlobalLog
-from automationutils import *
-
-# Printing buffered output in case of a failure or verbose mode will result
-# in buffered output interleaved with other threads' output.
-# To prevent his, each call to the logger as well as any blocks of output that
-# are intended to be continuous are protected by the same lock.
-LOG_MUTEX = RLock()
+from automation import Automation
+from automationutils import replaceBackSlashes, addCommonOptions
 
 HARNESS_TIMEOUT = 5 * 60
 
 # benchmarking on tbpl revealed that this works best for now
 NUM_THREADS = int(cpu_count() * 4)
 
-FAILURE_ACTIONS = set(['test_unexpected_fail',
-                       'test_unexpected_pass',
-                       'javascript_error'])
-ACTION_STRINGS = {
-    "test_unexpected_fail": "TEST-UNEXPECTED-FAIL",
-    "test_known_fail": "TEST-KNOWN-FAIL",
-    "test_unexpected_pass": "TEST-UNEXPECTED-PASS",
-    "javascript_error": "TEST-UNEXPECTED-FAIL",
-    "test_pass": "TEST-PASS",
-    "test_info": "TEST-INFO"
-}
+EXPECTED_LOG_ACTIONS = set([
+    "test_status",
+    "log",
+])
 
 # --------------------------------------------------------------
 # TODO: this is a hack for mozbase without virtualenv, remove with bug 849900
 #
 here = os.path.dirname(__file__)
 mozbase = os.path.realpath(os.path.join(os.path.dirname(here), 'mozbase'))
 
 if os.path.isdir(mozbase):
     for package in os.listdir(mozbase):
         sys.path.append(os.path.join(mozbase, package))
 
 import manifestparser
 import mozcrash
 import mozinfo
+from mozlog import structured
 
 # --------------------------------------------------------------
 
 # TODO: perhaps this should be in a more generally shared location?
 # This regex matches all of the C0 and C1 control characters
 # (U+0000 through U+001F; U+007F; U+0080 through U+009F),
 # except TAB (U+0009), CR (U+000D), LF (U+000A) and backslash (U+005C).
 # A raw string is deliberately not used.
 _cleanup_encoding_re = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x9f\\\\]')
 def _cleanup_encoding_repl(m):
     c = m.group(0)
     return '\\\\' if c == '\\' else '\\x{0:02X}'.format(ord(c))
 def cleanup_encoding(s):
     """S is either a byte or unicode string.  Either way it may
        contain control characters, unpaired surrogates, reserved code
        points, etc.  If it is a byte string, it is assumed to be
-       UTF-8, but it may not be *correct* UTF-8.  Produce a byte
-       string that can safely be dumped into a (generally UTF-8-coded)
-       logfile."""
+       UTF-8, but it may not be *correct* UTF-8.  Return a
+       sanitized unicode object."""
+    if not isinstance(s, basestring):
+        return unicode(s)
     if not isinstance(s, unicode):
         s = s.decode('utf-8', 'replace')
-    if s.endswith('\n'):
-        # A new line is always added by head.js to delimit messages,
-        # however consumers will want to supply their own.
-        s = s[:-1]
     # Replace all C0 and C1 control characters with \xNN escapes.
-    s = _cleanup_encoding_re.sub(_cleanup_encoding_repl, s)
-    return s.encode('utf-8', 'backslashreplace')
+    return _cleanup_encoding_re.sub(_cleanup_encoding_repl, s)
 
 """ Control-C handling """
 gotSIGINT = False
 def markGotSIGINT(signum, stackFrame):
     global gotSIGINT
     gotSIGINT = True
 
 class XPCShellTestThread(Thread):
@@ -132,54 +120,57 @@ class XPCShellTestThread(Thread):
         self.profileName = kwargs.get('profileName')
         self.singleFile = kwargs.get('singleFile')
         self.env = copy.deepcopy(kwargs.get('env'))
         self.symbolsPath = kwargs.get('symbolsPath')
         self.logfiles = kwargs.get('logfiles')
         self.xpcshell = kwargs.get('xpcshell')
         self.xpcsRunArgs = kwargs.get('xpcsRunArgs')
         self.failureManifest = kwargs.get('failureManifest')
-        self.on_message = kwargs.get('on_message')
 
         self.tests_root_dir = tests_root_dir
         self.app_dir_key = app_dir_key
         self.interactive = interactive
         self.verbose = verbose
         self.pStdout = pStdout
         self.pStderr = pStderr
         self.keep_going = keep_going
         self.log = log
 
         # only one of these will be set to 1. adding them to the totals in
         # the harness
         self.passCount = 0
         self.todoCount = 0
         self.failCount = 0
 
+        # Context for output processing
         self.output_lines = []
         self.has_failure_output = False
         self.saw_proc_start = False
         self.saw_proc_end = False
+        self.complete_command = None
+        self.harness_timeout = kwargs.get('harness_timeout')
+        self.timedout = False
 
         # event from main thread to signal work done
         self.event = event
         self.done = False # explicitly set flag so we don't rely on thread.isAlive
 
     def run(self):
         try:
             self.run_test()
         except Exception as e:
             self.exception = e
             self.traceback = traceback.format_exc()
         else:
             self.exception = None
             self.traceback = None
         if self.retry:
-            self.log.info("TEST-INFO | %s | Test failed or timed out, will retry."
-                          % self.test_object['name'])
+            self.log.info("%s failed or timed out, will retry." %
+                          self.test_object['id'])
         self.done = True
         self.event.set()
 
     def kill(self, proc):
         """
           Simple wrapper to kill a process.
           On a remote system, this is overloaded to handle remote process communication.
         """
@@ -255,56 +246,96 @@ class XPCShellTestThread(Thread):
                         test_name=None):
         """
           Simple wrapper to check for crashes.
           On a remote system, this is more complex and we need to overload this function.
         """
         return mozcrash.check_for_crashes(dump_directory, symbols_path, test_name=test_name)
 
     def logCommand(self, name, completeCmd, testdir):
-        self.log.info("TEST-INFO | %s | full command: %r" % (name, completeCmd))
-        self.log.info("TEST-INFO | %s | current directory: %r" % (name, testdir))
+        self.log.info("%s | full command: %r" % (name, completeCmd))
+        self.log.info("%s | current directory: %r" % (name, testdir))
         # Show only those environment variables that are changed from
         # the ambient environment.
         changedEnv = (set("%s=%s" % i for i in self.env.iteritems())
                       - set("%s=%s" % i for i in os.environ.iteritems()))
-        self.log.info("TEST-INFO | %s | environment: %s" % (name, list(changedEnv)))
+        self.log.info("%s | environment: %s" % (name, list(changedEnv)))
+
+    def killTimeout(self, proc):
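+        """Kill a timed-out test process and ask the automation layer to
+        record a stack for it (without taking a screenshot)."""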
+        Automation().killAndGetStackNoScreenshot(proc.pid,
+                                                 self.appPath,
+                                                 self.debuggerInfo)
+
+    def postCheck(self, proc):
+        """Checks for a still-running test process, kills it and fails the test if found.
+        We can sometimes get here before the process has terminated, which would
+        cause removeDir() to fail - so check for the process and kill it if needed.
+        """
+        if proc and self.poll(proc) is None:
+            self.kill(proc)
+            message = "%s | Process still running after test!" % self.test_object['id']
+            if self.retry:
+                self.log.info(message)
+                self.log_full_output(self.output_lines)
+                return
+
+            self.log.error(message)
+            self.log_full_output(self.output_lines)
+            self.failCount = 1
 
-    def testTimeout(self, test_file, proc):
-        if not self.retry:
-            self.log.error("TEST-UNEXPECTED-FAIL | %s | Test timed out" % test_file)
+    def testTimeout(self, proc):
+        if self.test_object['expected'] == 'pass':
+            expected = 'PASS'
+        else:
+            expected = 'FAIL'
+
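+        # When the test is still eligible for a retry, report the timeout as
+        # the expected result so this run is not counted as a failure yet.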
+        if self.retry:
+            self.log.test_end(self.test_object['id'], 'TIMEOUT',
+                              expected='TIMEOUT',
+                              message="Test timed out")
+        else:
+            self.failCount = 1
+            self.log.test_end(self.test_object['id'], 'TIMEOUT',
+                              expected=expected,
+                              message="Test timed out")
+            self.log_full_output(self.output_lines)
+
         self.done = True
-        Automation().killAndGetStackNoScreenshot(proc.pid, self.appPath, self.debuggerInfo)
+        self.timedout = True
+        self.killTimeout(proc)
+        self.log.info("xpcshell return code: %s" % self.getReturnCode(proc))
+        self.postCheck(proc)
+        self.clean_temp_dirs(self.test_object['path'])
 
     def buildCmdTestFile(self, name):
         """
           Build the command line arguments for the test file.
           On a remote system, this may be overloaded to use a remote path structure.
         """
         return ['-e', 'const _TEST_FILE = ["%s"];' %
                   replaceBackSlashes(name)]
 
     def setupTempDir(self):
         tempDir = mkdtemp()
         self.env["XPCSHELL_TEST_TEMP_DIR"] = tempDir
         if self.interactive:
-            self.log.info("TEST-INFO | temp dir is %s" % tempDir)
+            self.log.info("temp dir is %s" % tempDir)
         return tempDir
 
     def setupPluginsDir(self):
         if not os.path.isdir(self.pluginsPath):
             return None
 
         pluginsDir = mkdtemp()
         # shutil.copytree requires dst to not exist. Deleting the tempdir
         # would make a race condition possible in a concurrent environment,
         # so we are using dir_utils.copy_tree which accepts an existing dst
         dir_util.copy_tree(self.pluginsPath, pluginsDir)
         if self.interactive:
-            self.log.info("TEST-INFO | plugins dir is %s" % pluginsDir)
+            self.log.info("plugins dir is %s" % pluginsDir)
         return pluginsDir
 
     def setupProfileDir(self):
         """
           Create a temporary folder for the profile and set appropriate environment variables.
           When running check-interactive and check-one, the directory is well-defined and
           retained for inspection once the tests complete.
 
@@ -317,17 +348,17 @@ class XPCShellTestThread(Thread):
                 self.removeDir(profileDir)
             except:
                 pass
             os.makedirs(profileDir)
         else:
             profileDir = mkdtemp()
         self.env["XPCSHELL_TEST_PROFILE_DIR"] = profileDir
         if self.interactive or self.singleFile:
-            self.log.info("TEST-INFO | profile dir is %s" % profileDir)
+            self.log.info("profile dir is %s" % profileDir)
         return profileDir
 
     def buildCmdHead(self, headfiles, tailfiles, xpcscmd):
         """
           Build the command line arguments for the head and tail files,
           along with the address of the webserver which some tests require.
 
           On a remote system, this is overloaded to resolve quoting issues over a secondary command line.
@@ -406,210 +437,181 @@ class XPCShellTestThread(Thread):
         # setting up every test with its own plugins directory.
         if not self.pluginsPath:
             self.pluginsPath = os.path.join(self.appPath, 'plugins')
 
         self.pluginsDir = self.setupPluginsDir()
         if self.pluginsDir:
             self.xpcsCmd.extend(['-p', self.pluginsDir])
 
-    def cleanupDir(self, directory, name, xunit_result):
+    def cleanupDir(self, directory, name):
         if not os.path.exists(directory):
             return
 
         TRY_LIMIT = 25 # up to TRY_LIMIT attempts (one every second), because
                        # the Windows filesystem is slow to react to the changes
         try_count = 0
         while try_count < TRY_LIMIT:
             try:
                 self.removeDir(directory)
             except OSError:
-                self.log.info("TEST-INFO | Failed to remove directory: %s. Waiting." % directory)
+                self.log.info("Failed to remove directory: %s. Waiting." % directory)
                 # We suspect the filesystem may still be making changes. Wait a
                 # little bit and try again.
                 time.sleep(1)
                 try_count += 1
             else:
                 # removed fine
                 return
 
         # we try cleaning up again later at the end of the run
         self.cleanup_dir_list.append(directory)
 
-    def clean_temp_dirs(self, name, stdout):
+    def clean_temp_dirs(self, name):
         # We don't want to delete the profile when running check-interactive
         # or check-one.
         if self.profileDir and not self.interactive and not self.singleFile:
-            self.cleanupDir(self.profileDir, name, self.xunit_result)
+            self.cleanupDir(self.profileDir, name)
 
-        self.cleanupDir(self.tempDir, name, self.xunit_result)
+        self.cleanupDir(self.tempDir, name)
 
         if self.pluginsDir:
-            self.cleanupDir(self.pluginsDir, name, self.xunit_result)
-
-    def message_from_line(self, line):
-        """ Given a line of raw output, convert to a string message. """
-        if isinstance(line, basestring):
-            # This function has received unstructured output.
-            if line:
-                if 'TEST-UNEXPECTED-' in line:
-                    self.has_failure_output = True
-            return line
-
-        msg = ['%s: ' % line['process'] if 'process' in line else '']
-
-        # Each call to the logger in head.js either specified '_message'
-        # or both 'source_file' and 'diagnostic'. If either of these are
-        # missing, they ended up being undefined as a result of the way
-        # the test was run.
-        if '_message' in line:
-            msg.append(line['_message'])
-            if 'diagnostic' in line:
-                msg.append('\nDiagnostic: %s' % line['diagnostic'])
-        else:
-            msg.append('%s | %s | %s' % (ACTION_STRINGS[line['action']],
-                                         line.get('source_file', 'undefined'),
-                                         line.get('diagnostic', 'undefined')))
-
-        msg.append('\n%s' % line['stack'] if 'stack' in line else '')
-        return ''.join(msg)
+            self.cleanupDir(self.pluginsDir, name)
 
     def parse_output(self, output):
         """Parses process output for structured messages and saves output as it is
         read. Sets self.has_failure_output in case of evidence of a failure"""
         for line_string in output.splitlines():
             self.process_line(line_string)
 
         if self.saw_proc_start and not self.saw_proc_end:
             self.has_failure_output = True
 
-    def report_message(self, line):
-        """ Reports a message to a consumer, both as a strucutured and
-        human-readable log message. """
+    def log_line(self, line):
+        """Log a line of output (either a parser json object or text output from
+        the test process"""
+        if isinstance(line, basestring):
+            line = cleanup_encoding(line)
+            self.log.process_output(self.proc_ident,
+                                    line,
+                                    command=self.complete_command)
+        else:
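+            # Structured messages have their text sanitized and are tagged with
+            # the thread running this test (and the xpcshell child process, if
+            # one is reported) before being handed to the logger.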
+            if 'message' in line:
+                line['message'] = cleanup_encoding(line['message'])
+            if 'xpcshell_process' in line:
+                line['thread'] = ' '.join([current_thread().name, line['xpcshell_process']])
+            else:
+                line['thread'] = current_thread().name
+            self.log.log_raw(line)
 
-        message = cleanup_encoding(self.message_from_line(line))
-        if message.endswith('\n'):
-            # A new line is always added by head.js to delimit messages,
-            # however consumers will want to supply their own.
-            message = message[:-1]
+    def log_full_output(self, output):
+        """Log output any buffered output from the test process"""
+        if not output:
+            return
+        self.log.info(">>>>>>>")
+        for line in output:
+            self.log_line(line)
+        self.log.info("<<<<<<<")
 
-        if self.on_message:
-            self.on_message(line, message)
+    def report_message(self, message):
+        """Stores or logs a json log message in mozlog.structured
+        format."""
+        if self.verbose:
+            self.log_line(message)
         else:
-            self.output_lines.append(message)
+            # Tests eligible to retry will never dump their buffered output.
+            if not self.retry:
+                self.output_lines.append(message)
 
     def process_line(self, line_string):
         """ Parses a single line of output, determining its significance and
         reporting a message.
         """
+        if not line_string.strip():
+            return
+
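+        # Structured log messages arrive as single-line JSON; anything that
+        # does not parse as a JSON object is treated as raw process output.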
         try:
             line_object = json.loads(line_string)
             if not isinstance(line_object, dict):
                 self.report_message(line_string)
                 return
         except ValueError:
             self.report_message(line_string)
             return
 
-        if 'action' not in line_object:
-            # In case a test outputs something that happens to be valid
-            # JSON.
+        if ('action' not in line_object or
+            line_object['action'] not in EXPECTED_LOG_ACTIONS):
+            # The test process happened to output a line of valid JSON that is
+            # not a structured log message.
             self.report_message(line_string)
             return
 
         action = line_object['action']
+
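+        # Structured messages include an 'expected' field only when a result
+        # differs from what was expected, so its presence, like an ERROR-level
+        # log message, is evidence of a failure.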
+        self.has_failure_output = (self.has_failure_output or
+                                   'expected' in line_object or
+                                   (action == 'log' and line_object['level'] == 'ERROR'))
+
         self.report_message(line_object)
 
-        if action in FAILURE_ACTIONS:
-            self.has_failure_output = True
-        elif action == 'child_test_start':
-            self.saw_proc_start = True
-        elif action == 'child_test_end':
+        if action == 'log' and line_object['message'] == 'CHILD-TEST-STARTED':
+            self.saw_proc_start = True
+        elif action == 'log' and line_object['message'] == 'CHILD-TEST-COMPLETED':
             self.saw_proc_end = True
 
-    def log_output(self, output):
-        """Prints given output line-by-line to avoid overflowing buffers."""
-        self.log.info(">>>>>>>")
-        if output:
-            if isinstance(output, basestring):
-                output = output.splitlines()
-            for part in output:
-                # For multi-line output, such as a stack trace
-                for line in part.splitlines():
-                    try:
-                        line = line.decode('utf-8')
-                    except UnicodeDecodeError:
-                        self.log.info("TEST-INFO | %s | Detected non UTF-8 output."\
-                                      " Please modify the test to only print UTF-8." %
-                                      self.test_object['name'])
-                        # add '?' instead of funky bytes
-                        line = line.decode('utf-8', 'replace')
-                    self.log.info(line)
-        self.log.info("<<<<<<<")
-
     def run_test(self):
         """Run an individual xpcshell test."""
         global gotSIGINT
 
-        name = self.test_object['path']
-
-        self.xunit_result = {'name': self.test_object['name'], 'classname': 'xpcshell'}
-
-        # The xUnit package is defined as the path component between the root
-        # dir and the test with path characters replaced with '.' (using Java
-        # class notation).
-        if self.tests_root_dir is not None:
-            self.tests_root_dir = os.path.normpath(self.tests_root_dir)
-            if os.path.normpath(self.test_object['here']).find(self.tests_root_dir) != 0:
-                raise Exception('tests_root_dir is not a parent path of %s' %
-                    self.test_object['here'])
-            relpath = self.test_object['here'][len(self.tests_root_dir):].lstrip('/\\')
-            self.xunit_result['classname'] = relpath.replace('/', '.').replace('\\', '.')
+        name = self.test_object['id']
+        path = self.test_object['path']
 
         # Check for skipped tests
         if 'disabled' in self.test_object:
-            self.log.info('TEST-INFO | skipping %s | %s' %
-                (name, self.test_object['disabled']))
+            message = self.test_object['disabled']
+            if not message:
+                message = 'disabled from xpcshell manifest'
+            self.log.test_start(name)
+            self.log.test_end(name, 'SKIP', message=message)
 
-            self.xunit_result['skipped'] = True
             self.retry = False
-
             self.keep_going = True
             return
 
         # Check for known-fail tests
-        expected = self.test_object['expected'] == 'pass'
+        expect_pass = self.test_object['expected'] == 'pass'
 
         # By default self.appPath will equal the gre dir. If specified in the
         # xpcshell.ini file, set a different app dir for this test.
         if self.app_dir_key and self.app_dir_key in self.test_object:
             rel_app_dir = self.test_object[self.app_dir_key]
             rel_app_dir = os.path.join(self.xrePath, rel_app_dir)
             self.appPath = os.path.abspath(rel_app_dir)
         else:
             self.appPath = None
 
-        test_dir = os.path.dirname(name)
+        test_dir = os.path.dirname(path)
         self.buildXpcsCmd(test_dir)
         head_files, tail_files = self.getHeadAndTailFiles(self.test_object)
         cmdH = self.buildCmdHead(head_files, tail_files, self.xpcsCmd)
 
         # Create a profile and a temp dir that the JS harness can stick
         # a profile and temporary data in
         self.profileDir = self.setupProfileDir()
         self.tempDir = self.setupTempDir()
 
         # The test file will have to be loaded after the head files.
-        cmdT = self.buildCmdTestFile(name)
+        cmdT = self.buildCmdTestFile(path)
 
         args = self.xpcsRunArgs[:]
         if 'debug' in self.test_object:
             args.insert(0, '-d')
 
-        completeCmd = cmdH + cmdT + args
+        # The test name to log
+        cmdI = ['-e', 'const _TEST_NAME = "%s"' % name]
+        self.complete_command = cmdH + cmdT + cmdI + args
 
         if self.test_object.get('dmd') == 'true':
             if sys.platform.startswith('linux'):
                 preloadEnvVar = 'LD_PRELOAD'
                 libdmd = os.path.join(self.xrePath, 'libdmd.so')
             elif sys.platform == 'osx' or sys.platform == 'darwin':
                 preloadEnvVar = 'DYLD_INSERT_LIBRARIES'
                 # self.xrePath is <prefix>/Contents/Resources.
@@ -620,188 +622,133 @@ class XPCShellTestThread(Thread):
                 preloadEnvVar = 'MOZ_REPLACE_MALLOC_LIB'
                 libdmd = os.path.join(self.xrePath, 'dmd.dll')
 
             self.env['PYTHON'] = sys.executable
             self.env['BREAKPAD_SYMBOLS_PATH'] = self.symbolsPath
             self.env['DMD_PRELOAD_VAR'] = preloadEnvVar
             self.env['DMD_PRELOAD_VALUE'] = libdmd
 
-        testTimeoutInterval = HARNESS_TIMEOUT
+        testTimeoutInterval = self.harness_timeout
         # Allow a test to request a multiple of the timeout if it is expected to take long
         if 'requesttimeoutfactor' in self.test_object:
             testTimeoutInterval *= int(self.test_object['requesttimeoutfactor'])
 
         testTimer = None
         if not self.interactive and not self.debuggerInfo:
-            testTimer = Timer(testTimeoutInterval, lambda: self.testTimeout(name, proc))
+            testTimer = Timer(testTimeoutInterval, lambda: self.testTimeout(proc))
             testTimer.start()
 
         proc = None
-        stdout = None
-        stderr = None
+        process_output = None
 
         try:
-            self.log.info("TEST-INFO | %s | running test ..." % name)
+            self.log.test_start(name)
             if self.verbose:
-                self.logCommand(name, completeCmd, test_dir)
+                self.logCommand(name, self.complete_command, test_dir)
 
-            startTime = time.time()
-            proc = self.launchProcess(completeCmd,
+            proc = self.launchProcess(self.complete_command,
                 stdout=self.pStdout, stderr=self.pStderr, env=self.env, cwd=test_dir, timeout=testTimeoutInterval)
 
+            if hasattr(proc, "pid"):
+                self.proc_ident = proc.pid
+            else:
+                # On mobile, "proc" is just a file.
+                self.proc_ident = name
+
             if self.interactive:
-                self.log.info("TEST-INFO | %s | Process ID: %d" % (name, proc.pid))
+                self.log.info("%s | Process ID: %d" % (name, self.proc_ident))
 
-            stdout, stderr = self.communicate(proc)
+            # communicate() returns a tuple of (stdout, stderr), but we always
+            # redirect stderr to stdout, so the second element is ignored.
+            process_output, _ = self.communicate(proc)
 
             if self.interactive:
                 # Not sure what else to do here...
                 self.keep_going = True
                 return
 
             if testTimer:
                 testTimer.cancel()
 
-            if stdout:
-                self.parse_output(stdout)
-            result = not (self.has_failure_output or
-                          (self.getReturnCode(proc) != 0))
+            if process_output:
+                # For the remote case, stdout is not yet depleted, so we parse
+                # it here all at once.
+                self.parse_output(process_output)
+
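+            # The test passes only if it produced no failure output and
+            # xpcshell exited with a return code of zero.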
+            return_code = self.getReturnCode(proc)
+            passed = (not self.has_failure_output) and (return_code == 0)
 
-            if result != expected:
+            status = 'PASS' if passed else 'FAIL'
+            expected = 'PASS' if expect_pass else 'FAIL'
+            message = 'xpcshell return code: %d' % return_code
+
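+            # The timeout handler has already reported a result and cleaned up
+            # for this test, so don't report a second result here.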
+            if self.timedout:
+                return
+
+            if status != expected:
                 if self.retry:
-                    self.clean_temp_dirs(name, stdout)
+                    self.log.test_end(name, status, expected=status,
+                                      message="Test failed or timed out, will retry")
+                    self.clean_temp_dirs(path)
                     return
 
-                failureType = "TEST-UNEXPECTED-%s" % ("FAIL" if expected else "PASS")
-                message = "%s | %s | test failed (with xpcshell return code: %d)" % (
-                              failureType, name, self.getReturnCode(proc))
-                if self.output_lines:
-                    message += ", see following log:"
-
-                with LOG_MUTEX:
-                    self.log.error(message)
-                    self.log_output(self.output_lines)
+                self.log.test_end(name, status, expected=expected, message=message)
+                self.log_full_output(self.output_lines)
 
                 self.failCount += 1
-                self.xunit_result["passed"] = False
-
-                self.xunit_result["failure"] = {
-                  "type": failureType,
-                  "message": message,
-                  "text": stdout
-                }
 
                 if self.failureManifest:
                     with open(self.failureManifest, 'a') as f:
                         f.write('[%s]\n' % self.test_object['path'])
                         for k, v in self.test_object.items():
                             f.write('%s = %s\n' % (k, v))
 
             else:
-                now = time.time()
-                timeTaken = (now - startTime) * 1000
-                self.xunit_result["time"] = now - startTime
+                self.log.test_end(name, status, expected=expected, message=message)
+                if self.verbose:
+                    self.log_full_output(self.output_lines)
 
-                with LOG_MUTEX:
-                    self.log.info("TEST-%s | %s | test passed (time: %.3fms)" % ("PASS" if expected else "KNOWN-FAIL", name, timeTaken))
-                    if self.verbose:
-                        self.log_output(self.output_lines)
-
-                self.xunit_result["passed"] = True
                 self.retry = False
 
-                if expected:
+                if expect_pass:
                     self.passCount = 1
                 else:
                     self.todoCount = 1
-                    self.xunit_result["todo"] = True
 
             if self.checkForCrashes(self.tempDir, self.symbolsPath, test_name=name):
                 if self.retry:
-                    self.clean_temp_dirs(name, stdout)
-                    return
-
-                message = "PROCESS-CRASH | %s | application crashed" % name
-                self.failCount = 1
-                self.xunit_result["passed"] = False
-                self.xunit_result["failure"] = {
-                    "type": "PROCESS-CRASH",
-                    "message": message,
-                    "text": stdout
-                }
-
-            if self.logfiles and stdout:
-                self.createLogFile(name, stdout)
-
-        finally:
-            # We can sometimes get here before the process has terminated, which would
-            # cause removeDir() to fail - so check for the process & kill it it needed.
-            if proc and self.poll(proc) is None:
-                self.kill(proc)
-
-                if self.retry:
-                    self.clean_temp_dirs(name, stdout)
+                    self.clean_temp_dirs(path)
                     return
 
-                with LOG_MUTEX:
-                    message = "TEST-UNEXPECTED-FAIL | %s | Process still running after test!" % name
-                    self.log.error(message)
-                    self.log_output(self.output_lines)
+                self.failCount = 1
 
-                self.failCount = 1
-                self.xunit_result["passed"] = False
-                self.xunit_result["failure"] = {
-                  "type": "TEST-UNEXPECTED-FAIL",
-                  "message": message,
-                  "text": stdout
-                }
+            if self.logfiles and process_output:
+                self.createLogFile(name, process_output)
 
-            self.clean_temp_dirs(name, stdout)
+        finally:
+            self.postCheck(proc)
+            self.clean_temp_dirs(path)
 
         if gotSIGINT:
-            self.xunit_result["passed"] = False
-            self.xunit_result["time"] = "0.0"
-            self.xunit_result["failure"] = {
-                "type": "SIGINT",
-                "message": "Received SIGINT",
-                "text": "Received SIGINT (control-C) during test execution."
-            }
-
-            self.log.error("TEST-UNEXPECTED-FAIL | Received SIGINT (control-C) during test execution")
+            self.log.error("Received SIGINT (control-C) during test execution")
             if self.keep_going:
                 gotSIGINT = False
             else:
                 self.keep_going = False
                 return
 
         self.keep_going = True
 
 class XPCShellTests(object):
 
-    log = getGlobalLog()
-
     def __init__(self, log=None):
-        """ Init logging and node status """
-        if log:
-            resetGlobalLog(log)
-
-        # Each method of the underlying logger must acquire the log
-        # mutex before writing to stdout.
-        log_funs = ['debug', 'info', 'warning', 'error', 'critical', 'log']
-        for fun_name in log_funs:
-            unwrapped = getattr(self.log, fun_name, None)
-            if unwrapped:
-                def wrap(fn):
-                    def wrapped(*args, **kwargs):
-                        with LOG_MUTEX:
-                            fn(*args, **kwargs)
-                    return wrapped
-                setattr(self.log, fun_name, wrap(unwrapped))
-
+        """ Initializes node status and logger. """
+        self.log = log
+        self.harness_timeout = HARNESS_TIMEOUT
         self.nodeProc = {}
 
     def buildTestList(self):
         """
           read the xpcshell.ini manifest and set self.alltests to be
           an array of test objects.
 
           if we are chunking tests, it will be done here as well
@@ -834,17 +781,17 @@ class XPCShellTests(object):
           self.alltests based on thisChunk, so we only run a subset.
         """
         totalTests = len(self.alltests)
         testsPerChunk = math.ceil(totalTests / float(self.totalChunks))
         start = int(round((self.thisChunk-1) * testsPerChunk))
         end = int(start + testsPerChunk)
         if end > totalTests:
             end = totalTests
-        self.log.info("Running tests %d-%d/%d", start+1, end, totalTests)
+        self.log.info("Running tests %d-%d/%d" % (start + 1, end, totalTests))
         self.alltests = self.alltests[start:end]
 
     def setAbsPath(self):
         """
           Set the absolute path for xpcshell, httpdjspath and xrepath.
           These 3 variables depend on input from the command line and we need to allow for absolute paths.
           This function is overloaded for a remote solution as os.path* won't work remotely.
         """
@@ -913,19 +860,19 @@ class XPCShellTests(object):
             else:
                 self.env["LD_LIBRARY_PATH"] = ":".join([self.xrePath, self.env["LD_LIBRARY_PATH"]])
 
         if "asan" in self.mozInfo and self.mozInfo["asan"]:
             # ASan symbolizer support
             llvmsym = os.path.join(self.xrePath, "llvm-symbolizer")
             if os.path.isfile(llvmsym):
                 self.env["ASAN_SYMBOLIZER_PATH"] = llvmsym
-                self.log.info("INFO | runxpcshelltests.py | ASan using symbolizer at %s", llvmsym)
+                self.log.info("runxpcshelltests.py | ASan using symbolizer at %s" % llvmsym)
             else:
-                self.log.info("TEST-UNEXPECTED-FAIL | runxpcshelltests.py | Failed to find ASan symbolizer at %s", llvmsym)
+                self.log.error("TEST-UNEXPECTED-FAIL | runxpcshelltests.py | Failed to find ASan symbolizer at %s" % llvmsym)
 
         return self.env
 
     def getPipes(self):
         """
           Determine the value of the stdout and stderr for the test.
           Return value is a list (pStdout, pStderr).
         """
@@ -1021,189 +968,49 @@ class XPCShellTests(object):
     def shutdownNode(self):
         """
           Shut down our node process, if it exists
         """
         for name, proc in self.nodeProc.iteritems():
             self.log.info('Node %s server shutting down ...' % name)
             proc.terminate()
 
-    def writeXunitResults(self, results, name=None, filename=None, fh=None):
-        """
-          Write Xunit XML from results.
-
-          The function receives an iterable of results dicts. Each dict must have
-          the following keys:
-
-            classname - The "class" name of the test.
-            name - The simple name of the test.
-
-          In addition, it must have one of the following saying how the test
-          executed:
-
-            passed - Boolean indicating whether the test passed. False if it
-              failed.
-            skipped - True if the test was skipped.
-
-          The following keys are optional:
-
-            time - Execution time of the test in decimal seconds.
-            failure - Dict describing test failure. Requires keys:
-              type - String type of failure.
-              message - String describing basic failure.
-              text - Verbose string describing failure.
-
-          Arguments:
-
-          |name|, Name of the test suite. Many tools expect Java class dot notation
-            e.g. dom.simple.foo. A directory with '/' converted to '.' is a good
-            choice.
-          |fh|, File handle to write XML to.
-          |filename|, File name to write XML to.
-          |results|, Iterable of tuples describing the results.
-        """
-        if filename is None and fh is None:
-            raise Exception("One of filename or fh must be defined.")
-
-        if name is None:
-            name = "xpcshell"
-        else:
-            assert isinstance(name, basestring)
-
-        if filename is not None:
-            fh = open(filename, 'wb')
-
-        doc = xml.dom.minidom.Document()
-        testsuite = doc.createElement("testsuite")
-        testsuite.setAttribute("name", name)
-        doc.appendChild(testsuite)
-
-        total = 0
-        passed = 0
-        failed = 0
-        skipped = 0
-
-        for result in results:
-            total += 1
-
-            if result.get("skipped", None):
-                skipped += 1
-            elif result["passed"]:
-                passed += 1
-            else:
-                failed += 1
-
-            testcase = doc.createElement("testcase")
-            testcase.setAttribute("classname", result["classname"])
-            testcase.setAttribute("name", result["name"])
-
-            if "time" in result:
-                testcase.setAttribute("time", str(result["time"]))
-            else:
-                # It appears most tools expect the time attribute to be present.
-                testcase.setAttribute("time", "0")
-
-            if "failure" in result:
-                failure = doc.createElement("failure")
-                failure.setAttribute("type", str(result["failure"]["type"]))
-                failure.setAttribute("message", result["failure"]["message"])
-
-                # Lossy translation but required to not break CDATA. Also, text could
-                # be None and Python 2.5's minidom doesn't accept None. Later versions
-                # do, however.
-                cdata = result["failure"]["text"]
-                if not isinstance(cdata, str):
-                    cdata = ""
-
-                cdata = cdata.replace("]]>", "]] >")
-                text = doc.createCDATASection(cdata)
-                failure.appendChild(text)
-                testcase.appendChild(failure)
-
-            if result.get("skipped", None):
-                e = doc.createElement("skipped")
-                testcase.appendChild(e)
-
-            testsuite.appendChild(testcase)
-
-        testsuite.setAttribute("tests", str(total))
-        testsuite.setAttribute("failures", str(failed))
-        testsuite.setAttribute("skip", str(skipped))
-
-        doc.writexml(fh, addindent="  ", newl="\n", encoding="utf-8")
-
-    def post_to_autolog(self, results, name):
-        from moztest.results import TestContext, TestResult, TestResultCollection
-        from moztest.output.autolog import AutologOutput
-
-        context = TestContext(
-            testgroup='b2g xpcshell testsuite',
-            operating_system='android',
-            arch='emulator',
-            harness='xpcshell',
-            hostname=socket.gethostname(),
-            tree='b2g',
-            buildtype='opt',
-            )
-
-        collection = TestResultCollection('b2g emulator testsuite')
-
-        for result in results:
-            duration = result.get('time', 0)
-
-            if 'skipped' in result:
-                outcome = 'SKIPPED'
-            elif 'todo' in result:
-                outcome = 'KNOWN-FAIL'
-            elif result['passed']:
-                outcome = 'PASS'
-            else:
-                outcome = 'UNEXPECTED-FAIL'
-
-            output = None
-            if 'failure' in result:
-                output = result['failure']['text']
-
-            t = TestResult(name=result['name'], test_class=name,
-                           time_start=0, context=context)
-            t.finish(result=outcome, time_end=duration, output=output)
-
-            collection.append(t)
-            collection.time_taken += duration
-
-        out = AutologOutput()
-        out.post(out.make_testgroups(collection))
-
     def buildXpcsRunArgs(self):
         """
           Add arguments to run the test or make it interactive.
         """
         if self.interactive:
             self.xpcsRunArgs = [
             '-e', 'print("To start the test, type |_execute_test();|.");',
             '-i']
         else:
             self.xpcsRunArgs = ['-e', '_execute_test(); quit(0);']
 
     def addTestResults(self, test):
         self.passCount += test.passCount
         self.failCount += test.failCount
         self.todoCount += test.todoCount
-        self.xunitResults.append(test.xunit_result)
+
+    def makeTestId(self, test_object):
+        """Calculate an identifier for a test based on its path or a combination of
+        its path and the source manifest."""
+        path = replaceBackSlashes(test_object['path'])
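+        # A test that appears in more than one manifest is qualified with the
+        # basename of the ancestor manifest so that its id stays unique.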
+        if 'dupe-manifest' in test_object and 'ancestor-manifest' in test_object:
+            return '%s:%s' % (os.path.basename(test_object['ancestor-manifest']), path)
+        return path
 
     def runTests(self, xpcshell, xrePath=None, appPath=None, symbolsPath=None,
                  manifest=None, testdirs=None, testPath=None, mobileArgs=None,
                  interactive=False, verbose=False, keepGoing=False, logfiles=True,
                  thisChunk=1, totalChunks=1, debugger=None,
                  debuggerArgs=None, debuggerInteractive=False,
                  profileName=None, mozInfo=None, sequential=False, shuffle=False,
-                 testsRootDir=None, xunitFilename=None, xunitName=None,
-                 testingModulesDir=None, autolog=False, pluginsPath=None,
+                 testsRootDir=None, testingModulesDir=None, pluginsPath=None,
                  testClass=XPCShellTestThread, failureManifest=None,
-                 on_message=None, **otherOptions):
+                 log=None, stream=None, **otherOptions):
         """Run xpcshell tests.
 
         |xpcshell|, is the xpcshell executable to use to run the tests.
         |xrePath|, if provided, is the path to the XRE to use.
         |appPath|, if provided, is the path to an application directory.
         |symbolsPath|, if provided is the path to a directory containing
           breakpad symbols for processing crashes in tests.
         |manifest|, if provided, is a file containing a list of
@@ -1225,41 +1032,26 @@ class XPCShellTests(object):
         |debuggerInteractive|, if set, allows the debugger to be run in interactive
           mode.
         |profileName|, if set, specifies the name of the application for the profile
           directory if running only a subset of tests.
         |mozInfo|, if set, specifies build configuration information, either as a filename containing JSON, or a dict.
         |shuffle|, if True, execute tests in random order.
         |testsRootDir|, absolute path to root directory of all tests. This is used
           by xUnit generation to determine the package name of the tests.
-        |xunitFilename|, if set, specifies the filename to which to write xUnit XML
-          results.
-        |xunitName|, if outputting an xUnit XML file, the str value to use for the
-          testsuite name.
         |testingModulesDir|, if provided, specifies where JS modules reside.
           xpcshell will register a resource handler mapping this path.
         |otherOptions| may be present for the convenience of subclasses
         """
 
         global gotSIGINT
 
         if testdirs is None:
             testdirs = []
 
-        if xunitFilename is not None or xunitName is not None:
-            if not isinstance(testsRootDir, basestring):
-                raise Exception("testsRootDir must be a str when outputting xUnit.")
-
-            if not os.path.isabs(testsRootDir):
-                testsRootDir = os.path.abspath(testsRootDir)
-
-            if not os.path.exists(testsRootDir):
-                raise Exception("testsRootDir path does not exists: %s" %
-                        testsRootDir)
-
         # Try to guess modules directory.
         # This somewhat grotesque hack allows the buildbot machines to find the
         # modules directory without having to configure the buildbot hosts. This
         # code path should never be executed in local runs because the build system
         # should always set this argument.
         if not testingModulesDir:
             ourDir = os.path.dirname(__file__)
             possible = os.path.join(ourDir, os.path.pardir, 'modules')
@@ -1295,17 +1087,16 @@ class XPCShellTests(object):
         self.symbolsPath = symbolsPath
         self.manifest = manifest
         self.testdirs = testdirs
         self.testPath = testPath
         self.interactive = interactive
         self.verbose = verbose
         self.keepGoing = keepGoing
         self.logfiles = logfiles
-        self.on_message = on_message
         self.totalChunks = totalChunks
         self.thisChunk = thisChunk
         self.profileName = profileName or "xpcshell"
         self.mozInfo = mozInfo
         self.testingModulesDir = testingModulesDir
         self.pluginsPath = pluginsPath
         self.sequential = sequential
 
@@ -1363,17 +1154,16 @@ class XPCShellTests(object):
 
         self.buildTestList()
         if self.singleFile:
             self.sequential = True
 
         if shuffle:
             random.shuffle(self.alltests)
 
-        self.xunitResults = []
         self.cleanup_dir_list = []
         self.try_again_list = []
 
         kwargs = {
             'appPath': self.appPath,
             'xrePath': self.xrePath,
             'testingModulesDir': self.testingModulesDir,
             'debuggerInfo': self.debuggerInfo,
@@ -1385,17 +1175,17 @@ class XPCShellTests(object):
             'profileName': self.profileName,
             'singleFile': self.singleFile,
             'env': self.env, # making a copy of this in the testthreads
             'symbolsPath': self.symbolsPath,
             'logfiles': self.logfiles,
             'xpcshell': self.xpcshell,
             'xpcsRunArgs': self.xpcsRunArgs,
             'failureManifest': failureManifest,
-            'on_message': self.on_message,
+            'harness_timeout': self.harness_timeout,
         }
 
         if self.sequential:
             # Allow user to kill hung xpcshell subprocess with SIGINT
             # when we are only running tests sequentially.
             signal.signal(signal.SIGINT, markGotSIGINT)
 
         if self.debuggerInfo:
@@ -1406,46 +1196,54 @@ class XPCShellTests(object):
             if self.debuggerInfo.interactive:
                 signal.signal(signal.SIGINT, lambda signum, frame: None)
 
         # create a queue of all tests that will run
         tests_queue = deque()
         # also a list for the tests that need to be run sequentially
         sequential_tests = []
         for test_object in self.alltests:
-            name = test_object['path']
-            if self.singleFile and not name.endswith(self.singleFile):
+            # Test identifiers are provided for the convenience of logging. These
+            # start as path names, but are qualified with the ancestor manifest name
+            # when the same test file is run from more than one manifest.
+
+            path = test_object['path']
+            test_object['id'] = self.makeTestId(test_object)
+
+            if self.singleFile and not path.endswith(self.singleFile):
                 continue
 
-            if self.testPath and name.find(self.testPath) == -1:
+            if self.testPath and path.find(self.testPath) == -1:
                 continue
 
             self.testCount += 1
 
             test = testClass(test_object, self.event, self.cleanup_dir_list,
                     tests_root_dir=testsRootDir, app_dir_key=appDirKey,
                     interactive=interactive, verbose=verbose, pStdout=pStdout,
                     pStderr=pStderr, keep_going=keepGoing, log=self.log,
                     mobileArgs=mobileArgs, **kwargs)
             if 'run-sequentially' in test_object or self.sequential:
                 sequential_tests.append(test)
             else:
                 tests_queue.append(test)
 
         if self.sequential:
-            self.log.info("INFO | Running tests sequentially.")
+            self.log.info("Running tests sequentially.")
         else:
-            self.log.info("INFO | Using at most %d threads." % NUM_THREADS)
+            self.log.info("Using at most %d threads." % NUM_THREADS)
 
         # keep a set of NUM_THREADS running tests and start running the
         # tests in the queue at most NUM_THREADS at a time
         running_tests = set()
         keep_going = True
         exceptions = []
         tracebacks = []
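+        # Announce the full list of test ids to the structured logger before
+        # any individual test results are reported.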
+        self.log.suite_start([t['id'] for t in self.alltests])
+
         while tests_queue or running_tests:
             # if we're not supposed to continue and all of the running tests
             # are done, stop
             if not keep_going and not running_tests:
                 break
 
             # if there's room to run more tests, start running them
             while keep_going and tests_queue and (len(running_tests) < NUM_THREADS):
@@ -1531,60 +1329,51 @@ class XPCShellTests(object):
         # Clean up any slacker directories that might be lying around
         # Some might fail because of windows taking too long to unlock them.
         # We don't do anything if this fails because the test slaves will have
         # their $TEMP dirs cleaned up on reboot anyway.
         for directory in self.cleanup_dir_list:
             try:
                 shutil.rmtree(directory)
             except:
-                self.log.info("INFO | %s could not be cleaned up." % directory)
+                self.log.info("%s could not be cleaned up." % directory)
 
         if exceptions:
-            self.log.info("INFO | Following exceptions were raised:")
+            self.log.info("Following exceptions were raised:")
             for t in tracebacks:
                 self.log.error(t)
             raise exceptions[0]
 
         if self.testCount == 0:
-            self.log.error("TEST-UNEXPECTED-FAIL | runxpcshelltests.py | No tests run. Did you pass an invalid --test-path?")
+            self.log.error("No tests run. Did you pass an invalid --test-path?")
             self.failCount = 1
 
         self.log.info("INFO | Result summary:")
         self.log.info("INFO | Passed: %d" % self.passCount)
         self.log.info("INFO | Failed: %d" % self.failCount)
         self.log.info("INFO | Todo: %d" % self.todoCount)
         self.log.info("INFO | Retried: %d" % len(self.try_again_list))
 
-        if autolog:
-            self.post_to_autolog(self.xunitResults, xunitName)
-
-        if xunitFilename is not None:
-            self.writeXunitResults(filename=xunitFilename, results=self.xunitResults,
-                                   name=xunitName)
-
         if gotSIGINT and not keepGoing:
             self.log.error("TEST-UNEXPECTED-FAIL | Received SIGINT (control-C), so stopped run. " \
                            "(Use --keep-going to keep running tests after killing one with SIGINT)")
             return False
 
+        self.log.suite_end()
         return self.failCount == 0
 
 class XPCShellOptions(OptionParser):
     def __init__(self):
         """Process command line arguments and call runTests() to do the real work."""
         OptionParser.__init__(self)
 
         addCommonOptions(self)
         self.add_option("--app-path",
                         type="string", dest="appPath", default=None,
                         help="application directory (as opposed to XRE directory)")
-        self.add_option("--autolog",
-                        action="store_true", dest="autolog", default=False,
-                        help="post to autolog")
         self.add_option("--interactive",
                         action="store_true", dest="interactive", default=False,
                         help="don't automatically run tests, drop to an xpcshell prompt")
         self.add_option("--verbose",
                         action="store_true", dest="verbose", default=False,
                         help="always print stdout and stderr from tests")
         self.add_option("--keep-going",
                         action="store_true", dest="keepGoing", default=False,
@@ -1625,38 +1414,38 @@ class XPCShellOptions(OptionParser):
                         type = "string", dest="profileName", default=None,
                         help="name of application profile being tested")
         self.add_option("--build-info-json",
                         type = "string", dest="mozInfo", default=None,
                         help="path to a mozinfo.json including information about the build configuration. defaults to looking for mozinfo.json next to the script.")
         self.add_option("--shuffle",
                         action="store_true", dest="shuffle", default=False,
                         help="Execute tests in random order")
-        self.add_option("--xunit-file", dest="xunitFilename",
-                        help="path to file where xUnit results will be written.")
-        self.add_option("--xunit-suite-name", dest="xunitName",
-                        help="name to record for this xUnit test suite. Many "
-                             "tools expect Java class notation, e.g. "
-                             "dom.basic.foo")
         self.add_option("--failure-manifest", dest="failureManifest",
                         action="store",
                         help="path to file where failure manifest will be written.")
 
 def main():
     parser = XPCShellOptions()
+    structured.commandline.add_logging_group(parser)
     options, args = parser.parse_args()
 
+
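+    # Set up a structured logger, defaulting to tbpl-formatted output on
+    # stdout unless logging arguments were supplied on the command line.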
+    log = structured.commandline.setup_logging("XPCShell",
+                                               options,
+                                               {"tbpl": sys.stdout})
+
     if len(args) < 2 and options.manifest is None or \
        (len(args) < 1 and options.manifest is not None):
         print >>sys.stderr, """Usage: %s <path to xpcshell> <test dirs>
               or: %s --manifest=test.manifest <path to xpcshell>""" % (sys.argv[0],
                                                               sys.argv[0])
         sys.exit(1)
 
-    xpcsh = XPCShellTests()
+    xpcsh = XPCShellTests(log)
 
     if options.interactive and not options.testPath:
         print >>sys.stderr, "Error: You must specify a test filename in interactive mode!"
         sys.exit(1)
 
     if not xpcsh.runTests(args[0], testdirs=args[1:], **options.__dict__):
         sys.exit(1)
 
--- a/testing/xpcshell/selftest.py
+++ b/testing/xpcshell/selftest.py
@@ -4,18 +4,18 @@
 # http://creativecommons.org/publicdomain/zero/1.0/
 #
 
 from __future__ import with_statement
 import sys, os, unittest, tempfile, shutil
 import mozinfo
 
 from StringIO import StringIO
-from xml.etree.ElementTree import ElementTree
 
+from mozlog import structured
 from mozbuild.base import MozbuildObject
 os.environ.pop('MOZ_OBJDIR', None)
 build_obj = MozbuildObject.from_environment()
 
 from runxpcshelltests import XPCShellTests
 
 mozinfo.find_and_update_from_json()
 
@@ -24,16 +24,19 @@ objdir = build_obj.topobjdir.encode("utf
 if mozinfo.isMac:
   from buildconfig import substs
   xpcshellBin = os.path.join(objdir, "dist", substs['MOZ_MACBUNDLE_NAME'], "Contents", "MacOS", "xpcshell")
 else:
   xpcshellBin = os.path.join(objdir, "dist", "bin", "xpcshell")
   if sys.platform == "win32":
     xpcshellBin += ".exe"
 
+TEST_PASS_STRING = "TEST-PASS"
+TEST_FAIL_STRING = "TEST-UNEXPECTED-FAIL"
+
 SIMPLE_PASSING_TEST = "function run_test() { do_check_true(true); }"
 SIMPLE_FAILING_TEST = "function run_test() { do_check_true(false); }"
 
 ADD_TEST_SIMPLE = '''
 function run_test() { run_next_test(); }
 
 add_test(function test_simple() {
   do_check_true(true);
@@ -69,23 +72,40 @@ add_test(function test_child_simple () {
 '''
 
 CHILD_TEST_HANG = '''
 function run_test () { run_next_test(); }
 
 add_test(function test_child_simple () {
   do_test_pending("hang test");
   do_load_child_test_harness();
-  sendCommand("_log('child_test_start', {_message: 'CHILD-TEST-STARTED'}); " +
+  sendCommand("_testLogger.info('CHILD-TEST-STARTED'); " +
               + "const _TEST_FILE=['test_pass.js']; _execute_test(); ",
               do_test_finished);
   run_next_test();
 });
 '''
 
+SIMPLE_LOOPING_TEST = '''
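+// A test that calls do_test_pending() without ever calling do_test_finished(),
+// so it never completes on its own and must be ended by the harness timeout.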
+function run_test () { run_next_test(); }
+
+add_test(function test_loop () {
+  do_test_pending()
+});
+'''
+
+PASSING_TEST_UNICODE = '''
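+// A passing test whose assertion compares non-ASCII characters, exercising the
+// harness's handling of unicode in test output.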
+function run_test () { run_next_test(); }
+
+add_test(function test_unicode_print () {
+  do_check_eq("\u201c\u201d", "\u201c\u201d");
+  run_next_test();
+});
+'''
+
 ADD_TASK_SINGLE = '''
 Components.utils.import("resource://gre/modules/Promise.jsm");
 
 function run_test() { run_next_test(); }
 
 add_task(function test_task() {
   yield Promise.resolve(true);
   yield Promise.resolve(false);
@@ -259,17 +279,21 @@ function run_test() {
 
 class XPCShellTestsTests(unittest.TestCase):
     """
     Yes, these are unit tests for a unit test harness.
     """
     def setUp(self):
         self.log = StringIO()
         self.tempdir = tempfile.mkdtemp()
-        self.x = XPCShellTests(log=self.log)
+        logger = structured.commandline.setup_logging("selftest%s" % id(self),
+                                                      {},
+                                                      {"tbpl": self.log})
+        self.x = XPCShellTests(logger)
+        self.x.harness_timeout = 15
 
     def tearDown(self):
         shutil.rmtree(self.tempdir)
 
     def writeFile(self, name, contents):
         """
         Write |contents| to a file named |name| in the temp directory,
         and return the full path to the file.
@@ -295,29 +319,28 @@ class XPCShellTestsTests(unittest.TestCa
                 testlines.extend(t[1:])
         self.manifest = self.writeFile("xpcshell.ini", """
 [DEFAULT]
 head =
 tail =
 
 """ + "\n".join(testlines))
 
-    def assertTestResult(self, expected, shuffle=False, xunitFilename=None, verbose=False):
+    def assertTestResult(self, expected, shuffle=False, verbose=False):
         """
         Assert that self.x.runTests with manifest=self.manifest
         returns |expected|.
         """
         self.assertEquals(expected,
                           self.x.runTests(xpcshellBin,
                                           manifest=self.manifest,
                                           mozInfo=mozinfo.info,
                                           shuffle=shuffle,
                                           testsRootDir=self.tempdir,
                                           verbose=verbose,
-                                          xunitFilename=xunitFilename,
                                           sequential=True),
                           msg="""Tests should have %s, log:
 ========
 %s
 ========
 """ % ("passed" if expected else "failed", self.log.getvalue()))
 
     def _assertLog(self, s, expected):
@@ -347,53 +370,53 @@ tail =
         self.writeFile("test_basic.js", SIMPLE_PASSING_TEST)
         self.writeManifest(["test_basic.js"])
 
         self.assertTestResult(True)
         self.assertEquals(1, self.x.testCount)
         self.assertEquals(1, self.x.passCount)
         self.assertEquals(0, self.x.failCount)
         self.assertEquals(0, self.x.todoCount)
-        self.assertInLog("TEST-PASS")
-        self.assertNotInLog("TEST-UNEXPECTED-FAIL")
+        self.assertInLog(TEST_PASS_STRING)
+        self.assertNotInLog(TEST_FAIL_STRING)
 
     def testFail(self):
         """
         Check that a simple failing test without any manifest conditions fails.
         """
         self.writeFile("test_basic.js", SIMPLE_FAILING_TEST)
         self.writeManifest(["test_basic.js"])
 
         self.assertTestResult(False)
         self.assertEquals(1, self.x.testCount)
         self.assertEquals(0, self.x.passCount)
         self.assertEquals(1, self.x.failCount)
         self.assertEquals(0, self.x.todoCount)
-        self.assertInLog("TEST-UNEXPECTED-FAIL")
-        self.assertNotInLog("TEST-PASS")
+        self.assertInLog(TEST_FAIL_STRING)
+        self.assertNotInLog(TEST_PASS_STRING)
 
     @unittest.skipIf(build_obj.defines.get('MOZ_B2G'),
                      'selftests with child processes fail on b2g desktop builds')
     def testChildPass(self):
         """
         Check that a simple test running in a child process passes.
         """
         self.writeFile("test_pass.js", SIMPLE_PASSING_TEST)
         self.writeFile("test_child_pass.js", CHILD_TEST_PASSING)
         self.writeManifest(["test_child_pass.js"])
 
         self.assertTestResult(True, verbose=True)
         self.assertEquals(1, self.x.testCount)
         self.assertEquals(1, self.x.passCount)
         self.assertEquals(0, self.x.failCount)
         self.assertEquals(0, self.x.todoCount)
-        self.assertInLog("TEST-PASS")
+        self.assertInLog(TEST_PASS_STRING)
         self.assertInLog("CHILD-TEST-STARTED")
         self.assertInLog("CHILD-TEST-COMPLETED")
-        self.assertNotInLog("TEST-UNEXPECTED-FAIL")
+        self.assertNotInLog(TEST_FAIL_STRING)
 
 
     @unittest.skipIf(build_obj.defines.get('MOZ_B2G'),
                      'selftests with child processes fail on b2g desktop builds')
     def testChildFail(self):
         """
         Check that a simple failing test running in a child process fails.
         """
@@ -401,20 +424,20 @@ tail =
         self.writeFile("test_child_fail.js", CHILD_TEST_FAILING)
         self.writeManifest(["test_child_fail.js"])
 
         self.assertTestResult(False)
         self.assertEquals(1, self.x.testCount)
         self.assertEquals(0, self.x.passCount)
         self.assertEquals(1, self.x.failCount)
         self.assertEquals(0, self.x.todoCount)
-        self.assertInLog("TEST-UNEXPECTED-FAIL")
+        self.assertInLog(TEST_FAIL_STRING)
         self.assertInLog("CHILD-TEST-STARTED")
         self.assertInLog("CHILD-TEST-COMPLETED")
-        self.assertNotInLog("TEST-PASS")
+        self.assertNotInLog(TEST_PASS_STRING)
 
     @unittest.skipIf(build_obj.defines.get('MOZ_B2G'),
                      'selftests with child processes fail on b2g desktop builds')
     def testChildHang(self):
         """
         Check that incomplete output from a child process results in a
         test failure.
         """
@@ -422,118 +445,144 @@ tail =
         self.writeFile("test_child_hang.js", CHILD_TEST_HANG)
         self.writeManifest(["test_child_hang.js"])
 
         self.assertTestResult(False)
         self.assertEquals(1, self.x.testCount)
         self.assertEquals(0, self.x.passCount)
         self.assertEquals(1, self.x.failCount)
         self.assertEquals(0, self.x.todoCount)
-        self.assertInLog("TEST-UNEXPECTED-FAIL")
+        self.assertInLog(TEST_FAIL_STRING)
         self.assertInLog("CHILD-TEST-STARTED")
         self.assertNotInLog("CHILD-TEST-COMPLETED")
-        self.assertNotInLog("TEST-PASS")
+        self.assertNotInLog(TEST_PASS_STRING)
 
     def testSyntaxError(self):
         """
         Check that running a test file containing a syntax error produces
         a test failure and expected output.
         """
         self.writeFile("test_syntax_error.js", '"')
         self.writeManifest(["test_syntax_error.js"])
 
         self.assertTestResult(False, verbose=True)
         self.assertEquals(1, self.x.testCount)
         self.assertEquals(0, self.x.passCount)
         self.assertEquals(1, self.x.failCount)
         self.assertEquals(0, self.x.todoCount)
-        self.assertInLog("TEST-UNEXPECTED-FAIL")
-        self.assertNotInLog("TEST-PASS")
+        self.assertInLog(TEST_FAIL_STRING)
+        self.assertNotInLog(TEST_PASS_STRING)
+
+    def testUnicodeInAssertMethods(self):
+        """
+        Check that passing unicode characters through an assertion method works.
+        """
+        self.writeFile("test_unicode_assert.js", PASSING_TEST_UNICODE)
+        self.writeManifest(["test_unicode_assert.js"])
+
+        self.assertTestResult(True, verbose=True)
+
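# A minimal sketch of the kind of test PASSING_TEST_UNICODE (defined earlier in
# selftest.py; its body falls outside the hunks shown here) might contain: a
# non-ASCII message passed through an Assert method so it has to survive the
# structured-logging round trip. This is an assumption for illustration, not
# the changeset's actual constant.
PASSING_TEST_UNICODE_SKETCH = '''
function run_test() {
  // The message contains non-ASCII characters (\u00e9\u00e8) on purpose.
  Assert.ok(true, "Passing test with unicode: \u00e9\u00e8");
}
'''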
+    def testHangingTimeout(self):
+        """
+        Check that a test that never finishes results in the correct error log.
+        """
+        self.writeFile("test_loop.js", SIMPLE_LOOPING_TEST)
+        self.writeManifest(["test_loop.js"])
+
+        old_timeout = self.x.harness_timeout
+        self.x.harness_timeout = 1
+
+        self.assertTestResult(False)
+        self.assertEquals(1, self.x.testCount)
+        self.assertEquals(1, self.x.failCount)
+        self.assertEquals(0, self.x.passCount)
+        self.assertEquals(0, self.x.todoCount)
+        self.assertInLog("TEST-UNEXPECTED-TIMEOUT")
+
+        self.x.harness_timeout = old_timeout
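# A minimal sketch of what a never-terminating test such as SIMPLE_LOOPING_TEST
# (defined earlier in selftest.py; body not shown in this hunk) might look like;
# an assumption for illustration, not the changeset's actual constant. The test
# goes async and never calls do_test_finished(), so only the harness timeout
# (lowered to one second in testHangingTimeout above) can end the run.
SIMPLE_LOOPING_TEST_SKETCH = '''
function run_test() {
  do_test_pending();
  // Never call do_test_finished(); the harness has to time this test out.
}
'''
# Note that testHangingTimeout restores harness_timeout only after its
# assertions run; a try/finally around the override would keep a failing
# assertion from leaking the one-second timeout into later tests.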
 
     def testPassFail(self):
         """
         Check that running more than one test works.
         """
         self.writeFile("test_pass.js", SIMPLE_PASSING_TEST)
         self.writeFile("test_fail.js", SIMPLE_FAILING_TEST)
         self.writeManifest(["test_pass.js", "test_fail.js"])
 
         self.assertTestResult(False)
         self.assertEquals(2, self.x.testCount)
         self.assertEquals(1, self.x.passCount)
         self.assertEquals(1, self.x.failCount)
         self.assertEquals(0, self.x.todoCount)
-        self.assertInLog("TEST-PASS")
-        self.assertInLog("TEST-UNEXPECTED-FAIL")
+        self.assertInLog(TEST_PASS_STRING)
+        self.assertInLog(TEST_FAIL_STRING)
 
     def testSkip(self):
         """
         Check that a simple failing test skipped in the manifest does
         not cause failure.
         """
         self.writeFile("test_basic.js", SIMPLE_FAILING_TEST)
         self.writeManifest([("test_basic.js", "skip-if = true")])
         self.assertTestResult(True)
         self.assertEquals(1, self.x.testCount)
         self.assertEquals(0, self.x.passCount)
         self.assertEquals(0, self.x.failCount)
         self.assertEquals(0, self.x.todoCount)
-        self.assertNotInLog("TEST-UNEXPECTED-FAIL")
-        self.assertNotInLog("TEST-PASS")
+        self.assertNotInLog(TEST_FAIL_STRING)
+        self.assertNotInLog(TEST_PASS_STRING)
 
     def testKnownFail(self):
         """
         Check that a simple failing test marked as known-fail in the manifest
         does not cause failure.
         """
         self.writeFile("test_basic.js", SIMPLE_FAILING_TEST)
         self.writeManifest([("test_basic.js", "fail-if = true")])
         self.assertTestResult(True)
         self.assertEquals(1, self.x.testCount)
         self.assertEquals(0, self.x.passCount)
         self.assertEquals(0, self.x.failCount)
         self.assertEquals(1, self.x.todoCount)
-        self.assertInLog("TEST-KNOWN-FAIL")
+        self.assertInLog("TEST-FAIL")
         # This should be suppressed because the harness doesn't include
         # the full log from the xpcshell run when things pass.
-        self.assertNotInLog("TEST-UNEXPECTED-FAIL")
-        self.assertNotInLog("TEST-PASS")
+        self.assertNotInLog(TEST_FAIL_STRING)
+        self.assertNotInLog(TEST_PASS_STRING)
 
     def testUnexpectedPass(self):
         """
         Check that a passing test marked as known-fail in the manifest
         causes an unexpected pass.
         """
         self.writeFile("test_basic.js", SIMPLE_PASSING_TEST)
         self.writeManifest([("test_basic.js", "fail-if = true")])
         self.assertTestResult(False)
         self.assertEquals(1, self.x.testCount)
         self.assertEquals(0, self.x.passCount)
         self.assertEquals(1, self.x.failCount)
         self.assertEquals(0, self.x.todoCount)
         # From the outer (Python) harness
         self.assertInLog("TEST-UNEXPECTED-PASS")
         self.assertNotInLog("TEST-KNOWN-FAIL")
-        # From the inner (JS) harness
-        self.assertInLog("TEST-PASS")
 
     def testReturnNonzero(self):
         """
         Check that a test where xpcshell returns nonzero fails.
         """
         self.writeFile("test_error.js", "throw 'foo'")
         self.writeManifest(["test_error.js"])
 
         self.assertTestResult(False)
         self.assertEquals(1, self.x.testCount)
         self.assertEquals(0, self.x.passCount)
         self.assertEquals(1, self.x.failCount)
         self.assertEquals(0, self.x.todoCount)
-        self.assertInLog("TEST-UNEXPECTED-FAIL")
-        self.assertNotInLog("TEST-PASS")
+        self.assertInLog(TEST_FAIL_STRING)
+        self.assertNotInLog(TEST_PASS_STRING)
 
     def testAddTestSimple(self):
         """
         Ensure simple add_test() works.
         """
         self.writeFile("test_add_test_simple.js", ADD_TEST_SIMPLE)
         self.writeManifest(["test_add_test_simple.js"])
 
@@ -707,149 +756,102 @@ tail =
             self.writeFile(filename, SIMPLE_PASSING_TEST)
             manifest.append(filename)
 
         self.writeManifest(manifest)
         self.assertTestResult(True, shuffle=True)
         self.assertEquals(10, self.x.testCount)
         self.assertEquals(10, self.x.passCount)
 
-    def testXunitOutput(self):
-        """
-        Check that Xunit XML files are written.
-        """
-        self.writeFile("test_00.js", SIMPLE_PASSING_TEST)
-        self.writeFile("test_01.js", SIMPLE_FAILING_TEST)
-        self.writeFile("test_02.js", SIMPLE_PASSING_TEST)
-
-        manifest = [
-            "test_00.js",
-            "test_01.js",
-            ("test_02.js", "skip-if = true")
-        ]
-
-        self.writeManifest(manifest)
-
-        filename = os.path.join(self.tempdir, "xunit.xml")
-
-        self.assertTestResult(False, xunitFilename=filename)
-
-        self.assertTrue(os.path.exists(filename))
-        self.assertTrue(os.path.getsize(filename) > 0)
-
-        tree = ElementTree()
-        tree.parse(filename)
-        suite = tree.getroot()
-
-        self.assertTrue(suite is not None)
-        self.assertEqual(suite.get("tests"), "3")
-        self.assertEqual(suite.get("failures"), "1")
-        self.assertEqual(suite.get("skip"), "1")
-
-        testcases = suite.findall("testcase")
-        self.assertEqual(len(testcases), 3)
-
-        for testcase in testcases:
-            attributes = testcase.keys()
-            self.assertTrue("classname" in attributes)
-            self.assertTrue("name" in attributes)
-            self.assertTrue("time" in attributes)
-
-        self.assertTrue(testcases[1].find("failure") is not None)
-        self.assertTrue(testcases[2].find("skipped") is not None)
-
     def testDoThrowString(self):
         """
         Check that do_throw produces reasonable messages when the
         input is a string instead of an object
         """
         self.writeFile("test_error.js", ADD_TEST_THROW_STRING)
         self.writeManifest(["test_error.js"])
 
         self.assertTestResult(False)
-        self.assertInLog("TEST-UNEXPECTED-FAIL")
+        self.assertInLog(TEST_FAIL_STRING)
         self.assertInLog("Passing a string to do_throw")
-        self.assertNotInLog("TEST-PASS")
+        self.assertNotInLog(TEST_PASS_STRING)
 
     def testDoThrowForeignObject(self):
         """
         Check that do_throw produces reasonable messages when the input is
         a generic object with 'filename', 'message', and 'stack' attributes,
         but 'object instanceof Error' returns false.
         """
         self.writeFile("test_error.js", ADD_TEST_THROW_OBJECT)
         self.writeManifest(["test_error.js"])
 
         self.assertTestResult(False)
-        self.assertInLog("TEST-UNEXPECTED-FAIL")
+        self.assertInLog(TEST_FAIL_STRING)
         self.assertInLog("failure.js")
         self.assertInLog("Error object")
         self.assertInLog("ERROR STACK")
-        self.assertNotInLog("TEST-PASS")
+        self.assertNotInLog(TEST_PASS_STRING)
 
     def testDoReportForeignObject(self):
         """
         Check that do_report_unexpected_exception produces reasonable messages
         when the input is a generic object with 'filename', 'message', and
         'stack' attributes, but 'object instanceof Error' returns false.
         """
         self.writeFile("test_error.js", ADD_TEST_REPORT_OBJECT)
         self.writeManifest(["test_error.js"])
 
         self.assertTestResult(False)
-        self.assertInLog("TEST-UNEXPECTED-FAIL")
+        self.assertInLog(TEST_FAIL_STRING)
         self.assertInLog("failure.js")
         self.assertInLog("Error object")
         self.assertInLog("ERROR STACK")
-        self.assertNotInLog("TEST-PASS")
+        self.assertNotInLog(TEST_PASS_STRING)
 
     def testDoReportRefError(self):
         """
         Check that do_report_unexpected_exception produces reasonable messages when the
         input is a JS-generated Error
         """
         self.writeFile("test_error.js", ADD_TEST_REPORT_REF_ERROR)
         self.writeManifest(["test_error.js"])
 
         self.assertTestResult(False)
-        self.assertInLog("TEST-UNEXPECTED-FAIL")
+        self.assertInLog(TEST_FAIL_STRING)
         self.assertInLog("test_error.js")
         self.assertInLog("obj.noSuchFunction is not a function")
         self.assertInLog("run_test@")
-        self.assertNotInLog("TEST-PASS")
+        self.assertNotInLog(TEST_PASS_STRING)
 
     def testDoReportSyntaxError(self):
         """
         Check that attempting to load a test file containing a syntax error
         generates details of the error in the log
         """
         self.writeFile("test_error.js", LOAD_ERROR_SYNTAX_ERROR)
         self.writeManifest(["test_error.js"])
 
         self.assertTestResult(False)
-        self.assertInLog("TEST-UNEXPECTED-FAIL")
-        self.assertInLog("test_error.js")
-        self.assertInLog("test_error.js contains SyntaxError")
-        self.assertInLog("Diagnostic: SyntaxError: missing formal parameter at")
+        self.assertInLog(TEST_FAIL_STRING)
         self.assertInLog("test_error.js:3")
-        self.assertNotInLog("TEST-PASS")
+        self.assertNotInLog(TEST_PASS_STRING)
 
     def testDoReportNonSyntaxError(self):
         """
         Check that attempting to load a test file containing an error other
         than a syntax error generates details of the error in the log
         """
         self.writeFile("test_error.js", LOAD_ERROR_OTHER_ERROR)
         self.writeManifest(["test_error.js"])
 
         self.assertTestResult(False)
-        self.assertInLog("TEST-UNEXPECTED-FAIL")
-        self.assertInLog("Diagnostic: TypeError: generator function run_test returns a value at")
+        self.assertInLog(TEST_FAIL_STRING)
+        self.assertInLog("TypeError: generator function run_test returns a value at")
         self.assertInLog("test_error.js:4")
-        self.assertNotInLog("TEST-PASS")
+        self.assertNotInLog(TEST_PASS_STRING)
 
     def testAsyncCleanup(self):
         """
         Check that do_register_cleanup gracefully handles cleanup tasks
         that return a promise.
         """
         self.writeFile("test_asyncCleanup.js", ASYNC_CLEANUP)
         self.writeManifest(["test_asyncCleanup.js"])
--- a/toolkit/components/feeds/test/test_xml.js
+++ b/toolkit/components/feeds/test/test_xml.js
@@ -30,17 +30,17 @@
 function FeedListener(testcase) {
   this.testcase = testcase;
 }
 
 FeedListener.prototype = {
   handleResult: function(result) {
     var feed = result.doc;
     try {
-      do_print(true, "Testing feed " + this.testcase.file.path);
+      do_print("Testing feed " + this.testcase.file.path);
       Assert.ok(isIID(feed, Ci.nsIFeed), "Has feed interface");
 
       if (!eval(this.testcase.expect)) {
         Assert.ok(false, "expect failed for " + this.testcase.desc);
       } else {
         Assert.ok(true, "expect passed for " + this.testcase.desc);
       }
     } catch(e) {