Bug 886570 - Initial integration of structured logs in mochitest; r=ahal,Ms2ger
author Ahmed Kachkach <ahmed.kachkach@gmail.com>
Thu, 17 Jul 2014 09:02:00 +0200
changeset 215655 a283c1237d96d3e37b6735df0cce6ce4fc20ac88
parent 215654 e97c96651f93eaaa13ded899c9448a727e3e20fd
child 215656 f701b67c24051bce8d22a14008310d36b953dd53
push id 3857
push user raliiev@mozilla.com
push date Tue, 02 Sep 2014 16:39:23 +0000
treeherder mozilla-beta@5638b907b505
reviewers ahal, Ms2ger
bugs 886570
milestone 33.0a1
build/automationutils.py
build/mobile/remoteautomation.py
build/mobile/robocop/FennecMochitestAssert.java
content/base/test/test_fileapi.html
content/media/webspeech/synth/ipc/test/file_ipc.html
dom/devicestorage/ipc/test_ipc.html
dom/imptests/testharnessreport.js
dom/indexedDB/ipc/test_ipc.html
dom/inputmethod/mochitest/test_bug960946.html
dom/media/tests/ipc/test_ipc.html
dom/plugins/test/mochitest/test_npobject_getters.html
dom/workers/test/rvals_worker.js
js/xpconnect/tests/mochitest/test_bug661980.html
layout/base/tests/bug583889_inner1.html
layout/style/test/test_bug732209.html
testing/marionette/client/marionette/marionette.py
testing/mochitest/browser-harness.xul
testing/mochitest/browser-test.js
testing/mochitest/chrome-harness.js
testing/mochitest/runtests.py
testing/mochitest/runtestsb2g.py
testing/mochitest/runtestsremote.py
testing/mochitest/tests/SimpleTest/MemoryStats.js
testing/mochitest/tests/SimpleTest/SimpleTest.js
testing/mochitest/tests/SimpleTest/TestRunner.js
testing/mochitest/tests/SimpleTest/setup.js
testing/mochitest/tests/index.html
testing/mozbase/mozlog/mozlog/structured/formatters/machformatter.py
testing/mozbase/mozlog/mozlog/structured/handlers/__init__.py
testing/mozbase/mozlog/mozlog/structured/structuredlog.py
testing/mozbase/mozrunner/mozrunner/base/device.py
testing/specialpowers/content/MozillaLogger.js
toolkit/devtools/server/tests/mochitest/test_connectToChild.html
toolkit/mozapps/extensions/test/browser/head.js
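For context: this patch moves mochitest from numbered "NNN INFO TEST-*" log lines to mozlog's structured log protocol, where every message is a JSON-serializable dict keyed by an 'action' field. A minimal sketch of the message shapes the changes below rely on (real messages carry additional fields such as time, source, thread, and pid; the test names and values here are illustrative):

    {'action': 'suite_start', 'tests': ['test_foo.html']}
    {'action': 'test_start', 'test': 'test_foo.html'}
    {'action': 'test_status', 'test': 'test_foo.html', 'subtest': 'foo is bar',
     'status': 'FAIL', 'expected': 'PASS'}
    {'action': 'test_end', 'test': 'test_foo.html', 'status': 'OK'}
    {'action': 'log', 'level': 'info', 'message': 'free-form text'}
    {'action': 'suite_end'}

mozlog omits the 'expected' field when a result matches its expectation, which is why the IPC test harnesses below treat message.expected === undefined as a pass.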
--- a/build/automationutils.py
+++ b/build/automationutils.py
@@ -73,17 +73,17 @@ DEBUGGER_INFO = {
     "requiresEscapedArgs": True
   },
 
   # valgrind doesn't explain much about leaks unless you set the
   # '--leak-check=full' flag. But there are a lot of objects that are
   # semi-deliberately leaked, so we set '--show-possibly-lost=no' to avoid
   # uninteresting output from those objects. We set '--smc-check=all-non-file'
   # and '--vex-iropt-register-updates=allregs-at-mem-access' so that valgrind
-  # deals properly with JIT'd JavaScript code.  
+  # deals properly with JIT'd JavaScript code.
   "valgrind": {
     "interactive": False,
     "args": " ".join(["--leak-check=full",
                       "--show-possibly-lost=no",
                       "--smc-check=all-non-file",
                       "--vex-iropt-register-updates=allregs-at-mem-access"])
   }
 }
@@ -597,48 +597,50 @@ class ShutdownLeaks(object):
   def __init__(self, logger):
     self.logger = logger
     self.tests = []
     self.leakedWindows = {}
     self.leakedDocShells = set()
     self.currentTest = None
     self.seenShutdown = False
 
-  def log(self, line):
-    if line[2:11] == "DOMWINDOW":
-      self._logWindow(line)
-    elif line[2:10] == "DOCSHELL":
-      self._logDocShell(line)
-    elif line.startswith("TEST-START"):
-      fileName = line.split(" ")[-1].strip().replace("chrome://mochitests/content/browser/", "")
+  def log(self, message):
+    if message['action'] == 'log':
+      line = message['message']
+      if line[2:11] == "DOMWINDOW":
+        self._logWindow(line)
+      elif line[2:10] == "DOCSHELL":
+        self._logDocShell(line)
+    elif message['action'] == 'test_start':
+      fileName = message['test'].replace("chrome://mochitests/content/browser/", "")
       self.currentTest = {"fileName": fileName, "windows": set(), "docShells": set()}
-    elif line.startswith("INFO TEST-END"):
+    elif message['action'] == 'test_end':
       # don't track a test if no windows or docShells leaked
       if self.currentTest and (self.currentTest["windows"] or self.currentTest["docShells"]):
         self.tests.append(self.currentTest)
       self.currentTest = None
-    elif line.startswith("INFO TEST-START | Shutdown"):
+    elif message['action'] == 'suite_end':
       self.seenShutdown = True
 
   def process(self):
     for test in self._parseLeakingTests():
       for url, count in self._zipLeakedWindows(test["leakedWindows"]):
-        self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d window(s) until shutdown [url = %s]", test["fileName"], count, url)
+        self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d window(s) until shutdown [url = %s]" % (test["fileName"], count, url))
 
       if test["leakedDocShells"]:
-        self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d docShell(s) until shutdown", test["fileName"], len(test["leakedDocShells"]))
+        self.logger("TEST-UNEXPECTED-FAIL | %s | leaked %d docShell(s) until shutdown" % (test["fileName"], len(test["leakedDocShells"])))
 
   def _logWindow(self, line):
     created = line[:2] == "++"
     pid = self._parseValue(line, "pid")
     serial = self._parseValue(line, "serial")
 
     # log line has invalid format
     if not pid or not serial:
-      self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>", line)
+      self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>" % line)
       return
 
     key = pid + "." + serial
 
     if self.currentTest:
       windows = self.currentTest["windows"]
       if created:
         windows.add(key)
@@ -649,17 +651,17 @@ class ShutdownLeaks(object):
 
   def _logDocShell(self, line):
     created = line[:2] == "++"
     pid = self._parseValue(line, "pid")
     id = self._parseValue(line, "id")
 
     # log line has invalid format
     if not pid or not id:
-      self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>", line)
+      self.logger("TEST-UNEXPECTED-FAIL | ShutdownLeaks | failed to parse line <%s>" % line)
       return
 
     key = pid + "." + id
 
     if self.currentTest:
       docShells = self.currentTest["docShells"]
       if created:
         docShells.add(key)
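For clarity, a minimal sketch (not part of the patch) of driving the reworked ShutdownLeaks with structured messages; the DOMWINDOW payload and test path are illustrative, and the logger callback simply prints:

    import sys

    leaks = ShutdownLeaks(logger=lambda msg: sys.stdout.write(msg + '\n'))
    leaks.log({'action': 'test_start',
               'test': 'chrome://mochitests/content/browser/foo/browser_bar.js'})
    leaks.log({'action': 'log', 'level': 'info',
               'message': '++DOMWINDOW == 5 (0x7fd3) [pid = 1234] [serial = 9]'})
    leaks.log({'action': 'test_end', 'test': 'browser_bar.js', 'status': 'OK'})
    leaks.log({'action': 'suite_end'})
    leaks.process()  # reports windows/docShells that outlived shutdown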
--- a/build/mobile/remoteautomation.py
+++ b/build/mobile/remoteautomation.py
@@ -16,21 +16,23 @@ import mozcrash
 # signatures for logcat messages that we don't care about much
 fennecLogcatFilters = [ "The character encoding of the HTML document was not declared",
                         "Use of Mutation Events is deprecated. Use MutationObserver instead.",
                         "Unexpected value from nativeGetEnabledTags: 0" ]
 
 class RemoteAutomation(Automation):
     _devicemanager = None
 
-    def __init__(self, deviceManager, appName = '', remoteLog = None):
+    def __init__(self, deviceManager, appName = '', remoteLog = None,
+                 processArgs=None):
         self._devicemanager = deviceManager
         self._appName = appName
         self._remoteProfile = None
         self._remoteLog = remoteLog
+        self._processArgs = processArgs or {}
 
         # Default our product to fennec
         self._product = "fennec"
         self.lastTestSeen = "remoteautomation.py"
         Automation.__init__(self)
 
     def setDeviceManager(self, deviceManager):
         self._devicemanager = deviceManager
@@ -179,116 +181,142 @@ class RemoteAutomation(Automation):
 #TODO: figure out which platform require NO_EM_RESTART
 #        return app, ['--environ:NO_EM_RESTART=1'] + args
         return app, args
 
     def Process(self, cmd, stdout = None, stderr = None, env = None, cwd = None):
         if stdout == None or stdout == -1 or stdout == subprocess.PIPE:
             stdout = self._remoteLog
 
-        return self.RProcess(self._devicemanager, cmd, stdout, stderr, env, cwd, self._appName)
+        return self.RProcess(self._devicemanager, cmd, stdout, stderr, env, cwd, self._appName,
+                             **self._processArgs)
 
     # be careful here as this inner class doesn't have access to outer class members
     class RProcess(object):
         # device manager process
         dm = None
-        def __init__(self, dm, cmd, stdout = None, stderr = None, env = None, cwd = None, app = None):
+        def __init__(self, dm, cmd, stdout=None, stderr=None, env=None, cwd=None, app=None,
+                     messageLogger=None):
             self.dm = dm
             self.stdoutlen = 0
             self.lastTestSeen = "remoteautomation.py"
             self.proc = dm.launchProcess(cmd, stdout, cwd, env, True)
+            self.messageLogger = messageLogger
+
             if (self.proc is None):
                 if cmd[0] == 'am':
                     self.proc = stdout
                 else:
                     raise Exception("unable to launch process")
             self.procName = cmd[0].split('/')[-1]
             if cmd[0] == 'am' and cmd[1] == "instrument":
                 self.procName = app
                 print "Robocop process name: "+self.procName
 
             # Setting timeout at 1 hour since on a remote device this takes much longer
             self.timeout = 3600
             # The benefit of the following sleep is unclear; it was formerly 15 seconds
             time.sleep(1)
 
+            # Used to buffer log messages until we encounter a line break
+            self.logBuffer = ""
+
         @property
         def pid(self):
             pid = self.dm.processExist(self.procName)
             # HACK: we should probably be more sophisticated about monitoring
             # running processes for the remote case, but for now we'll assume
             # that this method can be called when nothing exists and it is not
             # an error
             if pid is None:
                 return 0
             return pid
 
-        @property
-        def stdout(self):
+        def read_stdout(self):
             """ Fetch the full remote log file using devicemanager and return just
-                the new log entries since the last call (as a multi-line string).
+                the new log entries since the last call (as a list of messages or lines).
             """
-            if self.dm.fileExists(self.proc):
-                try:
-                    newLogContent = self.dm.pullFile(self.proc, self.stdoutlen)
-                except DMError:
-                    # we currently don't retry properly in the pullFile
-                    # function in dmSUT, so an error here is not necessarily
-                    # the end of the world
-                    return ''
-                self.stdoutlen += len(newLogContent)
-                # Match the test filepath from the last TEST-START line found in the new
-                # log content. These lines are in the form:
-                # 1234 INFO TEST-START | /filepath/we/wish/to/capture.html\n
+            if not self.dm.fileExists(self.proc):
+                return []
+            try:
+                newLogContent = self.dm.pullFile(self.proc, self.stdoutlen)
+            except DMError:
+                # we currently don't retry properly in the pullFile
+                # function in dmSUT, so an error here is not necessarily
+                # the end of the world
+                return []
+            if not newLogContent:
+                return []
+
+            self.stdoutlen += len(newLogContent)
+
+            if self.messageLogger is None:
                 testStartFilenames = re.findall(r"TEST-START \| ([^\s]*)", newLogContent)
                 if testStartFilenames:
                     self.lastTestSeen = testStartFilenames[-1]
-                return newLogContent.strip('\n').strip()
-            else:
-                return ''
+                print newLogContent
+                return [newLogContent]
+
+            self.logBuffer += newLogContent
+            lines = self.logBuffer.split('\n')
+            if not lines:
+                return []
+
+            # We only keep the last (unfinished) line in the buffer
+            self.logBuffer = lines[-1]
+            del lines[-1]
+            messages = []
+            for line in lines:
+                # This passes the line to the logger (to be logged or buffered)
+                # and returns a list of structured message dicts (possibly empty)
+                parsed_messages = self.messageLogger.write(line)
+                for message in parsed_messages:
+                    if message['action'] == 'test_start':
+                        self.lastTestSeen = message['test']
+                messages += parsed_messages
+            return messages
 
         @property
         def getLastTestSeen(self):
             return self.lastTestSeen
 
         # Wait for the remote process to end (or for its activity to go to background).
         # While waiting, periodically retrieve the process output and print it.
         # If the process is still running after *timeout* seconds, return 1;
         # If the process is still running but no output is received in *noOutputTimeout*
         # seconds, return 2;
         # Else, once the process exits/goes to background, return 0.
         def wait(self, timeout = None, noOutputTimeout = None):
             timer = 0
             noOutputTimer = 0
-            interval = 20 
+            interval = 20
 
             if timeout == None:
                 timeout = self.timeout
 
             status = 0
             while (self.dm.getTopActivity() == self.procName):
                 # retrieve log updates every 60 seconds
-                if timer % 60 == 0: 
-                    t = self.stdout
-                    if t != '':
-                        print t
+                if timer % 60 == 0:
+                    messages = self.read_stdout()
+                    if messages:
                         noOutputTimer = 0
 
                 time.sleep(interval)
                 timer += interval
                 noOutputTimer += interval
                 if (timer > timeout):
                     status = 1
                     break
                 if (noOutputTimeout and noOutputTimer > noOutputTimeout):
                     status = 2
                     break
 
             # Flush anything added to stdout during the sleep
-            print self.stdout
+            self.read_stdout()
 
             return status
 
         def kill(self, stagedShutdown = False):
             if stagedShutdown:
                 # Trigger an ANR report with "kill -3" (SIGQUIT)
                 self.dm.killProcess(self.procName, 3)
                 time.sleep(3)
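read_stdout() above depends only on the write() method of the messageLogger it is handed (the MessageLogger added to runtests.py in this patch). A minimal sketch of that contract, assuming mozlog-style fields; the real class additionally buffers messages and forwards them to the structured logger:

    import json

    class MessageLoggerSketch(object):
        def write(self, line):
            """Turn one line of process output into a list of structured
            message dicts; plain text becomes a synthesized 'log' message."""
            try:
                message = json.loads(line)
                if not isinstance(message, dict) or 'action' not in message:
                    raise ValueError('not a structured message')
            except ValueError:
                message = {'action': 'log', 'level': 'info', 'message': line}
            # The real implementation logs or buffers the message here.
            return [message]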
--- a/build/mobile/robocop/FennecMochitestAssert.java
+++ b/build/mobile/robocop/FennecMochitestAssert.java
@@ -7,17 +7,16 @@ package org.mozilla.gecko;
 import java.util.LinkedList;
 
 import android.os.SystemClock;
 
 public class FennecMochitestAssert implements Assert {
     private LinkedList<testInfo> mTestList = new LinkedList<testInfo>();
 
     // Internal state variables to make logging match up with existing mochitests
-    private int mLineNumber = 0;
     private int mPassed = 0;
     private int mFailed = 0;
     private int mTodo = 0;
 
     // Used to write the first line of the test file
     private boolean mLogStarted = false;
 
     // Used to write the test-start/test-end log lines
@@ -40,35 +39,35 @@ public class FennecMochitestAssert imple
     }
 
     /** Set the filename used for dumpLog. */
     public void setLogFile(String filename) {
         FennecNativeDriver.setLogFile(filename);
 
         String message;
         if (!mLogStarted) {
-            dumpLog(Integer.toString(mLineNumber++) + " INFO SimpleTest START");
+            dumpLog("SimpleTest START");
             mLogStarted = true;
         }
 
         if (mLogTestName != "") {
             long diff = SystemClock.uptimeMillis() - mStartTime;
-            message = Integer.toString(mLineNumber++) + " INFO TEST-END | " + mLogTestName;
+            message = "TEST-END | " + mLogTestName;
             message += " | finished in " + diff + "ms";
             dumpLog(message);
             mLogTestName = "";
         }
     }
 
     public void setTestName(String testName) {
         String[] nameParts = testName.split("\\.");
         mLogTestName = nameParts[nameParts.length - 1];
         mStartTime = SystemClock.uptimeMillis();
 
-        dumpLog(Integer.toString(mLineNumber++) + " INFO TEST-START | " + mLogTestName);
+        dumpLog("TEST-START | " + mLogTestName);
     }
 
     class testInfo {
         public boolean mResult;
         public String mName;
         public String mDiag;
         public boolean mTodo;
         public boolean mInfo;
@@ -90,17 +89,17 @@ public class FennecMochitestAssert imple
         }
         if (test.mResult)
         {
             resultString = passString;
         }
         String diag = test.mName;
         if (test.mDiag != null) diag += " - " + test.mDiag;
 
-        String message = Integer.toString(mLineNumber++) + " INFO " + resultString + " | " + mLogTestName + " | " + diag;
+        String message = resultString + " | " + mLogTestName + " | " + diag;
         dumpLog(message);
 
         if (test.mInfo) {
             // do not count TEST-INFO messages
         } else if (test.mTodo) {
             mTodo++;
         } else if (isError) {
             mFailed++;
@@ -112,31 +111,31 @@ public class FennecMochitestAssert imple
         }
     }
 
     public void endTest() {
         String message;
 
         if (mLogTestName != "") {
             long diff = SystemClock.uptimeMillis() - mStartTime;
-            message = Integer.toString(mLineNumber++) + " INFO TEST-END | " + mLogTestName;
+            message = "TEST-END | " + mLogTestName;
             message += " | finished in " + diff + "ms";
             dumpLog(message);
             mLogTestName = "";
         }
 
-        message = Integer.toString(mLineNumber++) + " INFO TEST-START | Shutdown";
+        message = "TEST-START | Shutdown";
         dumpLog(message);
-        message = Integer.toString(mLineNumber++) + " INFO Passed: " + Integer.toString(mPassed);
+        message = "Passed: " + Integer.toString(mPassed);
         dumpLog(message);
-        message = Integer.toString(mLineNumber++) + " INFO Failed: " + Integer.toString(mFailed);
+        message = "Failed: " + Integer.toString(mFailed);
         dumpLog(message);
-        message = Integer.toString(mLineNumber++) + " INFO Todo: " + Integer.toString(mTodo);
+        message = "Todo: " + Integer.toString(mTodo);
         dumpLog(message);
-        message = Integer.toString(mLineNumber++) + " INFO SimpleTest FINISHED";
+        message = "SimpleTest FINISHED";
         dumpLog(message);
     }
 
     public void ok(boolean condition, String name, String diag) {
         testInfo test = new testInfo(condition, name, diag, false, false);
         _logMochitestResult(test, "TEST-PASS", "TEST-UNEXPECTED-FAIL");
         mTestList.add(test);
     }
--- a/content/base/test/test_fileapi.html
+++ b/content/base/test/test_fileapi.html
@@ -144,17 +144,16 @@ is(r.readyState, FileReader.EMPTY,
   "readyState in test reader get result without reading");
 is(r.error, null,
   "no error in test reader get result without reading");
 is(r.result, null,
   "result in test reader get result without reading");
 
 // Test loading an empty file works (and doesn't crash!)
 var emptyFile = createFileWithData("");
-dump("hello nurse");
 r = new FileReader();
 r.onload = getLoadHandler("", 0, "empty no encoding reading");
 r.readAsText(emptyFile, "");
 expectedTestCount++;
 
 r = new FileReader();
 r.onload = getLoadHandler("", 0, "empty utf8 reading");
 r.readAsText(emptyFile, "utf8");
@@ -422,18 +421,18 @@ function getLoadHandlerForArrayBuffer(ex
     testHasRun();
   }
 }
 
 function testHasRun() {
  //alert(testRanCounter);
  ++testRanCounter;
  if (testRanCounter == expectedTestCount) {
-    is(onloadHasRunText, true, "onload text should have fired by now"); 
-    is(onloadHasRunBinary, true, "onload binary should have fired by now"); 
+    is(onloadHasRunText, true, "onload text should have fired by now");
+    is(onloadHasRunBinary, true, "onload binary should have fired by now");
     SimpleTest.finish();
   }
 }
 
 function createFileWithData(fileData) {
   var dirSvc = SpecialPowers.Cc["@mozilla.org/file/directory_service;1"].getService(SpecialPowers.Ci.nsIProperties);
   var testFile = dirSvc.get("ProfD", SpecialPowers.Ci.nsIFile);
   testFile.append("fileAPItestfile" + fileNum);
--- a/content/media/webspeech/synth/ipc/test/file_ipc.html
+++ b/content/media/webspeech/synth/ipc/test/file_ipc.html
@@ -32,44 +32,40 @@
         sendAsyncMessage("test:SpeechSynthesis:ipcTestComplete", {
           result: JSON.stringify(TestRunner._failedTests)
         });
 
         if (oldComplete) {
           oldComplete();
         }
       };
-
-      let oldLog = TestRunner.log;
-      TestRunner.log = function(msg) {
+      TestRunner.structuredLogger._dumpMessage = function(msg) {
         sendAsyncMessage("test:SpeechSynthesis:ipcTestMessage", { msg: msg });
       }
     }
 
-    let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/;
-
+    let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
+    function validStructuredMessage(message) {
+      return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
+    }
     function onTestMessage(data) {
-      let message = SpecialPowers.wrap(data).json.msg;
-      let match = regex.exec(message);
-      if (match) {
-        let state = match[1];
-        let details = match[2] + " | " + match[3];
+      let message = SpecialPowers.wrap(data).data.msg;
 
-        switch (state) {
-          case "TEST-PASS":
-          case "TEST-KNOWN-FAIL":
-            ok(true, details);
-            break;
+      if (validStructuredMessage(message)) {
+        if (message.test === undefined || message.message === undefined) {
+          return;
+        }
 
-          case "TEST-UNEXPECTED-FAIL":
-          case "TEST-UNEXPECTED-PASS":
-            ok(false, details);
+        let details = message.test + " | " + message.message;
+
+        switch (message.action) {
+          case "test_status":
+          case "test_end":
+            ok(message.expected === undefined, message.test, message.message);
             break;
-
-          case "TEST-DEBUG-INFO":
           default:
             info(details);
         }
       }
     }
 
     function onTestComplete() {
       let comp = SpecialPowers.wrap(SpecialPowers.Components);
--- a/dom/devicestorage/ipc/test_ipc.html
+++ b/dom/devicestorage/ipc/test_ipc.html
@@ -31,44 +31,40 @@
           result: JSON.stringify(TestRunner._failedTests)
         });
 
         if (oldComplete) {
           oldComplete();
         }
       };
 
-      let oldLog = TestRunner.log;
-      TestRunner.log = function(msg) {
+      TestRunner.structuredLogger._dumpMessage = function(msg) {
         sendAsyncMessage("test:DeviceStorage:ipcTestMessage", { msg: msg });
       }
     }
 
-    let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/;
-
+    let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
+    function validStructuredMessage(message) {
+      return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
+    }
     function onTestMessage(data) {
-      let message = SpecialPowers.wrap(data).json.msg;
-      let match = regex.exec(message);
-      if (match) {
-        let state = match[1];
-        let details = match[2] + " | " + match[3];
+      let message = SpecialPowers.wrap(data).data.msg;
 
-        switch (state) {
-          case "TEST-PASS":
-          case "TEST-KNOWN-FAIL":
-            ok(true, details);
-            break;
+      if (validStructuredMessage(message)) {
+        if (message.test === undefined || message.message === undefined) {
+          return;
+        }
 
-          case "TEST-UNEXPECTED-FAIL":
-          case "TEST-UNEXPECTED-PASS":
-            ok(false, details);
+        switch (message.action) {
+          case "test_status":
+          case "test_end":
+            ok(message.expected === undefined, message.test, message.message);
             break;
-
-          case "TEST-DEBUG-INFO":
           default:
+            let details = message.test + " | " + message.message;
             info(details);
         }
       }
     }
 
     function onTestComplete() {
       let comp = SpecialPowers.wrap(SpecialPowers.Components);
       let mm = SpecialPowers.getBrowserFrameMessageManager(iframe);
--- a/dom/imptests/testharnessreport.js
+++ b/dom/imptests/testharnessreport.js
@@ -37,21 +37,27 @@ var W3CTest = {
 
   /**
    * Reference to the TestRunner object in the parent frame.
    */
   "runner": parent === this ? null : parent.TestRunner || parent.wrappedJSObject.TestRunner,
 
   /**
    * Prefixes for the error logging. Indexed first by int(todo) and second by
-   * int(result).
+   * int(result). Each entry also contains the test's status and expected status.
    */
   "prefixes": [
-    ["TEST-UNEXPECTED-FAIL", "TEST-PASS"],
-    ["TEST-KNOWN-FAIL", "TEST-UNEXPECTED-PASS"]
+    [
+      {status: 'FAIL', expected: 'PASS', message: "TEST-UNEXPECTED-FAIL"},
+      {status: 'PASS', expected: 'PASS', message: "TEST-PASS"}
+    ],
+    [
+      {status: 'FAIL', expected: 'FAIL', message: "TEST-KNOWN-FAIL"},
+      {status: 'PASS', expected: 'FAIL', message: "TEST-UNEXPECTED-PASS"}
+    ]
   ],
 
   /**
    * Prefix of the path to parent of the the failures directory.
    */
   "pathprefix": "/tests/dom/imptests/",
 
   /**
@@ -128,24 +134,31 @@ var W3CTest = {
     return aTest.name + (aTest.message ? ": " + aTest.message : "");
   },
 
   /**
    * Lets the test runner know about a test result.
    */
   "_log": function(test) {
     var url = this.getURL();
-    var msg = this.prefixes[+test.todo][+test.result] + " | ";
-    if (url) {
-      msg += url;
-    }
-    msg += " | " + this.formatTestMessage(test);
+    var message = this.formatTestMessage(test);
+    var result = this.prefixes[+test.todo][+test.result];
+
     if (this.runner) {
-      this.runner[(test.result === !test.todo) ? "log" : "error"](msg);
+      this.runner.structuredLogger.testStatus(url,
+                                              test.name,
+                                              result.status,
+                                              result.expected,
+                                              message);
     } else {
+      var msg = result.message + " | ";
+      if (url) {
+        msg += url;
+      }
+      msg += " | " + this.formatTestMessage(test);
       dump(msg + "\n");
     }
   },
 
   /**
    * Logs a message about collapsed messages (if any), and resets the counter.
    */
   "_logCollapsedMessages": function() {
--- a/dom/indexedDB/ipc/test_ipc.html
+++ b/dom/indexedDB/ipc/test_ipc.html
@@ -44,54 +44,43 @@
 
         sendAsyncMessage("test:indexedDB:ipcTestComplete");
 
         if (oldComplete) {
           oldComplete();
         }
       };
 
-      function sendTestMessage(msg) {
+      TestRunner.structuredLogger._dumpMessage = function(msg) {
         sendAsyncMessage("test:indexedDB:ipcTestMessage", { msg: msg });
       }
-
-      TestRunner.log = sendTestMessage;
-      TestRunner.error = sendTestMessage;
     }
 
-    let regexString =
-      "^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL" +
-      "|TEST-DEBUG-INFO|TEST-INFO) \\| ([^\\|]+) \\|(.*)";
-
-    let regex = new RegExp(regexString);
-
     let seenTestMessage = false;
 
+    let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
+    function validStructuredMessage(message) {
+      return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
+    }
     function onTestMessage(data) {
       seenTestMessage = true;
       let message = SpecialPowers.wrap(data).data.msg;
-      let match = regex.exec(message);
-      if (match) {
-        let state = match[1];
-        let details = match[2] + " | " + match[3];
 
-        switch (state) {
-          case "TEST-PASS":
-          case "TEST-KNOWN-FAIL":
-            ok(true, details);
+      if (validStructuredMessage(message)) {
+        if (message.test === undefined || message.message === undefined) {
+          return;
+        }
+
+        switch (message.action) {
+          case "test_status":
+          case "test_end":
+            ok(message.expected === undefined, message.test, message.message);
             break;
-
-          case "TEST-UNEXPECTED-FAIL":
-          case "TEST-UNEXPECTED-PASS":
-            ok(false, details);
-            break;
-
-          case "TEST-INFO":
-          case "TEST-DEBUG-INFO":
           default:
+            let details = message.test + " | " + message.message;
             info(details);
         }
       }
     }
 
     let usingChildProcess = false;
 
     function onProcessType(data) {
--- a/dom/inputmethod/mochitest/test_bug960946.html
+++ b/dom/inputmethod/mochitest/test_bug960946.html
@@ -25,17 +25,17 @@ var result = ["keydown", "keypress", "ke
 inputmethod_setup(function() {
   runTest();
 });
 
 // The frame script running in file_test_backspace_event.html.
 function appFrameScript() {
   let input = content.document.getElementById('test-input');
   input.onkeydown = input.onkeypress = input.onkeyup = function(event) {
-    dump('key event was fired in file_test_backspace_event.html.');
+    dump('key event was fired in file_test_backspace_event.html.\n');
     sendAsyncMessage('test:KeyBoard:keyEvent', {'type':event.type});
   };
 }
 
 function runTest() {
   let im = navigator.mozInputMethod;
 
   im.oninputcontextchange = function() {
--- a/dom/media/tests/ipc/test_ipc.html
+++ b/dom/media/tests/ipc/test_ipc.html
@@ -19,18 +19,18 @@
     SimpleTest.requestLongerTimeout(100);
 
     // Disable crash observers as it breaks later tests.
     function iframeScriptFirst() {
       SpecialPowers.prototype.registerProcessCrashObservers = function() { };
       SpecialPowers.prototype.unregisterProcessCrashObservers = function() { };
 
       content.wrappedJSObject.RunSet.reloadAndRunAll({
-          preventDefault: function() { },
-          __exposedProps__: { preventDefault: 'r' }
+        preventDefault: function() { },
+        __exposedProps__: { preventDefault: 'r' }
       });
     }
 
     function iframeScriptSecond() {
       let TestRunner = content.wrappedJSObject.TestRunner;
       let oldComplete = TestRunner.onComplete;
 
       TestRunner.onComplete = function() {
@@ -39,47 +39,41 @@
         sendAsyncMessage("test:PeerConnection:ipcTestComplete", {
           result: JSON.stringify(TestRunner._failedTests)
         });
 
         if (oldComplete) {
           oldComplete();
         }
       };
-      let oldLog = TestRunner.log;
-      TestRunner.log = function(msg) {
+
+      TestRunner.structuredLogger._dumpMessage = function(msg) {
         sendAsyncMessage("test:PeerConnection:ipcTestMessage", { msg: msg });
-      };
-      TestRunner.error = function(msg) {
-        sendAsyncMessage("test:PeerConnection:ipcTestMessage", { msg: msg });
-      };
+      }
     }
 
-    let regex = /^(TEST-PASS|TEST-UNEXPECTED-PASS|TEST-KNOWN-FAIL|TEST-UNEXPECTED-FAIL|TEST-DEBUG-INFO) \| ([^\|]+) \|(.*)/;
-
+    let VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
+    function validStructuredMessage(message) {
+      return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
+    }
     function onTestMessage(data) {
-      let message = SpecialPowers.wrap(data).json.msg;
-      let match = regex.exec(message);
-      if (match) {
-        let state = match[1];
-        let details = match[2] + " | " + match[3];
+      let message = SpecialPowers.wrap(data).data.msg;
 
-        switch (state) {
-          case "TEST-PASS":
-          case "TEST-KNOWN-FAIL":
-            ok(true, details);
-            break;
+      if (validStructuredMessage(message)) {
+        if (message.test === undefined || message.message === undefined) {
+          return;
+        }
 
-          case "TEST-UNEXPECTED-FAIL":
-          case "TEST-UNEXPECTED-PASS":
-            ok(false, details);
+        switch (message.action) {
+          case "test_status":
+          case "test_end":
+            ok(message.expected === undefined, message.test, message.message);
             break;
-
-          case "TEST-DEBUG-INFO":
           default:
+            let details = message.test + " | " + message.message;
             info(details);
         }
       }
     }
 
     function onTestComplete() {
       let comp = SpecialPowers.wrap(SpecialPowers.Components);
       let mm = SpecialPowers.getBrowserFrameMessageManager(iframe);
--- a/dom/plugins/test/mochitest/test_npobject_getters.html
+++ b/dom/plugins/test/mochitest/test_npobject_getters.html
@@ -1,16 +1,16 @@
 <head>
   <title>NPNV*NPObject accessibility tests</title>
   <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
   <script type="application/javascript" src="utils.js"></script>
 
 <body onload="runTests()">
   <script class="testbody" type="application/javascript">
-  dump('lastScript');
+  dump('lastScript\n');
 
   SimpleTest.waitForExplicitFinish();
   setTestPluginEnabledState(SpecialPowers.Ci.nsIPluginTag.STATE_ENABLED);
 
   function runTests() {
     ok(document.getElementById('plugin1').pluginFoundElement, "plugin1.pluginFoundElement (NPNVPluginElementNPObject)", document.getElementById('plugin1').pluginFoundElement);
     ok(window.pluginFoundWindow, "window.pluginFoundWindow (NPNVWindowNPObject)", window.pluginFoundWindow);
 
--- a/dom/workers/test/rvals_worker.js
+++ b/dom/workers/test/rvals_worker.js
@@ -2,12 +2,12 @@ onmessage = function(evt) {
   postMessage(postMessage('ignore') == undefined);
 
   var id = setInterval(function() {}, 200);
   postMessage(clearInterval(id) == undefined);
 
   id = setTimeout(function() {}, 200);
   postMessage(clearTimeout(id) == undefined);
 
-  postMessage(dump(42) == undefined);
+  postMessage(dump(42 + '\n') == undefined);
 
   postMessage('finished');
 }
--- a/js/xpconnect/tests/mochitest/test_bug661980.html
+++ b/js/xpconnect/tests/mochitest/test_bug661980.html
@@ -7,17 +7,17 @@ https://bugzilla.mozilla.org/show_bug.cg
   <title>Test for Bug 661980</title>
   <script type="application/javascript" src="/tests/SimpleTest/SimpleTest.js"></script>
   <link rel="stylesheet" type="text/css" href="/tests/SimpleTest/test.css"/>
 </head>
 <body>
 <a target="_blank" href="https://bugzilla.mozilla.org/show_bug.cgi?id=661980">Mozilla Bug 661980</a>
 <p id="display"></p>
 <div id="content" style="display: none">
-  
+
 </div>
 <pre id="test">
 <script type="application/javascript">
 
 /** Test for Bug 661980 **/
 
 // While not currently needed, make this as similar as possible to a real
 // EventTarget just to make sure that we're tripping on the wrapping and
@@ -25,17 +25,17 @@ https://bugzilla.mozilla.org/show_bug.cg
 var fakeTarget = {
   addEventListener: function() {},
   removeEventListener: function() {},
   dispatchEvent: function() {}
 }
 
 var mouseevent = document.createEvent("MouseEvent");
 var didThrow = false;
-dump("hello nurse");
+dump("hello nurse\n");
 try {
   mouseevent.initMouseEvent("mouseover",
                             false, false,
                             window,
                             1, 2, 3, 4, 5,
                             false, false, false, false,
                             0,
                             fakeTarget);
--- a/layout/base/tests/bug583889_inner1.html
+++ b/layout/base/tests/bug583889_inner1.html
@@ -28,21 +28,21 @@ function runTest() {
 
   postPos();
 
   inner.src = "bug583889_inner2.html#id1";
   inner.onload = grabEventAndGo;
   // Let parent process send message.
   // Wait for onLoad event from 'inner' iframe.
   yield;
-  
+
   postPos();
-  
+
   inner.onload = null;
-  dump("hi");
+  dump("hi\n");
   inner.contentWindow.location = "bug583889_inner2.html#id2"
   waitAsync();
   // Let parent process send message.
   // Let 'inner' iframe update itself.
   yield;
 
   postPos();
 
--- a/layout/style/test/test_bug732209.html
+++ b/layout/style/test/test_bug732209.html
@@ -69,17 +69,17 @@ addLoadEvent(function() {
   var spans = $("content").querySelectorAll("span");
   for (var i = 0; i < spans.length; ++i) {
     is(getComputedStyle(spans[i], "").color, "rgb(0, 128, 0)",
        "Span " + spans[i].id + " should be green");
   }
 
   try {
     var sheet = $("cross-origin-sheet").sheet;
-    dump('aaa');
+    dump('aaa\n');
     is(sheet.cssRules.length, 2,
        "Should be able to get length of list of rules");
     is(sheet.cssRules[0].style.color, "green",
        "Should be able to read individual rules");
   } catch (e) {
     ok(false,
        "Should be allowed to access cross-origin sheet that opted in with CORS: " + e);
   }
--- a/testing/marionette/client/marionette/marionette.py
+++ b/testing/marionette/client/marionette/marionette.py
@@ -457,17 +457,17 @@ class Marionette(object):
     TIMEOUT_SCRIPT = 'script'
     TIMEOUT_PAGE = 'page load'
 
     def __init__(self, host='localhost', port=2828, app=None, app_args=None, bin=None,
                  profile=None, emulator=None, sdcard=None, emulator_img=None,
                  emulator_binary=None, emulator_res=None, connect_to_running_emulator=False,
                  gecko_log=None, homedir=None, baseurl=None, no_window=False, logdir=None,
                  busybox=None, symbols_path=None, timeout=None, socket_timeout=360,
-                 device_serial=None, adb_path=None):
+                 device_serial=None, adb_path=None, process_args=None):
         self.host = host
         self.port = self.local_port = port
         self.bin = bin
         self.instance = None
         self.session = None
         self.window = None
         self.runner = None
         self.emulator = None
@@ -514,25 +514,27 @@ class Marionette(object):
                                             logdir=logdir,
                                             arch=emulator,
                                             sdcard=sdcard,
                                             symbols_path=symbols_path,
                                             binary=emulator_binary,
                                             userdata=emulator_img,
                                             resolution=emulator_res,
                                             profile=profile,
-                                            adb_path=adb_path)
+                                            adb_path=adb_path,
+                                            process_args=process_args)
             self.emulator = self.runner.device
             self.emulator.start()
             self.port = self.emulator.setup_port_forwarding(self.port)
             assert(self.emulator.wait_for_port(self.port)), "Timed out waiting for port!"
 
         if connect_to_running_emulator:
             self.runner = B2GEmulatorRunner(b2g_home=homedir,
-                                            logdir=logdir)
+                                            logdir=logdir,
+                                            process_args=process_args)
             self.emulator = self.runner.device
             self.emulator.connect()
             self.port = self.emulator.setup_port_forwarding(self.port)
             assert(self.emulator.wait_for_port(self.port)), "Timed out waiting for port!"
 
         self.client = MarionetteTransport(self.host, self.port, self.socket_timeout)
 
         if emulator:
--- a/testing/mochitest/browser-harness.xul
+++ b/testing/mochitest/browser-harness.xul
@@ -6,16 +6,17 @@
 
 <window id="browserTestHarness"
         xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul"
         onload="TestStart();"
         title="Browser chrome tests"
         width="1024">
   <script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/MozillaLogger.js"/>
   <script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/LogController.js"/>
+  <script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/TestRunner.js"/>
   <script type="application/javascript" src="chrome://mochikit/content/chrome-harness.js"/>
   <script type="application/javascript" src="chrome://mochikit/content/manifestLibrary.js" />
   <script type="application/javascript" src="chrome://mochikit/content/chunkifyTests.js"/>
   <style xmlns="http://www.w3.org/1999/xhtml"><![CDATA[
     #results {
       margin: 5px;
       background-color: window;
       -moz-user-select: text;
@@ -60,39 +61,39 @@
       text-decoration: underline;
     }
   ]]></style>
   <script type="application/javascript;version=1.7"><![CDATA[
     if (Cc === undefined) {
       var Cc = Components.classes;
       var Ci = Components.interfaces;
     }
-    
+
     var gConfig;
 
     var gDumper = {
       get fileLogger() {
         let logger = null;
         if (gConfig.logFile) {
           try {
             logger = new MozillaFileLogger(gConfig.logFile)
           } catch (ex) {
             dump("TEST-UNEXPECTED-FAIL | (browser-harness.xul) | " +
                  "Error trying to log to " + gConfig.logFile + ": " + ex + "\n");
           }
         }
         delete this.fileLogger;
         return this.fileLogger = logger;
       },
-
+      structuredLogger: new StructuredLogger(),
       dump: function (str) {
-        dump(str);
+        this.structuredLogger.info(str);
 
         if (this.fileLogger)
-          this.fileLogger._foStream.write(str, str.length);
+          this.fileLogger.log(str);
       },
 
       done: function () {
         if (this.fileLogger)
           this.fileLogger.close();
       }
     }
 
@@ -136,17 +137,28 @@
       get failCount() {
         return this.results.filter(function (t) !t.info && !t.pass).length;
       },
 
       addResult: function addResult(result) {
         this.lastOutputTime = Date.now();
         this.results.push(result);
 
-        this.dumper.dump(result.result + " | " + this.path + " | " + result.msg + "\n");
+        if (result.info) {
+          if (result.msg) {
+            this.dumper.structuredLogger.info(result.msg);
+          }
+          return;
+        }
+
+        this.dumper.structuredLogger.testStatus(this.path,
+                                                result.msg,
+                                                result.status,
+                                                result.expected,
+                                                result.msg);
       },
 
       setDuration: function setDuration(duration) {
         this.duration = duration;
       },
 
       get htmlLog() {
         let txtToHTML = Cc["@mozilla.org/txttohtmlconv;1"].
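The addResult change above routes per-assertion results through structuredLogger.testStatus(test, subtest, status, expected, message). For comparison, the equivalent Python-side calls (a sketch using the mozlog imports runtests.py adds below; the logger name and test names are illustrative):

    import sys
    from mozlog.structured.formatters import TbplFormatter
    from mozlog.structured.handlers import StreamHandler
    from mozlog.structured.structuredlog import StructuredLogger

    logger = StructuredLogger('mochitest-example')
    logger.add_handler(StreamHandler(sys.stdout, TbplFormatter()))
    logger.suite_start(tests=['browser_bar.js'])
    logger.test_start('browser_bar.js')
    logger.test_status('browser_bar.js', 'bar is baz', 'FAIL',
                       expected='PASS', message='got qux')
    logger.test_end('browser_bar.js', 'OK')
    logger.suite_end()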
--- a/testing/mochitest/browser-test.js
+++ b/testing/mochitest/browser-test.js
@@ -54,17 +54,17 @@ function testInit() {
     Services.ww.openWindow(window, "chrome://mochikit/content/browser-harness.xul", "browserTest",
                            "chrome,centerscreen,dialog=no,resizable,titlebar,toolbar=no,width=800,height=600", sstring);
   } else {
     // This code allows us to redirect without requiring specialpowers for chrome and a11y tests.
     let messageHandler = function(m) {
       messageManager.removeMessageListener("chromeEvent", messageHandler);
       var url = m.json.data;
 
-      // Window is the [ChromeWindow] for messageManager, so we need content.window 
+      // Window is the [ChromeWindow] for messageManager, so we need content.window
       // Currently chrome tests are run in a content window instead of a ChromeWindow
       var webNav = content.window.QueryInterface(Components.interfaces.nsIInterfaceRequestor)
                          .getInterface(Components.interfaces.nsIWebNavigation);
       webNav.loadURI(url, null, null, null, null);
     };
 
     var listener = 'data:,function doLoad(e) { var data=e.detail&&e.detail.data;removeEventListener("contentEvent", function (e) { doLoad(e); }, false, true);sendAsyncMessage("chromeEvent", {"data":data}); };addEventListener("contentEvent", function (e) { doLoad(e); }, false, true);';
     messageManager.loadFrameScript(listener, true);
@@ -153,17 +153,17 @@ Tester.prototype = {
       gConfig = readConfig();
 
     if (gConfig.runUntilFailure)
       this.runUntilFailure = true;
 
     if (gConfig.repeat)
       this.repeat = gConfig.repeat;
 
-    this.dumper.dump("*** Start BrowserChrome Test Results ***\n");
+    this.dumper.structuredLogger.info("*** Start BrowserChrome Test Results ***");
     Services.console.registerListener(this);
     Services.obs.addObserver(this, "chrome-document-global-created", false);
     Services.obs.addObserver(this, "content-document-global-created", false);
     this._globalProperties = Object.keys(window);
     this._globalPropertyWhitelist = [
       "navigator", "constructor", "top",
       "Application",
       "__SS_tabsToRestore", "__SSi",
@@ -199,17 +199,17 @@ Tester.prototype = {
     // Replace the last tab with a fresh one
     if (window.gBrowser) {
       gBrowser.addTab("about:blank", { skipAnimation: true });
       gBrowser.removeCurrentTab();
       gBrowser.stop();
     }
 
     // Remove stale windows
-    this.dumper.dump("TEST-INFO | checking window state\n");
+    this.dumper.structuredLogger.info("checking window state");
     let windowsEnum = Services.wm.getEnumerator(null);
     while (windowsEnum.hasMoreElements()) {
       let win = windowsEnum.getNext();
       if (win != window && !win.closed &&
           win.document.documentElement.getAttribute("id") != "browserTestHarness") {
         let type = win.document.documentElement.getAttribute("windowtype");
         switch (type) {
         case "navigator:browser":
@@ -218,17 +218,20 @@ Tester.prototype = {
         case null:
           type = "unknown window";
           break;
         }
         let msg = baseMsg.replace("{elt}", type);
         if (this.currentTest)
           this.currentTest.addResult(new testResult(false, msg, "", false));
         else
-          this.dumper.dump("TEST-UNEXPECTED-FAIL | (browser-test.js) | " + msg + "\n");
+          this.dumper.structuredLogger.testEnd("browser-test.js",
+                                               "FAIL",
+                                               "PASS",
+                                               msg);
 
         win.close();
       }
     }
 
     // Make sure the window is raised before each test.
     this.SimpleTest.waitForFocus(aCallback);
   },
@@ -245,32 +248,33 @@ Tester.prototype = {
       this.currentTestIndex = -1;
       this.nextTest();
     }
     else{
       Services.console.unregisterListener(this);
       Services.obs.removeObserver(this, "chrome-document-global-created");
       Services.obs.removeObserver(this, "content-document-global-created");
       this.Promise.Debugging.clearUncaughtErrorObservers();
-      this.dumper.dump("\nINFO TEST-START | Shutdown\n");
+      this.dumper.structuredLogger.info("TEST-START | Shutdown");
 
       if (this.tests.length) {
-        this.dumper.dump("Browser Chrome Test Summary\n");
-  
-        this.dumper.dump("\tPassed: " + passCount + "\n" +
-                         "\tFailed: " + failCount + "\n" +
-                         "\tTodo: " + todoCount + "\n");
+        this.dumper.structuredLogger.info("Browser Chrome Test Summary");
+        this.dumper.structuredLogger.info("Passed:  " + passCount);
+        this.dumper.structuredLogger.info("Failed:  " + failCount);
+        this.dumper.structuredLogger.info("Todo:    " + todoCount);
       } else {
-        this.dumper.dump("TEST-UNEXPECTED-FAIL | (browser-test.js) | " +
-                         "No tests to run. Did you pass an invalid --test-path?\n");
+        this.dumper.structuredLogger.testEnd("browser-test.js",
+                                             "FAIL",
+                                             "PASS",
+                                             "No tests to run. Did you pass an invalid --test-path?");
       }
-      this.dumper.dump("\n*** End BrowserChrome Test Results ***\n");
-  
+      this.dumper.structuredLogger.info("*** End BrowserChrome Test Results ***");
+
       this.dumper.done();
-  
+
       // Tests complete, notify the callback and return
       this.callback(this.tests);
       this.callback = null;
       this.tests = null;
       this.openedWindows = null;
     }
   },
 
@@ -409,27 +413,30 @@ Tester.prototype = {
         }
       }
 
       // Dump memory stats for main thread.
       if (Cc["@mozilla.org/xre/runtime;1"]
           .getService(Ci.nsIXULRuntime)
           .processType == Ci.nsIXULRuntime.PROCESS_TYPE_DEFAULT)
       {
-        this.MemoryStats.dump((l) => { this.dumper.dump(l + "\n"); },
+        this.MemoryStats.dump(this.dumper.structuredLogger,
                               this.currentTestIndex,
                               this.currentTest.path,
                               gConfig.dumpOutputDirectory,
                               gConfig.dumpAboutMemoryAfterTest,
                               gConfig.dumpDMDAfterTest);
       }
 
       // Note the test run time
       let time = Date.now() - this.lastStartTime;
-      this.dumper.dump("INFO TEST-END | " + this.currentTest.path + " | finished in " + time + "ms\n");
+      this.dumper.structuredLogger.testEnd(this.currentTest.path,
+                                           "OK",
+                                           undefined,
+                                           "finished in " + time + "ms");
       this.currentTest.setDuration(time);
 
       if (this.runUntilFailure && this.currentTest.failCount > 0) {
         this.haltTests();
       }
 
       // Restore original SimpleTest methods to avoid leaks.
       SIMPLETEST_OVERRIDES.forEach(m => {
@@ -525,17 +532,17 @@ Tester.prototype = {
       }
 
       this.currentTestIndex++;
       this.execTest();
     }).bind(this));
   }),
 
   execTest: function Tester_execTest() {
-    this.dumper.dump("TEST-START | " + this.currentTest.path + "\n");
+    this.dumper.structuredLogger.testStart(this.currentTest.path);
 
     this.SimpleTest.reset();
 
     // Load the tests into a testscope
     let currentScope = this.currentTest.scope = new testScope(this, this.currentTest);
     let currentTest = this.currentTest;
 
     // Import utils in the test scope.
@@ -694,20 +701,24 @@ Tester.prototype = {
 function testResult(aCondition, aName, aDiag, aIsTodo, aStack) {
   this.msg = aName || "";
 
   this.info = false;
   this.pass = !!aCondition;
   this.todo = aIsTodo;
 
   if (this.pass) {
-    if (aIsTodo)
-      this.result = "TEST-KNOWN-FAIL";
-    else
-      this.result = "TEST-PASS";
+    if (aIsTodo) {
+      this.status = "FAIL";
+      this.expected = "FAIL";
+    } else {
+      this.status = "PASS";
+      this.expected = "PASS";
+    }
+
   } else {
     if (aDiag) {
       if (typeof aDiag == "object" && "fileName" in aDiag) {
         // we have an exception - print filename and linenumber information
         this.msg += " at " + aDiag.fileName + ":" + aDiag.lineNumber;
       }
       this.msg += " - " + aDiag;
     }
@@ -720,33 +731,35 @@ function testResult(aCondition, aName, a
           frames.push(frame.filename + ":" + frame.name + ":" + frame.lineNumber);
         }
         normalized = frames.join("\n");
       } else {
         normalized = "" + aStack;
       }
       this.msg += Task.Debugging.generateReadableStack(normalized, "    ");
     }
-    if (aIsTodo)
-      this.result = "TEST-UNEXPECTED-PASS";
-    else
-      this.result = "TEST-UNEXPECTED-FAIL";
+    if (aIsTodo) {
+      this.status = "PASS";
+      this.expected = "FAIL";
+    } else {
+      this.status = "FAIL";
+      this.expected = "PASS";
+    }
 
     if (gConfig.debugOnFailure) {
       // You've hit this line because you requested to break into the
       // debugger upon a testcase failure on your test run.
       debugger;
     }
   }
 }
 
 function testMessage(aName) {
   this.msg = aName || "";
   this.info = true;
-  this.result = "TEST-INFO";
 }
 
 // Need to be careful adding properties to this object, since its properties
 // cannot conflict with global variables used in tests.
 function testScope(aTester, aTest) {
   this.__tester = aTester;
 
   var self = this;
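testResult above now records (status, expected) pairs instead of TEST-* strings, leaving the human-readable prefix to formatters. Roughly the derivation a TBPL-style formatter performs (a sketch; mozlog's actual TbplFormatter wording may differ):

    def legacy_prefix(status, expected='PASS'):
        # Any status differing from the expectation is "unexpected".
        if status == expected:
            return 'TEST-PASS' if status == 'PASS' else 'TEST-KNOWN-FAIL'
        return 'TEST-UNEXPECTED-%s' % status

    assert legacy_prefix('PASS', 'PASS') == 'TEST-PASS'
    assert legacy_prefix('FAIL', 'FAIL') == 'TEST-KNOWN-FAIL'
    assert legacy_prefix('FAIL', 'PASS') == 'TEST-UNEXPECTED-FAIL'
    assert legacy_prefix('PASS', 'FAIL') == 'TEST-UNEXPECTED-PASS'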
--- a/testing/mochitest/chrome-harness.js
+++ b/testing/mochitest/chrome-harness.js
@@ -4,30 +4,30 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 
 Components.utils.import("resource://gre/modules/NetUtil.jsm");
 
 /*
  * getChromeURI converts a URL to a URI
- * 
+ *
  * url: string of a URL (http://mochi.test/test.html)
  * returns: an nsIURI object representing the given URL
  *
  */
 function getChromeURI(url) {
   var ios = Components.classes["@mozilla.org/network/io-service;1"].
               getService(Components.interfaces.nsIIOService);
   return ios.newURI(url, null, null);
 }
 
 /*
  * Convert a URL (string) into an nsIURI or nsIJARURI
- * This is intended for URL's that are on a file system 
+ * This is intended for URLs that are on a file system
  * or packaged up in an extension .jar file
  *
  * url: a string of a url on the local system (http://localhost/blah.html)
  */
 function getResolvedURI(url) {
   var chromeURI = getChromeURI(url);
   var resolvedURI = Components.classes["@mozilla.org/chrome/chrome-registry;1"].
                     getService(Components.interfaces.nsIChromeRegistry).
@@ -59,17 +59,17 @@ function getChromeDir(resolvedURI) {
 
 /*
  * given a .jar file, we get all test files located inside the archive
  *
  * aBasePath: base URL to determine chrome location and search for tests
  * aTestPath: passed in testPath value from command line such as: dom/tests/mochitest
  * aDir: the test dir to append to the baseURL after getting a directory interface
  *
- * As a note, this is hardcoded to the .jar structure we use for mochitest.  
+ * As a note, this is hardcoded to the .jar structure we use for mochitest.
  * Please don't assume this works for all jar files.
  */
 function getMochitestJarListing(aBasePath, aTestPath, aDir)
 {
   var zReader = Components.classes["@mozilla.org/libjar/zip-reader;1"].
                   createInstance(Components.interfaces.nsIZipReader);
   var fileHandler = Components.classes["@mozilla.org/network/protocol;1?name=file"].
                     getService(Components.interfaces.nsIFileProtocolHandler);
@@ -117,21 +117,21 @@ function getMochitestJarListing(aBasePat
  * returns:
  *  [json object of {dir:{subdir:{file:true, file:true, ...}}}, count of tests]
  */
 function zList(base, zReader, baseJarName, recurse) {
   var dirs = zReader.findEntries(base + "*");
   var links = {};
   var count = 0;
   var fileArray = [];
-  
+
   while(dirs.hasMore()) {
     var entryName = dirs.getNext();
     if (entryName.substr(-1) == '/' && entryName.split('/').length == (base.split('/').length + 1) ||
-        (entryName.substr(-1) != '/' && entryName.split('/').length == (base.split('/').length))) { 
+        (entryName.substr(-1) != '/' && entryName.split('/').length == (base.split('/').length))) {
       fileArray.push(entryName);
     }
   }
   fileArray.sort();
   count = fileArray.length;
   for (var i=0; i < fileArray.length; i++) {
     var myFile = fileArray[i];
     if (myFile.substr(-1) === '/' && recurse) {
@@ -173,17 +173,17 @@ function getFileListing(basePath, testPa
   var ioSvc = Components.classes["@mozilla.org/network/io-service;1"].
               getService(Components.interfaces.nsIIOService);
   var testsDirURI = ioSvc.newFileURI(chromeDir);
   var testsDir = ioSvc.newURI(testPath, null, testsDirURI)
                   .QueryInterface(Components.interfaces.nsIFileURL).file;
 
   if (testPath != undefined) {
     var extraPath = testPath;
-    
+
     var fileNameRegexp = /(browser|test)_.+\.(xul|html|js)$/;
 
     // Invalid testPath...
     if (!testsDir.exists())
       return null;
 
     if (testsDir.isFile()) {
       if (fileNameRegexp.test(testsDir.leafName)) {
@@ -336,17 +336,17 @@ function getTestFilePath(path) {
         } else if (p != ".") {
           file.append(p);
         }
       });
   return file.path;
 }
 
 /*
- * Simple utility function to take the directory structure in jarentryname and 
+ * Simple utility function to take the directory structure in jarentryname and
  * translate that to a path of an nsILocalFile.
  */
 function buildRelativePath(jarentryname, destdir, basepath)
 {
   var baseParts = basepath.split('/');
   if (baseParts[baseParts.length-1] == '') {
     baseParts.pop();
   }
--- a/testing/mochitest/runtests.py
+++ b/testing/mochitest/runtests.py
@@ -19,74 +19,203 @@ import mozcrash
 import mozinfo
 import mozprocess
 import mozrunner
 import optparse
 import re
 import shutil
 import signal
 import subprocess
+import sys
 import tempfile
 import time
 import traceback
 import urllib2
 import zipfile
 import bisection
 
 from automationutils import environment, getDebuggerInfo, isURL, KeyValueParseError, parseKeyValue, processLeakLog, dumpScreen, ShutdownLeaks, printstatus, LSANLeaks
 from datetime import datetime
 from manifestparser import TestManifest
 from mochitest_options import MochitestOptions
 from mozprofile import Profile, Preferences
 from mozprofile.permissions import ServerLocations
 from urllib import quote_plus as encodeURIComponent
+from mozlog.structured.formatters import TbplFormatter
+from mozlog.structured.handlers import StreamHandler
+from mozlog.structured.structuredlog import StructuredLogger
 
 # This should use the `which` module already in tree, but it is
 # not yet present in the mozharness environment
 from mozrunner.utils import findInPath as which
 
-# set up logging handler a la automation.py.in for compatability
-import logging
-log = logging.getLogger()
-def resetGlobalLog():
-  while log.handlers:
-    log.removeHandler(log.handlers[0])
-  handler = logging.StreamHandler(sys.stdout)
-  log.setLevel(logging.INFO)
-  log.addHandler(handler)
-resetGlobalLog()
-
 ###########################
 # Option for NSPR logging #
 ###########################
 
 # Set the desired log modules you want an NSPR log be produced by a try run for, or leave blank to disable the feature.
 # This will be passed to NSPR_LOG_MODULES environment variable. Try run will then put a download link for the log file
 # on tbpl.mozilla.org.
 
 NSPR_LOG_MODULES = ""
 
 ####################
+# LOG HANDLING     #
+####################
+
+### output formatting
+class MochitestFormatter(TbplFormatter):
+    log_num = 0
+
+    def __init__(self):
+        super(MochitestFormatter, self).__init__()
+
+    def __call__(self, data):
+        tbpl_output = super(MochitestFormatter, self).__call__(data)
+        output = '%d INFO %s' % (MochitestFormatter.log_num, tbpl_output)
+        MochitestFormatter.log_num += 1
+        return output
+
+### output processing
+class MessageLogger(object):
+    """File-like object for logging messages (structured logs)"""
+    BUFFERING_THRESHOLD = 100
+    # Delimiter used by the JS side to keep log messages from interleaving
+    DELIMITER = u'\ue175\uee31\u2c32\uacbf'
+    BUFFERED_ACTIONS = set(['test_status', 'log'])
+    VALID_ACTIONS = set(['suite_start', 'suite_end', 'test_start', 'test_end',
+                         'test_status', 'log',
+                         'buffering_on', 'buffering_off'])
+
+    def __init__(self, logger, buffering=True, name='mochitest'):
+        self.logger = logger
+        self.buffering = buffering
+        self.tests_started = False
+
+        # Message buffering
+        self.buffered_messages = []
+
+    def valid_message(self, obj):
+        """True if the given object is a valid structured message (only does a superficial validation)"""
+        return isinstance(obj, dict) and 'action' in obj and obj['action'] in MessageLogger.VALID_ACTIONS
+
+    def parse_line(self, line):
+        """Takes a given line of input (structured or not) and returns a list of structured messages"""
+        line = line.rstrip().decode("UTF-8", "replace")
+
+        messages = []
+        for fragment in line.split(MessageLogger.DELIMITER):
+            if not fragment:
+                continue
+            try:
+                message = json.loads(fragment)
+                if not self.valid_message(message):
+                    message = dict(action='log', level='info', message=fragment, unstructured=True)
+            except ValueError:
+                message = dict(action='log', level='info', message=fragment, unstructured=True)
+            messages.append(message)
+
+        return messages
+
+    def process_message(self, message):
+        """Processes a structured message. Takes into account buffering, errors, ..."""
+        if not self.tests_started and message['action'] == 'test_start':
+            self.tests_started = True
+
+        # Activating/deactivating message buffering from the JS side
+        if message['action'] == 'buffering_on':
+            self.buffering = True
+            return
+        if message['action'] == 'buffering_off':
+            self.buffering = False
+            return
+
+        unstructured = False
+        if 'unstructured' in message:
+            unstructured = True
+            message.pop('unstructured')
+
+        # Log the message directly if buffering is disabled, the tests haven't started, or the message is unstructured
+        if not self.buffering or unstructured or not self.tests_started:
+            self.logger.log_raw(message)
+            return
+
+        # If a test ended, clear the buffer
+        if message['action'] == 'test_end':
+            self.buffered_messages = []
+
+        # Buffering logic; also supports "raw" errors (in log messages) because some tests manually dump 'TEST-UNEXPECTED-FAIL'
+        is_error = 'expected' in message or (message['action'] == 'log' and message['message'].startswith('TEST-UNEXPECTED'))
+        if not is_error and message['action'] not in self.BUFFERED_ACTIONS:
+            self.logger.log_raw(message)
+            return
+
+        # On error, dump the buffered context before logging the error itself
+        if is_error:
+            if self.buffered_messages:
+                number_messages = min(self.BUFFERING_THRESHOLD, len(self.buffered_messages))
+                self.logger.info("dumping last {0} message(s)".format(number_messages))
+                self.logger.info("if you need more context, please use SimpleTest.requestCompleteLog() in your test")
+                # Dumping previously buffered messages
+                self.dump_buffered(limit=True)
+
+            # Logging the error message
+            self.logger.log_raw(message)
+        else:
+            # Buffering the message
+            self.buffered_messages.append(message)
+
+    def write(self, line):
+        messages = self.parse_line(line)
+        for message in messages:
+            self.process_message(message)
+        return messages
+
+    def flush(self):
+        sys.stdout.flush()
+
+    def dump_buffered(self, limit=False):
+        if limit:
+            dumped_messages = self.buffered_messages[-self.BUFFERING_THRESHOLD:]
+        else:
+            dumped_messages = self.buffered_messages
+
+        for buf_msg in dumped_messages:
+            self.logger.log_raw(buf_msg)
+        # Cleaning the list of buffered messages
+        self.buffered_messages = []
+
+    def finish(self):
+        self.dump_buffered()
+        self.buffering = False
+        self.logger.suite_end()
+
+# Global logger
+log = StructuredLogger('mochitest')
+stream_handler = StreamHandler(stream=sys.stdout, formatter=MochitestFormatter())
+log.add_handler(stream_handler)
+
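
(A minimal sketch, not part of the patch, of the logger wiring above; the
'sketch' logger name is hypothetical, and the exact rendering of the message
body is TbplFormatter's. Note that log_num is a class attribute, so the
counter is shared by every handler using this formatter.)

    sketch = StructuredLogger('sketch')
    sketch.add_handler(StreamHandler(stream=sys.stdout,
                                     formatter=MochitestFormatter()))
    sketch.info('runtests.py | Application pid: 1234')
    # Expected shape (counter, "INFO", then the TBPL-formatted message):
    #   0 INFO runtests.py | Application pid: 1234
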
+####################
 # PROCESS HANDLING #
 ####################
 
 def call(*args, **kwargs):
   """front-end function to mozprocess.ProcessHandler"""
   # TODO: upstream -> mozprocess
   # https://bugzilla.mozilla.org/show_bug.cgi?id=791383
   process = mozprocess.ProcessHandler(*args, **kwargs)
   process.run()
   return process.wait()
 
 def killPid(pid):
   # see also https://bugzilla.mozilla.org/show_bug.cgi?id=911249#c58
   try:
     os.kill(pid, getattr(signal, "SIGKILL", signal.SIGTERM))
   except Exception, e:
-    log.info("Failed to kill process %d: %s", pid, str(e))
+    log.info("Failed to kill process %d: %s" % (pid, str(e)))
 
 if mozinfo.isWin:
   import ctypes, ctypes.wintypes, time, msvcrt
 
   def isPidAlive(pid):
     STILL_ACTIVE = 259
     PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
     pHandle = ctypes.windll.kernel32.OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, 0, pid)
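
(A self-contained sketch, not part of the patch, mirroring the
MessageLogger.parse_line contract above; the fragments are hypothetical.)

    import json

    DELIMITER = u'\ue175\uee31\u2c32\uacbf'
    VALID_ACTIONS = set(['suite_start', 'suite_end', 'test_start', 'test_end',
                         'test_status', 'log', 'buffering_on', 'buffering_off'])

    def parse(line):
        # Split on the delimiter, try to JSON-decode each fragment, and wrap
        # anything else as an unstructured 'log' message.
        messages = []
        for fragment in line.rstrip().split(DELIMITER):
            if not fragment:
                continue
            try:
                message = json.loads(fragment)
                if not (isinstance(message, dict) and
                        message.get('action') in VALID_ACTIONS):
                    raise ValueError(fragment)
            except ValueError:
                message = dict(action='log', level='info',
                               message=fragment, unstructured=True)
            messages.append(message)
        return messages

    print parse(json.dumps({'action': 'test_start', 'test': 't.html'})
                + DELIMITER + u'plain browser output')
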
@@ -168,19 +297,19 @@ class MochitestServer(object):
                     "testPrefix" : self.testPrefix, "displayResults" : str(not self._closeWhenDone).lower() },
             "-f", os.path.join(SCRIPT_DIR, "server.js")]
 
     xpcshell = os.path.join(self._utilityPath,
                             "xpcshell" + mozinfo.info['bin_suffix'])
     command = [xpcshell] + args
     self._process = mozprocess.ProcessHandler(command, cwd=SCRIPT_DIR, env=env)
     self._process.run()
-    log.info("%s : launching %s", self.__class__.__name__, command)
+    log.info("%s : launching %s" % (self.__class__.__name__, command))
     pid = self._process.pid
-    log.info("runtests.py | Server pid: %d", pid)
+    log.info("runtests.py | Server pid: %d" % pid)
 
   def ensureReady(self, timeout):
     assert timeout >= 0
 
     aliveFile = os.path.join(self._profileDir, "server_alive.txt")
     i = 0
     while i < timeout:
       if os.path.exists(aliveFile):
@@ -234,17 +363,17 @@ class WebSocketServer(object):
         cmd += ['--interactive']
     cmd += ['-p', str(self.port), '-w', self._scriptdir, '-l',      \
            os.path.join(self._scriptdir, "websock.log"),            \
            '--log-level=debug', '--allow-handlers-outside-root-dir']
     # start the process
     self._process = mozprocess.ProcessHandler(cmd, cwd=SCRIPT_DIR)
     self._process.run()
     pid = self._process.pid
-    log.info("runtests.py | Websocket server pid: %d", pid)
+    log.info("runtests.py | Websocket server pid: %d" % pid)
 
   def stop(self):
     self._process.kill()
 
 class MochitestUtilsMixin(object):
   """
   Class containing some utility functions common to both local and remote
   mochitest runners
@@ -373,18 +502,16 @@ class MochitestUtilsMixin(object):
       if options.debugOnFailure:
         self.urlOpts.append("debugOnFailure=true")
       if options.dumpOutputDirectory:
         self.urlOpts.append("dumpOutputDirectory=%s" % encodeURIComponent(options.dumpOutputDirectory))
       if options.dumpAboutMemoryAfterTest:
         self.urlOpts.append("dumpAboutMemoryAfterTest=true")
       if options.dumpDMDAfterTest:
         self.urlOpts.append("dumpDMDAfterTest=true")
-      if options.quiet:
-        self.urlOpts.append("quiet=true")
 
   def getTestFlavor(self, options):
     if options.browserChrome:
       return "browser-chrome"
     elif options.chrome:
       return "chrome"
     elif options.a11y:
       return "a11y"
@@ -466,16 +593,20 @@ class MochitestUtilsMixin(object):
     tests = self.getActiveTests(options, disabled)
     paths = []
 
     for test in tests:
       if testsToFilter and (test['path'] not in testsToFilter):
         continue
       paths.append(test)
 
+    # suite_start message
+    flat_paths = [p['path'] for p in paths]
+    self.message_logger.logger.suite_start(flat_paths)
+
     # Bug 883865 - add this functionality into manifestparser
     with open(os.path.join(SCRIPT_DIR, 'tests.json'), 'w') as manifestFile:
       manifestFile.write(json.dumps({'tests': paths}))
     options.manifestFile = 'tests.json'
 
     return self.buildTestURL(options)
 
   def startWebSocketServer(self, options, debuggerInfo):
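
(A minimal sketch, not part of the patch, of the suite_start contract added
above; the paths are hypothetical. The logger receives one flat list of every
test that will run, before any test_start message is emitted.)

    paths = [{'path': 'dom/tests/mochitest/test_a.html'},
             {'path': 'dom/tests/mochitest/test_b.html'}]
    flat_paths = [p['path'] for p in paths]
    log.suite_start(flat_paths)
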
@@ -525,43 +656,43 @@ class MochitestUtilsMixin(object):
   def stopServers(self):
     """Servers are no longer needed, and perhaps more importantly, anything they
         might spew to console might confuse things."""
     if self.server is not None:
       try:
         log.info('Stopping web server')
         self.server.stop()
       except Exception:
-        log.exception('Exception when stopping web server')
+        log.critical('Exception when stopping web server')
 
     if self.wsserver is not None:
       try:
         log.info('Stopping web socket server')
         self.wsserver.stop()
       except Exception:
-        log.exception('Exception when stopping web socket server');
+        log.critical('Exception when stopping web socket server')
 
     if self.sslTunnel is not None:
       try:
         log.info('Stopping ssltunnel')
         self.sslTunnel.stop()
       except Exception:
-        log.exception('Exception stopping ssltunnel');
+        log.critical('Exception stopping ssltunnel')
 
   def copyExtraFilesToProfile(self, options):
     "Copy extra files or dirs specified on the command line to the testing profile."
     for f in options.extraProfileFiles:
       abspath = self.getFullPath(f)
       if os.path.isfile(abspath):
         shutil.copy2(abspath, options.profilePath)
       elif os.path.isdir(abspath):
         dest = os.path.join(options.profilePath, os.path.basename(abspath))
         shutil.copytree(abspath, dest)
       else:
-        log.warning("runtests.py | Failed to copy %s to profile", abspath)
+        log.warning("runtests.py | Failed to copy %s to profile" % abspath)
 
   def installChromeJar(self, chrome, options):
     """
       copy mochijar directory to profile as an extension so we have chrome://mochikit for all harness code
     """
     # Write chrome.manifest.
     with open(os.path.join(options.profilePath, "extensions", "staged", "mochikit@mozilla.org", "chrome.manifest"), "a") as mfile:
       mfile.write(chrome)
@@ -596,17 +727,17 @@ toolbar#nav-bar {
       manifestFile.write("content mochitests %s contentaccessible=yes\n" % chrometestDir)
 
       if options.testingModulesDir is not None:
         manifestFile.write("resource testing-common file:///%s\n" %
           options.testingModulesDir)
 
     # Call installChromeJar().
     if not os.path.isdir(os.path.join(SCRIPT_DIR, self.jarDir)):
-      log.testFail("invalid setup: missing mochikit extension")
+      log.error("TEST-UNEXPECTED-FAIL | invalid setup: missing mochikit extension")
       return None
 
     # Support Firefox (browser), B2G (shell), SeaMonkey (navigator), and Webapp
     # Runtime (webapp).
     chrome = ""
     if options.browserChrome or options.chrome or options.a11y or options.webapprtChrome:
       chrome += """
 overlay chrome://browser/content/browser.xul chrome://mochikit/content/browser-test-overlay.xul
@@ -702,17 +833,17 @@ class SSLTunnel:
       log.error("INFO | runtests.py | expected to find ssltunnel at %s", ssltunnel)
       exit(1)
 
     env = environment(xrePath=self.xrePath)
     env["LD_LIBRARY_PATH"] = self.xrePath
     self.process = mozprocess.ProcessHandler([ssltunnel, self.configFile],
                                                env=env)
     self.process.run()
-    log.info("INFO | runtests.py | SSL tunnel pid: %d", self.process.pid)
+    log.info("runtests.py | SSL tunnel pid: %d" % self.process.pid)
 
   def stop(self):
     """ Stops the SSL Tunnel and cleans up """
     if self.process is not None:
       self.process.kill()
     if os.path.exists(self.configFile):
       os.remove(self.configFile)
 
@@ -836,16 +967,19 @@ class Mochitest(MochitestUtilsMixin):
 
   # XXX use automation.py for test name to avoid breaking legacy
   # TODO: replace this with 'runtests.py' or 'mochitest' or the like
   test_name = 'automation.py'
 
   def __init__(self):
     super(Mochitest, self).__init__()
 
+    # Structured logs parser
+    self.message_logger = MessageLogger(logger=log)
+
     # environment function for browserEnv
     self.environment = environment
 
     # Max time in seconds to wait for server startup before tests will fail -- if
     # this seems big, it's mostly for debug machines where cold startup
     # (particularly after a build) takes forever.
     self.SERVER_STARTUP_TIMEOUT = 180 if mozinfo.info.get('debug') else 90
 
@@ -979,17 +1113,17 @@ class Mochitest(MochitestUtilsMixin):
 
     manifest = self.addChromeToProfile(options)
     self.copyExtraFilesToProfile(options)
 
     # create certificate database for the profile
     # TODO: this should really be upstreamed somewhere, maybe mozprofile
     certificateStatus = self.fillCertificateDB(options)
     if certificateStatus:
-      log.info("TEST-UNEXPECTED-FAIL | runtests.py | Certificate integration failed")
+      log.error("TEST-UNEXPECTED-FAIL | runtests.py | Certificate integration failed")
       return None
 
     return manifest
 
   def buildBrowserEnv(self, options, debugger=False):
     """build the environment variables for the specific test and operating system"""
     if mozinfo.info["asan"]:
       lsanPath = SCRIPT_DIR
@@ -1035,17 +1169,17 @@ class Mochitest(MochitestUtilsMixin):
       os.remove(self.manifest)
     del self.profile
     if options.pidFile != "":
       try:
         os.remove(options.pidFile)
         if os.path.exists(options.pidFile + ".xpcshell.pid"):
           os.remove(options.pidFile + ".xpcshell.pid")
       except:
-        log.warn("cleaning up pidfile '%s' was unsuccessful from the test harness", options.pidFile)
+        log.warning("cleaning up pidfile '%s' was unsuccessful from the test harness" % options.pidFile)
     options.manifestFile = None
 
   def dumpScreen(self, utilityPath):
     if self.haveDumpedScreen:
       log.info("Not taking screenshot here: see the one that was previously logged")
       return
     self.haveDumpedScreen = True
     dumpScreen(utilityPath)
@@ -1078,38 +1212,38 @@ class Mochitest(MochitestUtilsMixin):
         return
     log.info("Can't trigger Breakpad, just killing process")
     killPid(processPID)
 
   def checkForZombies(self, processLog, utilityPath, debuggerInfo):
     """Look for hung processes"""
 
     if not os.path.exists(processLog):
-      log.info('Automation Error: PID log not found: %s', processLog)
+      log.info('Automation Error: PID log not found: %s' % processLog)
       # Whilst no hung process was found, the run should still display as a failure
       return True
 
     # scan processLog for zombies
-    log.info('INFO | zombiecheck | Reading PID log: %s', processLog)
+    log.info('zombiecheck | Reading PID log: %s' % processLog)
     processList = []
     pidRE = re.compile(r'launched child process (\d+)$')
     with open(processLog) as processLogFD:
       for line in processLogFD:
         log.info(line.rstrip())
         m = pidRE.search(line)
         if m:
           processList.append(int(m.group(1)))
 
     # kill zombies
     foundZombie = False
     for processPID in processList:
-      log.info("INFO | zombiecheck | Checking for orphan process with PID: %d", processPID)
+      log.info("zombiecheck | Checking for orphan process with PID: %d" % processPID)
       if isPidAlive(processPID):
         foundZombie = True
-        log.info("TEST-UNEXPECTED-FAIL | zombiecheck | child process %d still alive after shutdown", processPID)
+        log.error("TEST-UNEXPECTED-FAIL | zombiecheck | child process %d still alive after shutdown" % processPID)
         self.killAndGetStack(processPID, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)
 
     return foundZombie
 
   def startVMwareRecording(self, options):
     """ starts recording inside VMware VM using the recording helper dll """
     assert mozinfo.isWin
     from ctypes import cdll
@@ -1149,22 +1283,26 @@ class Mochitest(MochitestUtilsMixin):
              utilityPath,
              debuggerInfo=None,
              symbolsPath=None,
              timeout=-1,
              onLaunch=None,
              webapprtChrome=False,
              screenshotOnFail=False,
              testPath=None,
-             bisectChunk=None):
+             bisectChunk=None,
+             quiet=False):
     """
     Run the app, log the duration it took to execute, return the status code.
     Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds.
     """
 
+    # configure the message logger buffering
+    self.message_logger.buffering = quiet
+
     # debugger information
     interactive = False
     debug_args = None
     if debuggerInfo:
         interactive = debuggerInfo['interactive']
         debug_args = [debuggerInfo['path']] + debuggerInfo['args']
 
     # fix default timeout
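
(A hypothetical walk-through, not part of the patch, of the quiet/buffering
contract wired up above, assuming the MessageLogger class and global log
defined earlier in this file.)

    ml = MessageLogger(logger=log, buffering=True)  # i.e. running with --quiet
    ml.process_message({'action': 'test_start', 'test': 't.html'})  # logged
    ml.process_message({'action': 'log', 'level': 'info',
                        'message': 'harmless chatter'})             # buffered
    ml.process_message({'action': 'test_status', 'test': 't.html',
                        'subtest': 'check', 'status': 'FAIL',
                        'expected': 'PASS'})
    # The last message is an unexpected result: the buffered chatter is
    # replayed (up to BUFFERING_THRESHOLD messages), then the failure itself
    # is logged.
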
@@ -1248,17 +1386,17 @@ class Mochitest(MochitestUtilsMixin):
                           process_class=mozprocess.ProcessHandlerMixin,
                           process_args=kp_kwargs)
 
       # start the runner
       runner.start(debug_args=debug_args,
                    interactive=interactive,
                    outputTimeout=timeout)
       proc = runner.process_handler
-      log.info("INFO | runtests.py | Application pid: %d", proc.pid)
+      log.info("runtests.py | Application pid: %d" % proc.pid)
 
       if onLaunch is not None:
         # Allow callers to specify an onLaunch callback to be fired after the
         # app is launched.
         # We call onLaunch for b2g desktop mochitests so that we can
         # run a Marionette script after gecko has completed startup.
         onLaunch()
 
@@ -1276,21 +1414,22 @@ class Mochitest(MochitestUtilsMixin):
       else:
         didTimeout = proc.didTimeout
 
       # finalize output handler
       outputHandler.finish(didTimeout)
 
       # record post-test information
       if status:
-        log.info("TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s", self.lastTestSeen, status)
+        self.message_logger.dump_buffered()
+        log.error("TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s" % (self.lastTestSeen, status))
       else:
         self.lastTestSeen = 'Main app process exited normally'
 
-      log.info("INFO | runtests.py | Application ran for: %s", str(datetime.now() - startTime))
+      log.info("runtests.py | Application ran for: %s" % str(datetime.now() - startTime))
 
       # Do a final check for zombie child processes.
       zombieProcesses = self.checkForZombies(processLog, utilityPath, debuggerInfo)
 
       # check for crashes
       minidump_path = os.path.join(self.profile.profile, "minidumps")
       crashed = mozcrash.check_for_crashes(minidump_path,
                                            symbolsPath,
@@ -1562,17 +1701,18 @@ class Mochitest(MochitestUtilsMixin):
                              utilityPath=options.utilityPath,
                              debuggerInfo=debuggerInfo,
                              symbolsPath=options.symbolsPath,
                              timeout=timeout,
                              onLaunch=onLaunch,
                              webapprtChrome=options.webapprtChrome,
                              screenshotOnFail=options.screenshotOnFail,
                              testPath=options.testPath,
-                             bisectChunk=options.bisectChunk
+                             bisectChunk=options.bisectChunk,
+                             quiet=options.quiet
         )
       except KeyboardInterrupt:
         log.info("runtests.py | Received keyboard interrupt.\n");
         status = -1
       except:
         traceback.print_exc()
         log.error("Automation Error: Received unexpected exception while running application\n")
         status = 1
@@ -1587,32 +1727,39 @@ class Mochitest(MochitestUtilsMixin):
     if self.nsprLogs:
       with zipfile.ZipFile("%s/nsprlog.zip" % browserEnv["MOZ_UPLOAD_DIR"], "w", zipfile.ZIP_DEFLATED) as logzip:
         for logfile in glob.glob("%s/nspr*.log*" % tempfile.gettempdir()):
           logzip.write(logfile)
           os.remove(logfile)
 
     log.info("runtests.py | Running tests: end.")
 
+    self.message_logger.finish()
+
     if self.manifest is not None:
       self.cleanup(options)
 
     return status
 
   def handleTimeout(self, timeout, proc, utilityPath, debuggerInfo, browserProcessId, testPath=None):
     """handle process output timeout"""
    # TODO: bug 913975 : _processOutput should call self.processOutputLine one more time on timeout (I think)
     if testPath:
-      log.info("TEST-UNEXPECTED-FAIL | %s | application timed out after %d seconds with no output on %s", self.lastTestSeen, int(timeout), testPath)
+      error_message = "TEST-UNEXPECTED-TIMEOUT | %s | application timed out after %d seconds with no output on %s" % (self.lastTestSeen, int(timeout), testPath)
     else:
-      log.info("TEST-UNEXPECTED-FAIL | %s | application timed out after %d seconds with no output", self.lastTestSeen, int(timeout))
+      error_message = "TEST-UNEXPECTED-TIMEOUT | %s | application timed out after %d seconds with no output" % (self.lastTestSeen, int(timeout))
+
+    self.message_logger.dump_buffered()
+    self.message_logger.buffering = False
+    log.error(error_message)
+
     browserProcessId = browserProcessId or proc.pid
     self.killAndGetStack(browserProcessId, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)
 
-  ### output processing
+
 
   class OutputHandler(object):
     """line output handler for mozrunner"""
     def __init__(self, harness, utilityPath, symbolsPath=None, dump_screen_on_timeout=True, dump_screen_on_fail=False, shutdownLeaks=None, lsanLeaks=None, bisectChunk=None):
       """
       harness -- harness instance
       dump_screen_on_timeout -- whether to dump the screen on timeout
       """
@@ -1633,36 +1780,44 @@ class Mochitest(MochitestUtilsMixin):
       # pick up on and parse out. This variable tracks the real browser process id if we find it.
       self.browserProcessId = None
 
       # stack fixer function and/or process
       self.stackFixerFunction, self.stackFixerProcess = self.stackFixer()
 
     def processOutputLine(self, line):
       """per line handler of output for mozprocess"""
-      for handler in self.outputHandlers():
-        line = handler(line)
-      if self.bisectChunk:
-        self.record_result(line)
-        self.first_error(line)
+      # Parse the line into structured messages.
+      messages = self.harness.message_logger.parse_line(line)
+
+      for message in messages:
+        # Pass the message through each output handler in order.
+        for handler in self.outputHandlers():
+          message = handler(message)
+
+        # Let the message logger log or buffer the handled message.
+        self.harness.message_logger.process_message(message)
+
     __call__ = processOutputLine
 
     def outputHandlers(self):
       """returns ordered list of output handlers"""
-      return [self.fix_stack,
-              self.format,
-              self.record_last_test,
-              self.dumpScreenOnTimeout,
-              self.dumpScreenOnFail,
-              self.metro_subprocess_id,
-              self.trackShutdownLeaks,
-              self.trackLSANLeaks,
-              self.log,
-              self.countline,
-              ]
+      handlers = [self.fix_stack,
+                  self.record_last_test,
+                  self.dumpScreenOnTimeout,
+                  self.dumpScreenOnFail,
+                  self.trackShutdownLeaks,
+                  self.trackLSANLeaks,
+                  self.countline,
+                  ]
+      if self.bisectChunk:
+        handlers.append(self.record_result)
+        handlers.append(self.first_error)
+
+      return handlers
 
     def stackFixer(self):
       """
       return 2-tuple, (stackFixerFunction, StackFixerProcess),
       if any, to use on the output lines
       """
 
       if not mozinfo.info.get('debug'):
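
(A minimal sketch, not part of the patch, of the message-to-message handler
contract listed above; the handler below is hypothetical.)

    def ensure_level(message):
        # Every handler takes a structured message and must return one.
        if message['action'] == 'log':
            message.setdefault('level', 'info')
        return message

    message = {'action': 'log', 'message': 'example output'}
    for handler in [ensure_level]:
        message = handler(message)
    # The handled message then goes to message_logger.process_message.
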
@@ -1696,109 +1851,99 @@ class Mochitest(MochitestUtilsMixin):
 
       return (stackFixerFunction, stackFixerProcess)
 
     def finish(self, didTimeout):
       if self.stackFixerProcess:
         self.stackFixerProcess.communicate()
         status = self.stackFixerProcess.returncode
         if status and not didTimeout:
-          log.info("TEST-UNEXPECTED-FAIL | runtests.py | Stack fixer process exited with code %d during test run", status)
+          log.info("TEST-UNEXPECTED-FAIL | runtests.py | Stack fixer process exited with code %d during test run" % status)
 
       if self.shutdownLeaks:
         self.shutdownLeaks.process()
 
       if self.lsanLeaks:
         self.lsanLeaks.process()
 
+    # output message handlers:
+    # these take a message and return a message
 
-    # output line handlers:
-    # these take a line and return a line
-    def record_result(self, line):
-      if "TEST-START" in line: #by default make the result key equal to pass.
-        key = line.split('|')[-1].split('/')[-1].strip()
+    def record_result(self, message):
+      if message['action'] == 'test_start':  # By default, make the result key equal to PASS.
+        key = message['test'].split('/')[-1].strip()
         self.harness.result[key] = "PASS"
-      elif "TEST-UNEXPECTED" in line:
-        key = line.split('|')[-2].split('/')[-1].strip()
-        self.harness.result[key] = "FAIL"
-      elif "TEST-KNOWN-FAIL" in line:
-        key = line.split('|')[-2].split('/')[-1].strip()
-        self.harness.result[key] = "TODO"
-      return line
+      elif message['action'] in ['test_end', 'test_status']:
+        if 'expected' in message:
+          key = message['test'].split('/')[-1].strip()
+          self.harness.result[key] = "FAIL"
+        elif message['status'] == 'FAIL':
+          key = message['test'].split('/')[-1].strip()
+          self.harness.result[key] = "TODO"
+      return message
 
-    def first_error(self, line):
-      if "TEST-UNEXPECTED-FAIL" in line:
-        key = line.split('|')[-2].split('/')[-1].strip()
+    def first_error(self, message):
+      if 'expected' in message and message['status'] == 'FAIL':
+        key = message['test'].split('/')[-1].strip()
         if key not in self.harness.expectedError:
-          self.harness.expectedError[key] = line.split('|')[-1].strip()
-      return line
+          self.harness.expectedError[key] = message['message'].strip()
+      return message
 
-    def countline(self, line):
+    def countline(self, message):
+      if message['action'] != 'log':
+        return message
+      line = message['message']
       val = 0
       try:
         val = int(line.split(':')[-1].strip())
-      except ValueError, e:
-        return line
+      except ValueError:
+        return message
 
       if "Passed:" in line:
         self.harness.countpass += val
       elif "Failed:" in line:
         self.harness.countfail += val
       elif "Todo:" in line:
         self.harness.counttodo += val
-      return line
+      return message
 
-    def fix_stack(self, line):
-      if self.stackFixerFunction:
-        return self.stackFixerFunction(line)
-      return line
-
-    def format(self, line):
-      """format the line"""
-      return line.rstrip().decode("UTF-8", "ignore")
+    def fix_stack(self, message):
+      if message['action'] == 'log' and self.stackFixerFunction:
+        message['message'] = self.stackFixerFunction(message['message'])
+      return message
 
-    def record_last_test(self, line):
+    def record_last_test(self, message):
       """record last test on harness"""
-      if "TEST-START" in line and "|" in line:
-        self.harness.lastTestSeen = line.split("|")[1].strip()
-      return line
+      if message['action'] == 'test_start':
+        self.harness.lastTestSeen = message['test']
+      return message
 
-    def dumpScreenOnTimeout(self, line):
-      if not self.dump_screen_on_fail and self.dump_screen_on_timeout and "TEST-UNEXPECTED-FAIL" in line and "Test timed out" in line:
-        self.harness.dumpScreen(self.utilityPath)
-      return line
-
-    def dumpScreenOnFail(self, line):
-      if self.dump_screen_on_fail and "TEST-UNEXPECTED-FAIL" in line:
+    def dumpScreenOnTimeout(self, message):
+      if (not self.dump_screen_on_fail
+          and self.dump_screen_on_timeout
+          and 'expected' in message and message['status'] == 'FAIL'
+          and 'message' in message
+          and "Test timed out" in message['message']):
         self.harness.dumpScreen(self.utilityPath)
-      return line
+      return message
+
+    def dumpScreenOnFail(self, message):
+      if self.dump_screen_on_fail and 'expected' in message and message['status'] == 'FAIL':
+        self.harness.dumpScreen(self.utilityPath)
+      return message
 
-    def metro_subprocess_id(self, line):
-      """look for metro browser subprocess id"""
-      if "METRO_BROWSER_PROCESS" in line:
-        index = line.find("=")
-        if index != -1:
-          self.browserProcessId = line[index+1:].rstrip()
-          log.info("INFO | runtests.py | metro browser sub process id detected: %s", self.browserProcessId)
-      return line
+    def trackLSANLeaks(self, message):
+      if self.lsanLeaks and message['action'] == 'log':
+        self.lsanLeaks.log(message['message'])
+      return message
 
-    def trackShutdownLeaks(self, line):
+    def trackShutdownLeaks(self, message):
       if self.shutdownLeaks:
-        self.shutdownLeaks.log(line)
-      return line
-
-    def trackLSANLeaks(self, line):
-      if self.lsanLeaks:
-        self.lsanLeaks.log(line)
-      return line
-
-    def log(self, line):
-      log.info(line)
-      return line
-
+        self.shutdownLeaks.log(message)
+      return message
 
   def makeTestConfig(self, options):
     "Creates a test configuration file for customizing test execution."
     options.logFile = options.logFile.replace("\\", "\\\\")
     options.testPath = options.testPath.replace("\\", "\\\\")
 
     if "MOZ_HIDE_RESULTS_TABLE" in os.environ and os.environ["MOZ_HIDE_RESULTS_TABLE"] == "1":
       options.hideResultsTable = True
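
(A self-contained sketch, not part of the patch, mirroring record_result's
mapping from structured messages to bisection results; the messages are
hypothetical.)

    def classify(message):
        # PASS is assumed at test_start; an 'expected' field marks an
        # unexpected outcome (FAIL); a plain FAIL status is a known
        # failure (TODO).
        if message['action'] == 'test_start':
            return 'PASS'
        if message['action'] in ('test_end', 'test_status'):
            if 'expected' in message:
                return 'FAIL'
            if message.get('status') == 'FAIL':
                return 'TODO'
        return None

    assert classify({'action': 'test_start', 'test': 't.html'}) == 'PASS'
    assert classify({'action': 'test_status', 'test': 't.html',
                     'status': 'FAIL', 'expected': 'PASS'}) == 'FAIL'
    assert classify({'action': 'test_status', 'test': 't.html',
                     'status': 'FAIL'}) == 'TODO'
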
--- a/testing/mochitest/runtestsb2g.py
+++ b/testing/mochitest/runtestsb2g.py
@@ -11,27 +11,28 @@ import tempfile
 import threading
 import traceback
 
 here = os.path.abspath(os.path.dirname(__file__))
 sys.path.insert(0, here)
 
 from runtests import Mochitest
 from runtests import MochitestUtilsMixin
-from runtests import MochitestServer
+from runtests import MessageLogger
+from runtests import MochitestFormatter
 from mochitest_options import B2GOptions, MochitestOptions
-
 from marionette import Marionette
+from mozprofile import Profile, Preferences
+import mozinfo
+from mozlog.structured.handlers import StreamHandler
+from mozlog.structured.structuredlog import StructuredLogger
 
-from mozdevice import DeviceManagerADB
-from mozprofile import Profile, Preferences
-import mozlog
-import mozinfo
-
-log = mozlog.getLogger('Mochitest')
+log = StructuredLogger('Mochitest')
+stream_handler = StreamHandler(stream=sys.stdout, formatter=MochitestFormatter())
+log.add_handler(stream_handler)
 
 class B2GMochitest(MochitestUtilsMixin):
     marionette = None
 
     def __init__(self, marionette_args,
                        out_of_process=True,
                        profile_data_dir=None,
                        locations=os.path.join(here, 'server-locations.txt')):
@@ -40,16 +41,19 @@ class B2GMochitest(MochitestUtilsMixin):
         self.out_of_process = out_of_process
         self.locations_file = locations
         self.preferences = []
         self.webapps = None
         self.test_script = os.path.join(here, 'b2g_start_script.js')
         self.test_script_args = [self.out_of_process]
         self.product = 'b2g'
 
+        # structured logging
+        self.message_logger = MessageLogger(logger=log)
+
         if profile_data_dir:
             self.preferences = [os.path.join(profile_data_dir, f)
                                  for f in os.listdir(profile_data_dir) if f.startswith('pref')]
             self.webapps = [os.path.join(profile_data_dir, f)
                              for f in os.listdir(profile_data_dir) if f.startswith('webapp')]
 
         # mozinfo is populated by the parent class
         if mozinfo.info['debug']:
@@ -114,30 +118,44 @@ class B2GMochitest(MochitestUtilsMixin):
         return manifest
 
     def run_tests(self, options):
         """ Prepare, configure, run tests and cleanup """
 
         manifest = self.build_profile(options)
         self.leak_report_file = os.path.join(options.profilePath, "runtests_leaks.log")
 
+        # configuring the message logger's buffering
+        self.message_logger.buffering = options.quiet
+
         if options.debugger or not options.autorun:
             timeout = None
         else:
             if not options.timeout:
                 if mozinfo.info['debug']:
                     options.timeout = 420
                 else:
                     options.timeout = 300
             timeout = options.timeout + 30.0
 
         log.info("runtestsb2g.py | Running tests: start.")
         status = 0
         try:
+            def on_output(line):
+                messages = self.message_logger.write(line)
+                for message in messages:
+                    if message['action'] == 'test_start':
+                        self.runner.last_test = message['test']
+
+            # The logging will be handled by on_output, so we set the stream to None
+            process_args = {'processOutputLine': on_output,
+                            'stream': None}
+            self.marionette_args['process_args'] = process_args
             self.marionette_args['profile'] = self.profile
+
             self.marionette = Marionette(**self.marionette_args)
             self.runner = self.marionette.runner
             self.app_ctx = self.runner.app_ctx
 
             self.remote_log = posixpath.join(self.app_ctx.remote_test_root,
                                              'log', 'mochitest.log')
             if not self.app_ctx.dm.dirExists(posixpath.dirname(self.remote_log)):
                 self.app_ctx.dm.mkDirs(self.remote_log)
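
(A minimal sketch, not part of the patch, of the on_output contract defined
above, assuming a message_logger and runner in scope; the input line is
hypothetical.)

    line = '{"action": "test_start", "test": "test_b2g.html"}'
    for message in message_logger.write(line):  # parse, then log or buffer
        if message['action'] == 'test_start':
            runner.last_test = message['test']
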
@@ -166,32 +184,34 @@ class B2GMochitest(MochitestUtilsMixin):
             if os.path.isfile(self.test_script):
                 with open(self.test_script, 'r') as script:
                     self.marionette.execute_script(script.read(),
                                                    script_args=self.test_script_args)
             else:
                 self.marionette.execute_script(self.test_script,
                                                script_args=self.test_script_args)
             status = self.runner.wait()
+
             if status is None:
                 # the runner has timed out
                 status = 124
         except KeyboardInterrupt:
             log.info("runtests.py | Received keyboard interrupt.\n");
             status = -1
         except:
             traceback.print_exc()
             log.error("Automation Error: Received unexpected exception while running application\n")
             if hasattr(self, 'runner'):
                 self.runner.check_for_crashes()
             status = 1
 
         self.stopServers()
 
         log.info("runtestsb2g.py | Running tests: end.")
+        self.message_logger.finish()
 
         if manifest is not None:
             self.cleanup(manifest, options)
         return status
 
 
 class B2GDeviceMochitest(B2GMochitest, Mochitest):
     remote_log = None
--- a/testing/mochitest/runtestsremote.py
+++ b/testing/mochitest/runtestsremote.py
@@ -1,39 +1,42 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
-import sys
+import base64
+import json
+import math
 import os
-import time
-import tempfile
 import re
+import shutil
+import sys
+import tempfile
 import traceback
-import shutil
-import math
-import base64
 
 sys.path.insert(0, os.path.abspath(os.path.realpath(os.path.dirname(__file__))))
 
 from automation import Automation
 from remoteautomation import RemoteAutomation, fennecLogcatFilters
-from runtests import Mochitest
-from runtests import MochitestServer
+from runtests import Mochitest, MessageLogger, MochitestFormatter
 from mochitest_options import MochitestOptions
 
 import devicemanager
 import droid
 import manifestparser
 import mozinfo
-import mozlog
 import moznetwork
+from mozlog.structured.handlers import StreamHandler
+from mozlog.structured.structuredlog import StructuredLogger
+
+log = StructuredLogger('Mochi-Remote')
+stream_handler = StreamHandler(stream=sys.stdout, formatter=MochitestFormatter())
+log.add_handler(stream_handler)
 
 SCRIPT_DIR = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
-log = mozlog.getLogger('Mochi-Remote')
 
 class RemoteOptions(MochitestOptions):
 
     def __init__(self, automation, **kwargs):
         defaults = {}
         self._automation = automation or Automation()
         MochitestOptions.__init__(self)
 
@@ -177,29 +180,29 @@ class RemoteOptions(MochitestOptions):
                 log.error("can not use deprecated --robocop-path and replacement --robocop-apk together")
                 return None
             options.robocopApk = os.path.join(options.robocopPath, 'robocop.apk')
             del options.robocopPath
 
         # Robocop specific options
         if options.robocopIni != "":
             if not os.path.exists(options.robocopIni):
-                log.error("Unable to find specified robocop .ini manifest '%s'", options.robocopIni)
+                log.error("Unable to find specified robocop .ini manifest '%s'" % options.robocopIni)
                 return None
             options.robocopIni = os.path.abspath(options.robocopIni)
 
         if options.robocopApk != "":
             if not os.path.exists(options.robocopApk):
-                log.error("Unable to find robocop APK '%s'", options.robocopApk)
+                log.error("Unable to find robocop APK '%s'" % options.robocopApk)
                 return None
             options.robocopApk = os.path.abspath(options.robocopApk)
 
         if options.robocopIds != "":
             if not os.path.exists(options.robocopIds):
-                log.error("Unable to find specified robocop IDs file '%s'", options.robocopIds)
+                log.error("Unable to find specified robocop IDs file '%s'" % options.robocopIds)
                 return None
             options.robocopIds = os.path.abspath(options.robocopIds)
 
         # allow us to keep original application around for cleanup while running robocop via 'am'
         options.remoteappname = options.app
         return options
 
     def verifyOptions(self, options, mochitest):
@@ -220,37 +223,39 @@ class RemoteOptions(MochitestOptions):
 
         return options
 
 class MochiRemote(Mochitest):
 
     _automation = None
     _dm = None
     localProfile = None
-    logLines = []
+    logMessages = []
 
-    def __init__(self, automation, devmgr, options):
+    def __init__(self, automation, devmgr, options, message_logger=None):
         self._automation = automation
         Mochitest.__init__(self)
         self._dm = devmgr
         self.environment = self._automation.environment
         self.remoteProfile = options.remoteTestRoot + "/profile"
         self._automation.setRemoteProfile(self.remoteProfile)
         self.remoteLog = options.remoteLogFile
         self.localLog = options.logFile
         self._automation.deleteANRs()
         self.certdbNew = True
 
+        # structured logging
+        self.message_logger = message_logger or MessageLogger(logger=log)
+
     def cleanup(self, options):
         if self._dm.fileExists(self.remoteLog):
             self._dm.getFile(self.remoteLog, self.localLog)
             self._dm.removeFile(self.remoteLog)
         else:
-            log.warn("Unable to retrieve log file (%s) from remote device",
-                self.remoteLog)
+            log.warning("Unable to retrieve log file (%s) from remote device" % self.remoteLog)
         self._dm.removeDir(self.remoteProfile)
         Mochitest.cleanup(self, options)
 
     def findPath(self, paths, filename = None):
         for path in paths:
             p = path
             if filename:
                 p = os.path.join(p, filename)
@@ -290,31 +295,31 @@ class MochiRemote(Mochitest):
         paths = [
             options.xrePath,
             localAutomation.DIST_BIN,
             self._automation._product,
             os.path.join('..', self._automation._product)
         ]
         options.xrePath = self.findPath(paths)
         if options.xrePath == None:
-            log.error("unable to find xulrunner path for %s, please specify with --xre-path", os.name)
+            log.error("unable to find xulrunner path for %s, please specify with --xre-path" % os.name)
             sys.exit(1)
 
         xpcshell = "xpcshell"
         if (os.name == "nt"):
             xpcshell += ".exe"
 
         if options.utilityPath:
             paths = [options.utilityPath, options.xrePath]
         else:
             paths = [options.xrePath]
         options.utilityPath = self.findPath(paths, xpcshell)
 
         if options.utilityPath == None:
-            log.error("unable to find utility path for %s, please specify with --utility-path", os.name)
+            log.error("unable to find utility path for %s, please specify with --utility-path" % os.name)
             sys.exit(1)
 
         xpcshell_path = os.path.join(options.utilityPath, xpcshell)
         if localAutomation.elf_arm(xpcshell_path):
             log.error('xpcshell at %s is an ARM binary; please use '
                       'the --utility-path argument to specify the path '
                       'to a desktop version.' % xpcshell_path)
             sys.exit(1)
@@ -411,58 +416,63 @@ class MochiRemote(Mochitest):
 
     def getLogFilePath(self, logFile):
         return logFile
 
     # In the future we could use LogParser: http://hg.mozilla.org/automation/logparser/
     def addLogData(self):
         with open(self.localLog) as currentLog:
             data = currentLog.readlines()
-
-        restart = re.compile('0 INFO SimpleTest START.*')
-        reend = re.compile('([0-9]+) INFO TEST-START . Shutdown.*')
-        refail = re.compile('([0-9]+) INFO TEST-UNEXPECTED-FAIL.*')
         start_found = False
         end_found = False
         fail_found = False
         for line in data:
-            if reend.match(line):
+            try:
+                message = json.loads(line)
+                if not isinstance(message, dict) or 'action' not in message:
+                    continue
+            except ValueError:
+                continue
+
+            if message['action'] == 'test_end':
                 end_found = True
                 start_found = False
                 break
 
             if start_found and not end_found:
-                # Append the line without the number to increment
-                self.logLines.append(' '.join(line.split(' ')[1:]))
+                self.logMessages.append(message)
 
-            if restart.match(line):
+            if message['action'] == 'test_start':
                 start_found = True
-            if refail.match(line):
+            if 'expected' in message:
                 fail_found = True
         result = 0
         if fail_found:
             result = 1
         if not end_found:
             log.error("Automation Error: Missing end of test marker (process crashed?)")
             result = 1
         return result
 
     def printLog(self):
         passed = 0
         failed = 0
         todo = 0
         incr = 1
         logFile = []
         logFile.append("0 INFO SimpleTest START")
-        for line in self.logLines:
-            if line.startswith("INFO TEST-PASS"):
+        for message in self.logMessages:
+            if 'status' not in message:
+                continue
+
+            if 'expected' in message:
+                failed += 1
+            elif message['status'] == 'PASS':
                 passed += 1
-            elif line.startswith("INFO TEST-UNEXPECTED"):
-                failed += 1
-            elif line.startswith("INFO TEST-KNOWN"):
+            elif message['status'] == 'FAIL':
                 todo += 1
             incr += 1
 
         logFile.append("%s INFO TEST-START | Shutdown" % incr)
         incr += 1
         logFile.append("%s INFO Passed: %s" % (incr, passed))
         incr += 1
         logFile.append("%s INFO Failed: %s" % (incr, failed))
@@ -484,35 +494,35 @@ class MochiRemote(Mochitest):
         # TODO: This can be re-written after completion of bug 749421
         if not self._dm.dirExists(screenShotDir):
             log.info("SCREENSHOT: No ScreenShots directory available: " + screenShotDir)
             return
 
         printed = 0
         for name in self._dm.listFiles(screenShotDir):
             fullName = screenShotDir + "/" + name
-            log.info("SCREENSHOT: FOUND: [%s]", fullName)
+            log.info("SCREENSHOT: FOUND: [%s]" % fullName)
             try:
                 image = self._dm.pullFile(fullName)
                 encoded = base64.b64encode(image)
-                log.info("SCREENSHOT: data:image/jpg;base64,%s", encoded)
+                log.info("SCREENSHOT: data:image/jpg;base64,%s" % encoded)
                 printed += 1
             except:
                 log.info("SCREENSHOT: Could not be parsed")
                 pass
 
-        log.info("SCREENSHOT: TOTAL PRINTED: [%s]", printed)
+        log.info("SCREENSHOT: TOTAL PRINTED: [%s]" % printed)
 
     def printDeviceInfo(self, printLogcat=False):
         try:
             if printLogcat:
                 logcat = self._dm.getLogcat(filterOutRegexps=fennecLogcatFilters)
-                log.info('\n'+(''.join(logcat)))
-            log.info("Device info: %s", self._dm.getInfo())
-            log.info("Test root: %s", self._dm.deviceRoot)
+                log.info('\n' + ''.join(logcat).decode('utf-8', 'replace'))
+            log.info("Device info: %s" % self._dm.getInfo())
+            log.info("Test root: %s" % self._dm.deviceRoot)
         except devicemanager.DMError:
             log.warn("Error getting device information")
 
     def buildRobotiumConfig(self, options, browserEnv):
         deviceRoot = self._dm.deviceRoot
         fHandle = tempfile.NamedTemporaryFile(suffix='.config',
                                               prefix='robotium-',
                                               dir=os.getcwd(),
@@ -523,19 +533,19 @@ class MochiRemote(Mochitest):
         fHandle.write("rawhost=http://%s:%s/tests\n" % (options.remoteWebServer, options.httpPort))
 
         if browserEnv:
             envstr = ""
             delim = ""
             for key, value in browserEnv.items():
                 try:
                     value.index(',')
-                    log.error("buildRobotiumConfig: browserEnv - Found a ',' in our value, unable to process value. key=%s,value=%s", key, value)
-                    log.error("browserEnv=%s", browserEnv)
-                except ValueError, e:
+                    log.error("buildRobotiumConfig: browserEnv - Found a ',' in our value, unable to process value. key=%s,value=%s" % (key, value))
+                    log.error("browserEnv=%s" % browserEnv)
+                except ValueError:
                     envstr += "%s%s=%s" % (delim, key, value)
                     delim = ","
 
             fHandle.write("envvars=%s\n" % envstr)
         fHandle.close()
 
         self._dm.removeFile(os.path.join(deviceRoot, "robotium.config"))
         self._dm.pushFile(fHandle.name, os.path.join(deviceRoot, "robotium.config"))
@@ -553,20 +563,95 @@ class MochiRemote(Mochitest):
         # whereas runtest.py's `runApp` takes a mozprofile object.
         if 'profileDir' not in kwargs and 'profile' in kwargs:
             kwargs['profileDir'] = kwargs.pop('profile').profile
 
         # We're handling ssltunnel, so we should lie to automation.py to avoid
         # it trying to set up ssltunnel as well
         kwargs['runSSLTunnel'] = False
 
+        if 'quiet' in kwargs:
+            kwargs.pop('quiet')
+
         return self._automation.runApp(*args, **kwargs)
 
+class RobocopMochiRemote(MochiRemote):
+    """This class maintains compatibility with the robocop logging system
+    that is still unstructured."""
+
+    def addLogData(self):
+        with open(self.localLog) as currentLog:
+            data = currentLog.readlines()
+
+        restart = re.compile('SimpleTest START.*')
+        reend = re.compile('TEST-START . Shutdown.*')
+        refail = re.compile('TEST-UNEXPECTED-FAIL.*')
+        start_found = False
+        end_found = False
+        fail_found = False
+        for line in data:
+            if reend.match(line):
+                end_found = True
+                start_found = False
+                break
+
+            if start_found and not end_found:
+                self.logMessages.append(line)
+
+            if restart.match(line):
+                start_found = True
+            if refail.match(line):
+                fail_found = True
+        result = 0
+        if fail_found:
+            result = 1
+        if not end_found:
+            log.error("Automation Error: Missing end of test marker (process crashed?)")
+            result = 1
+        return result
+
+    def printLog(self):
+        passed = 0
+        failed = 0
+        todo = 0
+        incr = 1
+        logFile = []
+        logFile.append("0 INFO SimpleTest START")
+        for line in self.logMessages:
+            if line.startswith("TEST-PASS"):
+                passed += 1
+            elif line.startswith("TEST-UNEXPECTED"):
+                failed += 1
+            elif line.startswith("TEST-KNOWN"):
+                todo += 1
+            incr += 1
+
+        logFile.append("%s INFO TEST-START | Shutdown" % incr)
+        incr += 1
+        logFile.append("%s INFO Passed: %s" % (incr, passed))
+        incr += 1
+        logFile.append("%s INFO Failed: %s" % (incr, failed))
+        incr += 1
+        logFile.append("%s INFO Todo: %s" % (incr, todo))
+        incr += 1
+        logFile.append("%s INFO SimpleTest FINISHED" % incr)
+
+        # TODO: Consider not printing to stdout because we might be duplicating output
+        print '\n'.join(logFile)
+        with open(self.localLog, 'w') as localLog:
+            localLog.write('\n'.join(logFile))
+
+        if failed > 0:
+            return 1
+        return 0
+
 def main():
-    auto = RemoteAutomation(None, "fennec")
+    message_logger = MessageLogger(logger=log)
+    process_args = {'messageLogger': message_logger}
+    auto = RemoteAutomation(None, "fennec", processArgs=process_args)
     parser = RemoteOptions(auto)
     options, args = parser.parse_args()
 
     if (options.dm_trans == "adb"):
         if (options.deviceIP):
             dm = droid.DroidADB(options.deviceIP, options.devicePort, deviceRoot=options.remoteTestRoot)
         else:
             dm = droid.DroidADB(deviceRoot=options.remoteTestRoot)
@@ -580,17 +665,18 @@ def main():
 
     productPieces = options.remoteProductName.split('.')
     if (productPieces != None):
         auto.setProduct(productPieces[0])
     else:
         auto.setProduct(options.remoteProductName)
     auto.setAppName(options.remoteappname)
 
-    mochitest = MochiRemote(auto, dm, options)
+    mochitest_cls = RobocopMochiRemote if options.robocopIni != "" else MochiRemote
+    mochitest = mochitest_cls(auto, dm, options, message_logger)
 
     options = parser.verifyOptions(options, mochitest)
     if (options == None):
         sys.exit(1)
 
     logParent = os.path.dirname(options.remoteLogFile)
     dm.mkDir(logParent);
     auto.setRemoteLog(options.remoteLogFile)
@@ -623,25 +709,27 @@ def main():
         mp = manifestparser.TestManifest(strict=False)
         # TODO: pull this in dynamically
         mp.read(options.robocopIni)
         robocop_tests = mp.active_tests(exists=False, **mozinfo.info)
         tests = []
         my_tests = tests
         for test in robocop_tests:
             tests.append(test['name'])
+        # suite_start message when running robocop tests
+        log.suite_start(tests)
 
         if options.totalChunks:
             tests_per_chunk = math.ceil(len(tests) / (options.totalChunks * 1.0))
             start = int(round((options.thisChunk-1) * tests_per_chunk))
             end = int(round(options.thisChunk * tests_per_chunk))
             if end > len(tests):
                 end = len(tests)
             my_tests = tests[start:end]
-            log.info("Running tests %d-%d/%d", start+1, end, len(tests))
+            log.info("Running tests %d-%d/%d" % (start+1, end, len(tests)))
 
         dm.removeFile(os.path.join(deviceRoot, "fennec_ids.txt"))
         fennec_ids = os.path.abspath(os.path.join(SCRIPT_DIR, "fennec_ids.txt"))
         if not os.path.exists(fennec_ids) and options.robocopIds:
             fennec_ids = options.robocopIds
         dm.pushFile(fennec_ids, os.path.join(deviceRoot, "fennec_ids.txt"))
         options.extraPrefs.append('browser.search.suggest.enabled=true')
         options.extraPrefs.append('browser.search.suggest.prompted=true')
@@ -700,17 +788,17 @@ def main():
                    if (options.dm_trans == "sut"):
                        dm._runCmds([{"cmd": " ".join(cmd)}])
             try:
                 screenShotDir = "/mnt/sdcard/Robotium-Screenshots"
                 dm.removeDir(screenShotDir)
                 dm.recordLogcat()
                 result = mochitest.runTests(options)
                 if result != 0:
-                    log.error("runTests() exited with code %s", result)
+                    log.error("runTests() exited with code %s" % result)
                 log_result = mochitest.addLogData()
                 if result != 0 or log_result != 0:
                     mochitest.printDeviceInfo(printLogcat=True)
                     mochitest.printScreenshots(screenShotDir)
                 # Ensure earlier failures aren't overwritten by success on this run
                 if retVal is None or retVal == 0:
                     retVal = result
             except:
@@ -729,17 +817,17 @@ def main():
                 if test['name'] == "testImportFromAndroid":
                     if ("pandaboard" in devOS):
                         cmd_del = ['execsu', 'sqlite3', "/data/data/com.android.browser/databases/browser2.db \'delete from bookmarks where _id > 14;\'"]
                     else:
                         cmd_del = ['execsu', 'sqlite3', "/data/data/com.android.browser/databases/browser.db \'delete from bookmarks where _id > 14;\'"]
                     if (options.dm_trans == "sut"):
                         dm._runCmds([{"cmd": " ".join(cmd_del)}])
         if retVal is None:
-            log.warn("No tests run. Did you pass an invalid TEST_PATH?")
+            log.warning("No tests run. Did you pass an invalid TEST_PATH?")
             retVal = 1
         else:
             # if we didn't have some kind of error running the tests, make
             # sure the tests actually passed
             print "INFO | runtests.py | Test summary: start."
             overallResult = mochitest.printLog()
             print "INFO | runtests.py | Test summary: end."
             if retVal == 0:
--- a/testing/mochitest/tests/SimpleTest/MemoryStats.js
+++ b/testing/mochitest/tests/SimpleTest/MemoryStats.js
@@ -48,17 +48,17 @@ MemoryStats._nsIFile = function (pathnam
 }
 
 MemoryStats.constructPathname = function (directory, basename) {
     var d = MemoryStats._nsIFile(directory);
     d.append(basename);
     return d.path;
 }
 
-MemoryStats.dump = function (dumpFn,
+MemoryStats.dump = function (logger,
                              testNumber,
                              testURL,
                              dumpOutputDirectory,
                              dumpAboutMemory,
                              dumpDMD) {
     var mrm = MemoryStats._getService("@mozilla.org/memory-reporter-manager;1",
                                       "nsIMemoryReporterManager");
     for (var stat in MemoryStats._hasMemoryStatistics) {
@@ -70,35 +70,34 @@ MemoryStats.dump = function (dumpFn,
                 var value = mrm[stat];
                 supported = MEM_STAT_SUPPORTED;
             } catch (e) {
                 supported = MEM_STAT_UNSUPPORTED;
             }
             MemoryStats._hasMemoryStatistics[stat] = supported;
         }
         if (supported == MEM_STAT_SUPPORTED) {
-            dumpFn("TEST-INFO | MEMORY STAT " + stat + " after test: " + mrm[stat]);
+            logger.info("MEMORY STAT " + stat + " after test: " + mrm[stat]);
         } else if (firstAccess) {
-            dumpFn("TEST-INFO | MEMORY STAT " + stat + " not supported in this build configuration.");
+            logger.info("MEMORY STAT " + stat + " not supported in this build configuration.");
         }
     }
 
     if (dumpAboutMemory) {
         var basename = "about-memory-" + testNumber + ".json.gz";
         var dumpfile = MemoryStats.constructPathname(dumpOutputDirectory,
                                                      basename);
-        dumpFn("TEST-INFO | " + testURL + " | MEMDUMP-START " + dumpfile);
+        logger.info(testURL + " | MEMDUMP-START " + dumpfile);
         var md = MemoryStats._getService("@mozilla.org/memory-info-dumper;1",
                                          "nsIMemoryInfoDumper");
         md.dumpMemoryReportsToNamedFile(dumpfile, function () {
-            dumpFn("TEST-INFO | " + testURL + " | MEMDUMP-END");
+            logger.info("TEST-INFO | " + testURL + " | MEMDUMP-END");
         }, null, /* anonymize = */ false);
-
     }
 
     if (dumpDMD && typeof(DMDReportAndDump) != "undefined") {
         var basename = "dmd-" + testNumber + ".txt";
         var dumpfile = MemoryStats.constructPathname(dumpOutputDirectory,
                                                      basename);
-        dumpFn("TEST-INFO | " + testURL + " | DMD-DUMP " + dumpfile);
+        logger.info(testURL + " | DMD-DUMP " + dumpfile);
         DMDReportAndDump(dumpfile);
     }
 };
--- a/testing/mochitest/tests/SimpleTest/SimpleTest.js
+++ b/testing/mochitest/tests/SimpleTest/SimpleTest.js
@@ -86,17 +86,17 @@ if (typeof(repr) == 'undefined') {
             o = ostring.replace(/^\s+/, "");
             var idx = o.indexOf("{");
             if (idx != -1) {
                 o = o.substr(0, idx) + "{...}";
             }
         }
         return ostring;
     };
-} 
+}
 
 /* This returns a function that applies the previously given parameters.
  * This is used by SimpleTest.showReport
  */
 if (typeof(partial) == 'undefined') {
     this.partial = function(func) {
         var args = [];
         for (var i = 1; i < arguments.length; i++) {
@@ -111,17 +111,17 @@ if (typeof(partial) == 'undefined') {
             func(args);
         };
     };
 }
 
 if (typeof(getElement) == 'undefined') {
     this.getElement = function(id) {
         return ((typeof(id) == "string") ?
-            document.getElementById(id) : id); 
+            document.getElementById(id) : id);
     };
     this.$ = this.getElement;
 }
 
 SimpleTest._newCallStack = function(path) {
     var rval = function () {
         var callStack = arguments.callee.callStack;
         for (var i = 0; i < callStack.length; i++) {
@@ -185,20 +185,20 @@ if (typeof(computedStyle) == 'undefined'
         }
         if (typeof(document.defaultView) == 'undefined' || document === null) {
             return undefined;
         }
         var style = document.defaultView.getComputedStyle(elem, null);
         if (typeof(style) == 'undefined' || style === null) {
             return undefined;
         }
-        
+
         var selectorCase = cssProperty.replace(/([A-Z])/g, '-$1'
             ).toLowerCase();
-            
+
         return style.getPropertyValue(selectorCase);
     };
 }
 
 /**
  * Check for OOP test plugin
 **/
 SimpleTest.testPluginIsOOP = function () {
@@ -230,17 +230,19 @@ SimpleTest._tests = [];
 SimpleTest._stopOnLoad = true;
 SimpleTest._cleanupFunctions = [];
 
 /**
  * Something like assert.
 **/
 SimpleTest.ok = function (condition, name, diag) {
     var test = {'result': !!condition, 'name': name, 'diag': diag};
-    SimpleTest._logResult(test, "TEST-PASS", "TEST-UNEXPECTED-FAIL");
+    var successInfo = {status:"PASS", expected:"PASS", message:"TEST-PASS"};
+    var failureInfo = {status:"FAIL", expected:"PASS", message:"TEST-UNEXPECTED-FAIL"};
+    SimpleTest._logResult(test, successInfo, failureInfo);
     SimpleTest._tests.push(test);
 };
 
 /**
  * Roughly equivalent to ok(a==b, name)
 **/
 SimpleTest.is = function (a, b, name) {
     var pass = (a == b);
@@ -279,17 +281,19 @@ SimpleTest.doesThrow = function(fn, name
     } catch (ex) { gotException = true; }
     ok(gotException, name);
 };
 
 //  --------------- Test.Builder/Test.More todo() -----------------
 
 SimpleTest.todo = function(condition, name, diag) {
     var test = {'result': !!condition, 'name': name, 'diag': diag, todo: true};
-    SimpleTest._logResult(test, "TEST-UNEXPECTED-PASS", "TEST-KNOWN-FAIL");
+    var successInfo = {status:"PASS", expected:"FAIL", message:"TEST-UNEXPECTED-PASS"};
+    var failureInfo = {status:"FAIL", expected:"FAIL", message:"TEST-KNOWN-FAIL"};
+    SimpleTest._logResult(test, successInfo, failureInfo);
     SimpleTest._tests.push(test);
 };
 
 /*
  * Returns the absolute URL to a test data file from where tests
 * are served. I.e. the file doesn't necessarily exist where tests
  * are executed.
  * (For b2g and android, mochitest are executed on the device, while
@@ -308,140 +312,75 @@ SimpleTest.getTestFileURL = function(pat
 };
 
 SimpleTest._getCurrentTestURL = function() {
     return parentRunner && parentRunner.currentTestURL ||
            typeof gTestPath == "string" && gTestPath ||
            "unknown test url";
 };
 
-SimpleTest._forceLogMessageOutput = parentRunner && !parentRunner.quiet;
+SimpleTest._forceLogMessageOutput = false;
 
 /**
  * Force all test messages to be displayed.  Only applies for the current test.
  */
 SimpleTest.requestCompleteLog = function() {
-    if (SimpleTest._forceLogMessageOutput)
+    if (!parentRunner || SimpleTest._forceLogMessageOutput) {
         return;
+    }
 
+    parentRunner.structuredLogger.deactivateBuffering();
     SimpleTest._forceLogMessageOutput = true;
+
     SimpleTest.registerCleanupFunction(function() {
+        parentRunner.structuredLogger.activateBuffering();
         SimpleTest._forceLogMessageOutput = false;
     });
 };
 
-/**
- * A circular buffer, managed by _logResult.  We explicitly manage the
- * circularness of the buffer, rather than resorting to .shift()/.push()
- * because explicit management is much faster.
- */
-SimpleTest._bufferedMessages = [];
-SimpleTest._logResult = (function () {
-    var bufferingThreshold = 100;
-    var outputIndex = 0;
-
-    function logResult(test, passString, failString) {
-        var url = SimpleTest._getCurrentTestURL();
-        var resultString = test.result ? passString : failString;
-        var diagnostic = test.name + (test.diag ? " - " + test.diag : "");
-        var msg = [resultString, url, diagnostic].join(" | ");
-        var isError = !test.result == !test.todo;
-
-        // Due to JavaScript's name lookup rules, it is important that
-        // the second parameter here be named identically to the isError
-        // variable declared above.
-        function dumpMessage(msg, isError) {
-            if (parentRunner) {
-                if (isError) {
-                    parentRunner.addFailedTest(url);
-                    parentRunner.error(msg);
-                } else {
-                    parentRunner.log(msg);
-                }
-            } else if (typeof dump === "function") {
-                dump(msg + "\n");
-            } else {
-                // Non-Mozilla browser?  Just do nothing.
-            }
-        }
-
-        // Detect when SimpleTest.reset() has been called, so we can
-        // reset outputIndex.  We store outputIndex as local state to
-        // avoid adding even more state to SimpleTest.
-        if (SimpleTest._bufferedMessages.length == 0) {
-            outputIndex = 0;
-        }
+SimpleTest._logResult = function (test, passInfo, failInfo) {
+    var url = SimpleTest._getCurrentTestURL();
+    var result = test.result ? passInfo : failInfo;
+    var diagnostic = test.diag || null;
+    // BUGFIX: coerce test.name to a string, because some a11y tests pass an XPConnect object.
+    var subtest = test.name ? String(test.name) : null;
+    var isError = !test.result == !test.todo;
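+    // isError is true when the outcome contradicts the expectation: a
+    // regular test that failed, or a todo() test that unexpectedly passed.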
 
-        // We want to eliminate mundane TEST-PASS/TEST-KNOWN-FAIL
-        // output, since some tests produce tens of thousands of of such
-        // messages.  These messages can consume a lot of memory to
-        // generate and take a significant amount of time to output.
-        // However, the reality is that TEST-PASS messages can also be
-        // used as a form of logging via constructs like:
-        //
-        // SimpleTest.ok(true, "some informative message");
-        //
-        // And eliding the logging can be very confusing when trying to
-        // debug test failures.
-        //
-        // Hence the compromise adopted here: We buffer messages up to
-        // some limit and dump the buffer when a test failure happens.
-        // This behavior ought to provide enough context for developers
-        // looking to understand where in the test things failed.
-        if (isError) {
-            // Display this message and all the messages we have buffered.
-            if (SimpleTest._bufferedMessages.length > 0) {
-                dumpMessage("TEST-INFO | dumping last " + SimpleTest._bufferedMessages.length + " message(s)");
-                dumpMessage("TEST-INFO | if you need more context, please use SimpleTest.requestCompleteLog() in your test");
-
-                function dumpBufferedMessage(m) {
-                    dumpMessage(m, false);
-                }
-                // The latest message is just before outputIndex.
-                // The earliest message is located at outputIndex.
-                var earliest = SimpleTest._bufferedMessages.slice(outputIndex);
-                var latest = SimpleTest._bufferedMessages.slice(0, outputIndex);
-                earliest.map(dumpBufferedMessage);
-                latest.map(dumpBufferedMessage);
-
-                SimpleTest._bufferedMessages = [];
-            }
-
-            dumpMessage(msg);
-            return;
-        }
-
-        var runningSingleTest = ((parentRunner &&
-                                  parentRunner._urls.length == 1) ||
-                                 isSingleTestRun);
-        var shouldLogImmediately = (runningSingleTest ||
-                                    SimpleTest._forceLogMessageOutput);
-
-        if (!shouldLogImmediately) {
-            // Buffer the message for possible later output.
-            if (SimpleTest._bufferedMessages.length >= bufferingThreshold) {
-                if (outputIndex >= bufferingThreshold) {
-                    outputIndex = 0;
-                }
-                SimpleTest._bufferedMessages[outputIndex] = msg;
-                outputIndex++;
-            } else {
-                SimpleTest._bufferedMessages.push(msg);
+    if (parentRunner) {
+        if (!result.status || !result.expected) {
+            if (diagnostic) {
+                parentRunner.structuredLogger.info(diagnostic);
             }
             return;
         }
 
-        dumpMessage(msg);
-    }
+        if (isError) {
+            parentRunner.addFailedTest(url);
+        }
 
-    return logResult;
-})();
+        parentRunner.structuredLogger.testStatus(url,
+                                                 subtest,
+                                                 result.status,
+                                                 result.expected,
+                                                 diagnostic);
+    } else if (typeof dump === "function") {
+        var debugMsg = [url, result.message, diagnostic].join(' | ');
+        dump(debugMsg + "\n");
+    } else {
+        // Non-Mozilla browser?  Just do nothing.
+    }
+};
 
 SimpleTest.info = function(name, message) {
-    SimpleTest._logResult({result:true, name:name, diag:message}, "TEST-INFO");
+    var log = message ? (name + ' | ' + message) : name;
+    if (parentRunner) {
+        parentRunner.structuredLogger.info(log);
+    } else {
+        dump(log + '\n');
+    }
 };
 
 /**
  * Copies of is and isnot with the call to ok replaced by a call to todo.
 **/
 
 SimpleTest.todo_is = function (a, b, name) {
     var pass = (a == b);
@@ -526,24 +465,24 @@ SimpleTest.toggle = function(el) {
 SimpleTest.toggleByClass = function (cls, evt) {
     var children = document.getElementsByTagName('div');
     var elements = [];
     for (var i=0; i<children.length; i++) {
         var child = children[i];
         var clsName = child.className;
         if (!clsName) {
             continue;
-        }    
+        }
         var classNames = clsName.split(' ');
         for (var j = 0; j < classNames.length; j++) {
             if (classNames[j] == cls) {
                 elements.push(child);
                 break;
-            }    
-        }    
+            }
+        }
     }
     for (var t=0; t<elements.length; t++) {
         //TODO: again, for-in loop over elems seems to break this
         SimpleTest.toggle(elements[t]);
     }
     if (evt)
         evt.preventDefault();
 };
@@ -859,17 +798,22 @@ SimpleTest.registerCleanupFunction = fun
 };
 
 /**
  * Finishes the tests. This is automatically called, except when
  * SimpleTest.waitForExplicitFinish() has been invoked.
 **/
 SimpleTest.finish = function() {
     if (SimpleTest._alreadyFinished) {
-        SimpleTest.ok(false, "[SimpleTest.finish()] this test already called finish!");
+        var err = "[SimpleTest.finish()] this test already called finish!";
+        if (parentRunner) {
+            parentRunner.structuredLogger.error(err);
+        } else {
+            dump(err + '\n');
+        }
     }
 
     SimpleTest._alreadyFinished = true;
 
     var afterCleanup = function() {
         if (SpecialPowers.DOMWindowUtils.isTestControllingRefreshes) {
             SimpleTest.ok(false, "test left refresh driver under test control");
             SpecialPowers.DOMWindowUtils.restoreNormalRefresh();
--- a/testing/mochitest/tests/SimpleTest/TestRunner.js
+++ b/testing/mochitest/tests/SimpleTest/TestRunner.js
@@ -60,16 +60,168 @@ function flattenArguments(lst/* ...*/) {
             }
         } else {
             res.push(o);
         }
     }
     return res;
 }
 
+
+/**
+ * StructuredLogger: Structured logger class following the mozlog.structured protocol.
+**/
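+// A minimal usage sketch (illustrative only; the test URL below is
+// hypothetical, the methods are the ones defined in this class):
+//
+//   var logger = new StructuredLogger("mochitest");
+//   logger.suiteStart(["test_example.html"]);
+//   logger.testStart("test_example.html");
+//   logger.testStatus("test_example.html", "value is set", "PASS");
+//   logger.testEnd("test_example.html", "OK");
+//   logger.suiteEnd();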
+var VALID_ACTIONS = ['suite_start', 'suite_end', 'test_start', 'test_end', 'test_status', 'process_output', 'log'];
+
+function StructuredLogger(name) {
+    this.name = name;
+    this.testsStarted = [];
+
+    /* test logs */
+
+    this.testStart = function(test) {
+        this.testsStarted.push(test);
+        var data = {test: test};
+        this._logData("test_start", data);
+    };
+
+    this.testStatus = function(test, subtest, status, expected="PASS", message=null) {
+        var data = {test: test, subtest: subtest, status: status};
+
+        if (message !== null) {
+            data.message = message;
+        }
+        if (expected != status && status != 'SKIP') {
+            data.expected = expected;
+        }
+
+        this._logData("test_status", data);
+    };
+
+    this.testEnd = function(test, status, expected="OK", message=null, extra=null) {
+        var data = {test: test, status: status};
+
+        if (message !== null) {
+            data.message = message;
+        }
+        if (expected != status) {
+            data.expected = expected;
+        }
+        if (extra !== null) {
+            data.extra = extra;
+        }
+
+        // Array.pop() ignores its argument, so look the test up explicitly.
+        var started = this.testsStarted.indexOf(test);
+        if (started === -1) {
+            this.error("Test '" + test + "' was ended more than once or never started. " +
+                       "Ended with this data: " + JSON.stringify(data));
+            return;
+        }
+        // Remove the test so a second testEnd for it is reported as an error.
+        this.testsStarted.splice(started, 1);
+
+        this._logData("test_end", data);
+    };
+
+    this.suiteStart = function(tests, runinfo) {
+        runinfo = typeof runinfo !== "undefined" ? runinfo : null;
+
+        var data = {tests: tests};
+        if (runinfo !== null) {
+            data.runinfo = runinfo;
+        }
+
+        this._logData("suite_start", data);
+    };
+
+    this.suiteEnd = function() {
+        this._logData("suite_end");
+    };
+
+    /* log action: human readable logs */
+
+    this._log = function(level, message) {
+        // Coercing the message parameter to a string, in case an invalid value is passed.
+        message = String(message);
+        var data = {level: level, message: message};
+        this._logData('log', data);
+    };
+
+    this.debug = function(message) {
+        this._log('DEBUG', message);
+    };
+
+    this.info = function(message) {
+        this._log('INFO', message);
+    };
+
+    this.warning = function(message) {
+        this._log('WARNING', message);
+    };
+
+    this.error = function(message) {
+        this._log("ERROR", message);
+    };
+
+    this.critical = function(message) {
+        this._log('CRITICAL', message);
+    };
+
+    /* Special mochitest messages for deactivating/activating buffering */
+
+    this.deactivateBuffering = function() {
+        this._logData("buffering_off");
+    };
+
+    this.activateBuffering = function() {
+        this._logData("buffering_on");
+    };
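+    // Note: "buffering_on"/"buffering_off" are deliberately absent from
+    // VALID_ACTIONS; they are harness control messages rather than regular
+    // mozlog records.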
+
+    /* dispatches a log to handlers */
+
+    this._logData = function(action, data) {
+        data = typeof data !== "undefined" ? data : null;
+
+        if (data === null) {
+            data = {};
+        }
+
+        var allData = {action: action,
+                       time: new Date().getTime(),
+                       thread: null,
+                       pid: null,
+                       source: this.name};
+
+        for (var attrname in data) {
+            allData[attrname] = data[attrname];
+        }
+
+        this._dumpMessage(allData);
+    };
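+    // For illustration (the timestamp is made up), this._log("INFO", "hi")
+    // ends up dumping a single JSON line such as:
+    //   {"action":"log","time":1405580000000,"thread":null,"pid":null,
+    //    "source":"mochitest","level":"INFO","message":"hi"}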
+
+    this._dumpMessage = function(message) {
+        var str = JSON.stringify(message);
+        // BUGFIX: browser-chrome tests don't use LogController.
+        if (Object.keys(LogController.listeners).length !== 0) {
+            LogController.log(str);
+        } else {
+            dump('\n' + str + '\n');
+        }
+    };
+
+    /* Message validation. Only checking the action for now */
+    this.validMessage = function(message) {
+        return message.action !== undefined && VALID_ACTIONS.indexOf(message.action) >= 0;
+    };
+
+}
+
 /**
  * TestRunner: A test runner for SimpleTest
  * TODO:
  *
  *  * Avoid moving iframes: That causes reloads on mozilla and opera.
  *
  *
 **/
@@ -86,17 +238,16 @@ TestRunner._expectedMinAsserts = 0;
 TestRunner._expectedMaxAsserts = 0;
 
 TestRunner.timeout = 5 * 60 * 1000; // 5 minutes.
 TestRunner.maxTimeouts = 4; // halt testing after too many timeouts
 TestRunner.runSlower = false;
 TestRunner.dumpOutputDirectory = "";
 TestRunner.dumpAboutMemoryAfterTest = false;
 TestRunner.dumpDMDAfterTest = false;
-TestRunner.quiet = false;
 TestRunner.slowestTestTime = 0;
 TestRunner.slowestTestURL = "";
 
 TestRunner._expectingProcessCrash = false;
 
 /**
  * Make sure the tests don't hang indefinitely.
 **/
@@ -121,16 +272,17 @@ TestRunner._checkForHangs = function() {
     }
   }
 
   if (TestRunner._currentTest < TestRunner._urls.length) {
     var runtime = new Date().valueOf() - TestRunner._currentTestStartTime;
     if (runtime >= TestRunner.timeout * TestRunner._timeoutFactor) {
       var frameWindow = $('testframe').contentWindow.wrappedJSObject ||
                           $('testframe').contentWindow;
+      // TODO: Report this in a way that marks the test as having ended with status "TIMEOUT".
       reportError(frameWindow, "Test timed out.");
 
       // If we have too many timeouts, give up. We don't want to wait hours
       // for results if some bug causes lots of tests to time out.
       if (++TestRunner._numTimeouts >= TestRunner.maxTimeouts) {
         TestRunner._haltTests = true;
 
         TestRunner.currentTestURL = "(SimpleTest/TestRunner.js)";
@@ -200,29 +352,29 @@ TestRunner.generateFailureList = functio
         failures.log(JSON.stringify(TestRunner._failedTests));
         failures.close();
     }
 };
 
 /**
  * If logEnabled is true, this is the logger that will be used.
 **/
-TestRunner.logger = LogController;
+TestRunner.structuredLogger = new StructuredLogger('mochitest');
 
 TestRunner.log = function(msg) {
     if (TestRunner.logEnabled) {
-        TestRunner.logger.log(msg);
+        TestRunner.structuredLogger.info(msg);
     } else {
         dump(msg + "\n");
     }
 };
 
 TestRunner.error = function(msg) {
     if (TestRunner.logEnabled) {
-        TestRunner.logger.error(msg);
+        TestRunner.structuredLogger.error(msg);
     } else {
         dump(msg + "\n");
     }
 
     if (TestRunner.runUntilFailure) {
       TestRunner._haltTests = true;
     }
 
@@ -259,17 +411,17 @@ TestRunner._makeIframe = function (url, 
         window.focus();
         SpecialPowers.focus();
         iframe.focus();
         if (retry < 3) {
             window.setTimeout('TestRunner._makeIframe("'+url+'", '+(retry+1)+')', 1000);
             return;
         }
 
-        TestRunner.log("Error: Unable to restore focus, expect failures and timeouts.");
+        TestRunner.structuredLogger.error("Unable to restore focus, expect failures and timeouts.");
     }
     window.scrollTo(0, $('indicator').offsetTop);
     iframe.src = url;
     iframe.name = url;
     iframe.width = "500";
     return iframe;
 };
 
@@ -289,37 +441,38 @@ TestRunner.getLoadedTestURL = function (
 
 /**
  * TestRunner entry point.
  *
  * The arguments are the URLs of the test to be ran.
  *
 **/
 TestRunner.runTests = function (/*url...*/) {
-    TestRunner.log("SimpleTest START");
+    TestRunner.structuredLogger.info("SimpleTest START");
     TestRunner.originalTestURL = $("current-test").innerHTML;
 
     SpecialPowers.registerProcessCrashObservers();
 
     TestRunner._urls = flattenArguments(arguments);
+
     $('testframe').src="";
     TestRunner._checkForHangs();
     TestRunner.runNextTest();
 };
 
 /**
  * Used for running a set of tests in a loop for debugging purposes
  * Takes an array of URLs
 **/
 TestRunner.resetTests = function(listURLs) {
   TestRunner._currentTest = 0;
   // Reset our "Current-test" line - functionality depends on it
   $("current-test").innerHTML = TestRunner.originalTestURL;
   if (TestRunner.logEnabled)
-    TestRunner.log("SimpleTest START Loop " + TestRunner._currentLoop);
+    TestRunner.structuredLogger.info("SimpleTest START Loop " + TestRunner._currentLoop);
 
   TestRunner._urls = listURLs;
   $('testframe').src="";
   TestRunner._checkForHangs();
   TestRunner.runNextTest();
 }
 
 /**
@@ -335,63 +488,71 @@ TestRunner.runNextTest = function() {
 
         $("current-test-path").innerHTML = url;
 
         TestRunner._currentTestStartTime = new Date().valueOf();
         TestRunner._timeoutFactor = 1;
         TestRunner._expectedMinAsserts = 0;
         TestRunner._expectedMaxAsserts = 0;
 
-        TestRunner.log("TEST-START | " + url); // used by automation.py
+        TestRunner.structuredLogger.testStart(url);
 
         TestRunner._makeIframe(url, 0);
     } else {
         $("current-test").innerHTML = "<b>Finished</b>";
         TestRunner._makeIframe("about:blank", 0);
 
-        if (parseInt($("pass-count").innerHTML) == 0 &&
-            parseInt($("fail-count").innerHTML) == 0 &&
-            parseInt($("todo-count").innerHTML) == 0)
+        var passCount = parseInt($("pass-count").innerHTML, 10);
+        var failCount = parseInt($("fail-count").innerHTML, 10);
+        var todoCount = parseInt($("todo-count").innerHTML, 10);
+
+        if (passCount === 0 &&
+            failCount === 0 &&
+            todoCount === 0)
         {
-          // No |$('testframe').contentWindow|, so manually update: ...
-          // ... the log,
-          TestRunner.error("TEST-UNEXPECTED-FAIL | (SimpleTest/TestRunner.js) | No checks actually run.");
-          // ... the count,
-          $("fail-count").innerHTML = 1;
-          // ... the indicator.
-          var indicator = $("indicator");
-          indicator.innerHTML = "Status: Fail (No checks actually run)";
-          indicator.style.backgroundColor = "red";
+            // No |$('testframe').contentWindow|, so manually update: ...
+            // ... the log,
+            TestRunner.structuredLogger.testEnd('SimpleTest/TestRunner.js',
+                                                "ERROR",
+                                                "OK",
+                                                "No checks actually run");
+            // ... the count,
+            $("fail-count").innerHTML = 1;
+            // ... the indicator.
+            var indicator = $("indicator");
+            indicator.innerHTML = "Status: Fail (No checks actually run)";
+            indicator.style.backgroundColor = "red";
         }
 
         SpecialPowers.unregisterProcessCrashObservers();
 
-        TestRunner.log("TEST-START | Shutdown"); // used by automation.py
-        TestRunner.log("Passed:  " + $("pass-count").innerHTML);
-        TestRunner.log("Failed:  " + $("fail-count").innerHTML);
-        TestRunner.log("Todo:    " + $("todo-count").innerHTML);
-        TestRunner.log("Slowest: " + TestRunner.slowestTestTime + 'ms - ' + TestRunner.slowestTestURL);
+        TestRunner.structuredLogger.info("TEST-START | Shutdown");
+        TestRunner.structuredLogger.info("Passed:  " + passCount);
+        TestRunner.structuredLogger.info("Failed:  " + failCount);
+        TestRunner.structuredLogger.info("Todo:    " + todoCount);
+        TestRunner.structuredLogger.info("Slowest: " + TestRunner.slowestTestTime + 'ms - ' + TestRunner.slowestTestURL);
+
         // If we are looping, don't send this cause it closes the log file
-        if (TestRunner.repeat == 0) {
-          TestRunner.log("SimpleTest FINISHED");
+        if (TestRunner.repeat === 0) {
+          TestRunner.structuredLogger.info("SimpleTest FINISHED");
         }
 
-        if (TestRunner.repeat == 0 && TestRunner.onComplete) {
+        if (TestRunner.repeat === 0 && TestRunner.onComplete) {
              TestRunner.onComplete();
          }
 
         if (TestRunner._currentLoop <= TestRunner.repeat && !TestRunner._haltTests) {
           TestRunner._currentLoop++;
           TestRunner.resetTests(TestRunner._urls);
           TestRunner._loopIsRestarting = true;
         } else {
           // Loops are finished
           if (TestRunner.logEnabled) {
-            TestRunner.log("TEST-INFO | Ran " + TestRunner._currentLoop + " Loops");
-            TestRunner.log("SimpleTest FINISHED");
+            TestRunner.structuredLogger.info("TEST-INFO | Ran " + TestRunner._currentLoop + " Loops");
+            TestRunner.structuredLogger.info("SimpleTest FINISHED");
           }
 
           if (TestRunner.onComplete)
             TestRunner.onComplete();
        }
        TestRunner.generateFailureList();
     }
 };
@@ -403,68 +564,81 @@ TestRunner.expectChildProcessCrash = fun
 /**
  * This stub is called by SimpleTest when a test is finished.
 **/
 TestRunner.testFinished = function(tests) {
     // Prevent a test from calling finish() multiple times before we
     // have a chance to unload it.
     if (TestRunner._currentTest == TestRunner._lastTestFinished &&
         !TestRunner._loopIsRestarting) {
-        TestRunner.error("TEST-UNEXPECTED-FAIL | " +
-                         TestRunner.currentTestURL +
-                         " | called finish() multiple times");
+        TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
+                                            "ERROR",
+                                            "OK",
+                                            "called finish() multiple times");
         TestRunner.updateUI([{ result: false }]);
         return;
     }
     TestRunner._lastTestFinished = TestRunner._currentTest;
     TestRunner._loopIsRestarting = false;
 
-    MemoryStats.dump(TestRunner.log, TestRunner._currentTest,
+    // TODO: Replace this with a function that returns the memory data as an
+    // object to be dumped later with the test_end message.
+    MemoryStats.dump(TestRunner.structuredLogger, TestRunner._currentTest,
                      TestRunner.currentTestURL,
                      TestRunner.dumpOutputDirectory,
                      TestRunner.dumpAboutMemoryAfterTest,
                      TestRunner.dumpDMDAfterTest);
 
     function cleanUpCrashDumpFiles() {
         if (!SpecialPowers.removeExpectedCrashDumpFiles(TestRunner._expectingProcessCrash)) {
-            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
-                             TestRunner.currentTestURL +
-                             " | This test did not leave any crash dumps behind, but we were expecting some!");
+            TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
+                                                "ERROR",
+                                                "OK",
+                                                "This test did not leave any crash dumps behind, but we were expecting some!");
             tests.push({ result: false });
         }
         var unexpectedCrashDumpFiles =
             SpecialPowers.findUnexpectedCrashDumpFiles();
         TestRunner._expectingProcessCrash = false;
         if (unexpectedCrashDumpFiles.length) {
-            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
-                             TestRunner.currentTestURL +
-                             " | This test left crash dumps behind, but we " +
-                             "weren't expecting it to!");
+            TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
+                                                "ERROR",
+                                                "OK",
+                                                "This test left crash dumps behind, but we " +
+                                                "weren't expecting it to!",
+                                                {unexpected_crashdump_files: unexpectedCrashDumpFiles});
             tests.push({ result: false });
             unexpectedCrashDumpFiles.sort().forEach(function(aFilename) {
-                TestRunner.log("TEST-INFO | Found unexpected crash dump file " +
-                               aFilename + ".");
+                TestRunner.structuredLogger.info("Found unexpected crash dump file " +
+                                                 aFilename + ".");
             });
         }
     }
 
     function runNextTest() {
         if (TestRunner.currentTestURL != TestRunner.getLoadedTestURL()) {
-            TestRunner.error("TEST-UNEXPECTED-FAIL | " +
-                             TestRunner.currentTestURL +
-                             " | " + TestRunner.getLoadedTestURL() +
-                             " finished in a non-clean fashion, probably" +
-                             " because it didn't call SimpleTest.finish()");
+            TestRunner.structuredLogger.testStatus(TestRunner.currentTestURL,
+                                                   TestRunner.getLoadedTestURL(),
+                                                   "ERROR",
+                                                   "OK",
+                                                   "finished in a non-clean fashion, probably" +
+                                                   " because it didn't call SimpleTest.finish()",
+                                                   {loaded_test_url: TestRunner.getLoadedTestURL()});
             tests.push({ result: false });
         }
 
         var runtime = new Date().valueOf() - TestRunner._currentTestStartTime;
-        TestRunner.log("TEST-END | " +
-                       TestRunner.currentTestURL +
-                       " | finished in " + runtime + "ms");
+
+        TestRunner.structuredLogger.testEnd(TestRunner.currentTestURL,
+                                            "OK",
+                                            undefined,
+                                            "Finished in " + runtime + "ms",
+                                            {runtime: runtime});
+
         if (TestRunner.slowestTestTime < runtime && TestRunner._timeoutFactor == 1) {
           TestRunner.slowestTestTime = runtime;
           TestRunner.slowestTestURL = TestRunner.currentTestURL;
         }
 
         TestRunner.updateUI(tests);
 
         var interstitialURL;
@@ -491,23 +665,38 @@ TestRunner.testUnloaded = function() {
         var newAssertionCount = SpecialPowers.assertionCount();
         var numAsserts = newAssertionCount - TestRunner._lastAssertionCount;
         TestRunner._lastAssertionCount = newAssertionCount;
 
         var url = TestRunner._urls[TestRunner._currentTest];
         var max = TestRunner._expectedMaxAsserts;
         var min = TestRunner._expectedMinAsserts;
         if (numAsserts > max) {
-            TestRunner.error("TEST-UNEXPECTED-FAIL | " + url + " | Assertion count " + numAsserts + " is greater than expected range " + min + "-" + max + " assertions.");
+            TestRunner.structuredLogger.testEnd(url,
+                                                "ERROR",
+                                                "OK",
+                                                "Assertion count " + numAsserts + " is greater than expected range " +
+                                                min + "-" + max + " assertions.",
+                                                {assertions: numAsserts, min_asserts: min, max_asserts: max});
             TestRunner.updateUI([{ result: false }]);
         } else if (numAsserts < min) {
-            TestRunner.error("TEST-UNEXPECTED-PASS | " + url + " | Assertion count " + numAsserts + " is less than expected range " + min + "-" + max + " assertions.");
+            TestRunner.structuredLogger.testEnd(url,
+                                                "OK",
+                                                "ERROR",
+                                                "Assertion count " + numAsserts + " is less than expected range " +
+                                                min + "-" + max + " assertions.",
+                                                {assertions: numAsserts, min_asserts: min, max_asserts: max});
             TestRunner.updateUI([{ result: false }]);
         } else if (numAsserts > 0) {
-            TestRunner.log("TEST-KNOWN-FAIL | " + url + " | Assertion count " + numAsserts + " within expected range " + min + "-" + max + " assertions.");
+            TestRunner.structuredLogger.testEnd(url,
+                                                "ERROR",
+                                                "ERROR",
+                                                "Assertion count " + numAsserts + " within expected range " +
+                                                min + "-" + max + " assertions.",
+                                                {assertions: numAsserts, min_asserts: min, max_asserts: max});
         }
     }
     TestRunner._currentTest++;
     if (TestRunner.runSlower) {
         setTimeout(TestRunner.runNextTest, 1000);
     } else {
         TestRunner.runNextTest();
     }
--- a/testing/mochitest/tests/SimpleTest/setup.js
+++ b/testing/mochitest/tests/SimpleTest/setup.js
@@ -5,17 +5,17 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 
 TestRunner.logEnabled = true;
 TestRunner.logger = LogController;
 
 /* Helper function */
 parseQueryString = function(encodedString, useArrays) {
   // strip a leading '?' from the encoded string
-  var qstr = (encodedString[0] == "?") ? encodedString.substring(1) : 
+  var qstr = (encodedString[0] == "?") ? encodedString.substring(1) :
                                          encodedString;
   var pairs = qstr.replace(/\+/g, "%20").split(/(\&amp\;|\&\#38\;|\&#x26;|\&)/);
   var o = {};
   var decode;
   if (typeof(decodeURIComponent) != "undefined") {
     decode = decodeURIComponent;
   } else {
     decode = unescape;
@@ -87,17 +87,17 @@ if (params.timeout) {
 
 // log levels for console and logfile
 var fileLevel =  params.fileLevel || null;
 var consoleLevel = params.consoleLevel || null;
 
 // repeat tells us how many times to repeat the tests
 if (params.repeat) {
   TestRunner.repeat = params.repeat;
-} 
+}
 
 if (params.runUntilFailure) {
   TestRunner.runUntilFailure = true;
 }
 
 // closeWhenDone tells us to close the browser when complete
 if (params.closeWhenDone) {
   TestRunner.onComplete = SpecialPowers.quit;
@@ -130,27 +130,24 @@ if (params.dumpOutputDirectory) {
 if (params.dumpAboutMemoryAfterTest) {
   TestRunner.dumpAboutMemoryAfterTest = true;
 }
 
 if (params.dumpDMDAfterTest) {
   TestRunner.dumpDMDAfterTest = true;
 }
 
-if (params.quiet) {
-  TestRunner.quiet = true;
-}
-
 // Log things to the console if appropriate.
 TestRunner.logger.addListener("dumpListener", consoleLevel + "", function(msg) {
-  dump(msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n");
+  var data = formatLogMessage(msg);
+  dump(data);
 });
 
 var gTestList = [];
-var RunSet = {}
+var RunSet = {};
 RunSet.runall = function(e) {
   // Filter tests to include|exclude tests based on data in params.filter.
   // This allows for including or excluding tests from the gTestList
   if (params.testManifest) {
     getTestManifest("http://mochi.test:8888/" + params.testManifest, params, function(filter) { gTestList = filterTests(filter, gTestList, params.runOnly); RunSet.runtests(); });
   } else {
     RunSet.runtests();
   }
@@ -185,17 +182,17 @@ RunSet.reloadAndRunAll = function(e) {
   var addParam = "";
   if (params.autorun) {
     window.location.search += "";
     window.location.href = window.location.href;
   } else if (window.location.search) {
     window.location.href += "&autorun=1";
   } else {
     window.location.href += "?autorun=1";
-  }  
+  }
 };
 
 // UI Stuff
 function toggleVisible(elem) {
     toggleElementClass("invisible", elem);
 }
 
 function makeVisible(elem) {
@@ -240,14 +237,14 @@ function hookupTests(testList) {
   } else {
     gTestList = [];
     for (var obj in testList) {
         gTestList.push(obj);
     }
   }
 
   document.getElementById('runtests').onclick = RunSet.reloadAndRunAll;
-  document.getElementById('toggleNonTests').onclick = toggleNonTests; 
+  document.getElementById('toggleNonTests').onclick = toggleNonTests;
   // run automatically if autorun specified
   if (params.autorun) {
     RunSet.runall();
   }
 }
--- a/testing/mochitest/tests/index.html
+++ b/testing/mochitest/tests/index.html
@@ -28,41 +28,33 @@ if (params.closeWhenDone) {
 }
 
 // logFile to write our results
 if (params.logFile) {
   var spl = SpecialPowersLogger(params.logFile);
   TestRunner.logger.addListener("mozLogger", fileLevel + "", spl.getLogCallback());
 }
 
-// if we get a quiet param, don't log to the console
-if (!params.quiet) {
-  function dumpListener(msg) {
-    dump("*** " + msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n");
-  }
-  TestRunner.logger.addListener("dumpListener", consoleLevel + "", dumpListener);
-}
-
-var RunSet = {}
+var RunSet = {};
 RunSet.runall = function() {
   TestRunner.runTests(
       'test_bug362788.xhtml'
   );
 };
 RunSet.reloadAndRunAll = function() {
   if (params.autorun) {
     window.location.href = window.location.href;
   } else if (location.search) {
     window.location.href = window.location.href + "&autorun=1";
   } else {
     window.location.href = window.location.href + "?autorun=1";
   }
 };
 
-// run automatically if 
+// run automatically if autorun is specified
 if (params.autorun) {
   RunSet.runall();
 }
 
 // hook up our buttons
 connect("runtests", "onclick", RunSet, "reloadAndRunAll");
 
 </script>
--- a/testing/mozbase/mozlog/mozlog/structured/formatters/machformatter.py
+++ b/testing/mozbase/mozlog/mozlog/structured/formatters/machformatter.py
@@ -1,17 +1,16 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import time
 
 import base
 
-
 def format_seconds(total):
     """Format number of seconds to MM:SS.DD form."""
     minutes, seconds = divmod(total, 60)
     return '%2d:%05.2f' % (minutes, seconds)
 
 
 class BaseMachFormatter(base.BaseFormatter):
     def __init__(self, start_time=None, write_interval=False, write_times=True):
--- a/testing/mozbase/mozlog/mozlog/structured/handlers/__init__.py
+++ b/testing/mozbase/mozlog/mozlog/structured/handlers/__init__.py
@@ -58,12 +58,12 @@ class StreamHandler(BaseHandler):
 
         :param data: Structured log message dictionary."""
         formatted = self.formatter(data)
         if not formatted:
             return
         with self._lock:
             #XXX Should encoding be the formatter's responsibility?
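+            # "replace" swaps unencodable characters for "?" instead of
+            # letting a logging call die with UnicodeEncodeError.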
             try:
-                self.stream.write(formatted.encode("utf8"))
+                self.stream.write(formatted.encode("utf8", "replace"))
             except:
                 raise
             self.stream.flush()
--- a/testing/mozbase/mozlog/mozlog/structured/structuredlog.py
+++ b/testing/mozbase/mozlog/mozlog/structured/structuredlog.py
@@ -108,23 +108,33 @@ class StructuredLogger(object):
                 break
 
     @property
     def handlers(self):
         """A list of handlers that will be called when a
         message is logged from this logger"""
         return self._handlers[self.name]
 
+    def log_raw(self, data):
+        """Log a pre-formed structured message dictionary.
+
+        :param data: Dictionary that must contain at least an 'action' key."""
+        if "action" not in data:
+            raise ValueError("Structured log data must include an 'action' key")
+        data = self._make_log_data(data['action'], data)
+        self._handle_log(data)
+
     def _log_data(self, action, data=None):
         if data is None:
             data = {}
+
+        log_data = self._make_log_data(action, data)
+        self._handle_log(log_data)
+
+    def _handle_log(self, data):
         with self._lock:
-            log_data = self._make_log_data(action, data)
             for handler in self.handlers:
-                handler(log_data)
+                handler(data)
 
     def _make_log_data(self, action, data):
         all_data = {"action": action,
                     "time": int(time.time() * 1000),
                     "thread": current_thread().name,
                     "pid": current_process().pid,
                     "source": self.name}
         if self.component:
--- a/testing/mozbase/mozrunner/mozrunner/base/device.py
+++ b/testing/mozbase/mozrunner/mozrunner/base/device.py
@@ -15,20 +15,21 @@ from .runner import BaseRunner
 from ..devices import Emulator
 
 class DeviceRunner(BaseRunner):
     """
     The base runner class used for running gecko on
     remote devices (or emulators), such as B2G.
     """
     def __init__(self, device_class, device_args=None, **kwargs):
-        process_args = kwargs.get('process_args', {})
-        process_args.update({ 'stream': sys.stdout,
-                              'processOutputLine': self.on_output,
-                              'onTimeout': self.on_timeout })
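+        # Set the defaults first; caller-supplied process_args take precedence.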
+        process_args = {'stream': sys.stdout,
+                        'processOutputLine': self.on_output,
+                        'onTimeout': self.on_timeout}
+        process_args.update(kwargs.get('process_args') or {})
+
         kwargs['process_args'] = process_args
         BaseRunner.__init__(self, **kwargs)
 
         device_args = device_args or {}
         self.device = device_class(**device_args)
 
         process_log = tempfile.NamedTemporaryFile(suffix='pidlog')
         self._env =  { 'MOZ_CRASHREPORTER': '1',
--- a/testing/specialpowers/content/MozillaLogger.js
+++ b/testing/specialpowers/content/MozillaLogger.js
@@ -1,26 +1,30 @@
 /**
  * MozillaLogger, a base class logger that just logs to stdout.
  */
 
 "use strict";
 
 function MozillaLogger(aPath) {
 }
+// This delimiter is used to avoid interleaving Mochitest/Gecko logs.
+var LOG_DELIMITER = String.fromCharCode(0xe175) + String.fromCharCode(0xee31) + String.fromCharCode(0x2c32) + String.fromCharCode(0xacbf);
+function formatLogMessage(msg) {
+    return LOG_DELIMITER + msg.info.join(' ') + LOG_DELIMITER + "\n";
+}
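+// e.g. formatLogMessage({info: ["1", "INFO", "foo"]}) returns
+// "<delimiter>1 INFO foo<delimiter>\n", where <delimiter> stands for the
+// four-character LOG_DELIMITER above (illustrative).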
 
 MozillaLogger.prototype = {
+  init : function(path) {},
 
-  init : function(path) {},
-  
   getLogCallback : function() {
     return function (msg) {
-      var data = msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n";
+      var data = formatLogMessage(msg);
       dump(data);
-    }
+    };
   },
 
   log : function(msg) {
     dump(msg);
   },
 
   close : function() {}
 };
@@ -39,23 +43,23 @@ function SpecialPowersLogger(aPath) {
 
 SpecialPowersLogger.prototype = {
   init : function (path) {
     SpecialPowers.setLogFile(path);
   },
 
   getLogCallback : function () {
     return function (msg) {
-      var data = msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n";
+      var data = formatLogMessage(msg);
       SpecialPowers.log(data);
 
       if (data.indexOf("SimpleTest FINISH") >= 0) {
         SpecialPowers.closeLogFile();
       }
-    }
+    };
   },
 
   log : function (msg) {
     SpecialPowers.log(msg);
   },
 
   close : function () {
     SpecialPowers.closeLogFile();
@@ -73,51 +77,59 @@ SpecialPowersLogger.prototype = {
 function MozillaFileLogger(aPath) {
   // Call the base constructor
   MozillaLogger.call(this);
   this.prototype = new MozillaLogger(aPath);
   this.init(aPath);
 }
 
 MozillaFileLogger.prototype = {
-  
+
   init : function (path) {
     var PR_WRITE_ONLY   = 0x02; // Open for writing only.
     var PR_CREATE_FILE  = 0x08;
     var PR_APPEND       = 0x10;
     this._file = Components.classes["@mozilla.org/file/local;1"].
                             createInstance(Components.interfaces.nsILocalFile);
     this._file.initWithPath(path);
     this._foStream = Components.classes["@mozilla.org/network/file-output-stream;1"].
                                      createInstance(Components.interfaces.nsIFileOutputStream);
     this._foStream.init(this._file, PR_WRITE_ONLY | PR_CREATE_FILE | PR_APPEND,
                                      436 /* 0664 */, 0);
+
+    this._converter = Components.classes["@mozilla.org/intl/converter-output-stream;1"].
+                      createInstance(Components.interfaces.nsIConverterOutputStream);
+    this._converter.init(this._foStream, "UTF-8", 0, 0);
   },
 
   getLogCallback : function() {
     return function (msg) {
-      var data = msg.num + " " + msg.level + " " + msg.info.join(' ') + "\n";
-      if (MozillaFileLogger._foStream)
-        this._foStream.write(data, data.length);
+      var data = formatLogMessage(msg);
+      if (MozillaFileLogger._converter) {
+        this._converter.writeString(data);
+      }
 
       if (data.indexOf("SimpleTest FINISH") >= 0) {
         MozillaFileLogger.close();
       }
-    }
+    };
   },
 
   log : function(msg) {
-    if (this._foStream)
-      this._foStream.write(msg, msg.length);
+    if (this._converter) {
+      this._converter.writeString(msg);
+    }
   },
-
   close : function() {
-    if(this._foStream)
-      this._foStream.close();
-  
+    if (this._converter) {
+      this._converter.flush();
+      this._converter.close();
+    }
+
     this._foStream = null;
+    this._converter = null;
     this._file = null;
   }
 };
 
 this.MozillaLogger = MozillaLogger;
 this.SpecialPowersLogger = SpecialPowersLogger;
 this.MozillaFileLogger = MozillaFileLogger;
--- a/toolkit/devtools/server/tests/mochitest/test_connectToChild.html
+++ b/toolkit/devtools/server/tests/mochitest/test_connectToChild.html
@@ -44,17 +44,17 @@ function runTests() {
   // this fake actor is disconnected.
   mm.loadFrameScript("data:text/javascript,new " + function FrameScriptScope() {
     const {DebuggerServer} = Cu.import("resource://gre/modules/devtools/dbg-server.jsm", {});
 
     if (!DebuggerServer.initialized) {
       DebuggerServer.init();
     }
 
-    function TestActor() {dump("instanciate test actor");}
+    function TestActor() {dump("instantiate test actor\n");}
     TestActor.prototype = {
       actorPrefix: "test",
 
       disconnect: function () {
         sendAsyncMessage("test-actor-disconnected", null);
       },
       hello: function () {
         return {msg:"world"};
--- a/toolkit/mozapps/extensions/test/browser/head.js
+++ b/toolkit/mozapps/extensions/test/browser/head.js
@@ -352,17 +352,17 @@ function close_manager(aManagerWindow, a
   let p = new Promise((resolve, reject) => {
     requestLongerTimeout(aLongerTimeout ? aLongerTimeout : 2);
 
     ok(aManagerWindow != null, "Should have an add-ons manager window to close");
     is(aManagerWindow.location, MANAGER_URI, "Should be closing window with correct URI");
 
     aManagerWindow.addEventListener("unload", function() {
       try {
-        dump("Manager window unload handler");
+        dump("Manager window unload handler\n");
         this.removeEventListener("unload", arguments.callee, false);
         resolve();
       } catch(e) {
         reject(e);
       }
     }, false);
   });
 
@@ -1098,17 +1098,17 @@ MockAddon.prototype = {
 
   set permissions(val) {
     return this._permissions = val;
   },
 
   get applyBackgroundUpdates() {
     return this._applyBackgroundUpdates;
   },
-  
+
   set applyBackgroundUpdates(val) {
     if (val != AddonManager.AUTOUPDATE_DEFAULT &&
         val != AddonManager.AUTOUPDATE_DISABLE &&
         val != AddonManager.AUTOUPDATE_ENABLE) {
       ok(false, "addon.applyBackgroundUpdates set to an invalid value: " + val);
     }
     this._applyBackgroundUpdates = val;
     AddonManagerPrivate.callAddonListeners("onPropertyChanged", this, ["applyBackgroundUpdates"]);