Bug 896087 - Output structured messages in head.js, interpret them in runxpcshelltests.py; r=ted
author Chris Manchester <cmanchester@mozilla.com>
Mon, 22 Jul 2013 19:44:25 -0700
changeset 143976 89e249527629504b111cd89ff5b97249fb5cf2ea
parent 143975 ec97a5129a9d83c6e22f4ab1d93020fcdbaceaa8
child 143977 a6d587dd711eca049834efdc02bc09d18929f040
push id1
push user root
push date Mon, 20 Oct 2014 17:29:22 +0000
reviewers ted
bugs 896087
milestone26.0a1
Bug 896087 - Output structured messages in head.js, interpret them in runxpcshelltests.py; r=ted
testing/xpcshell/head.js
testing/xpcshell/runxpcshelltests.py
testing/xpcshell/selftest.py
--- a/testing/xpcshell/head.js
+++ b/testing/xpcshell/head.js
@@ -14,16 +14,25 @@ var _quit = false;
 var _passed = true;
 var _tests_pending = 0;
 var _passedChecks = 0, _falsePassedChecks = 0;
 var _todoChecks = 0;
 var _cleanupFunctions = [];
 var _pendingTimers = [];
 var _profileInitialized = false;
 
+let _log = function (action, params) {
+  if (typeof _XPCSHELL_PROCESS != "undefined") {
+    params.process = _XPCSHELL_PROCESS;
+  }
+  params.action = action;
+  params._time = Date.now();
+  dump("\n" + JSON.stringify(params) + "\n");
+}
+
 function _dump(str) {
   let start = /^TEST-/.test(str) ? "\n" : "";
   if (typeof _XPCSHELL_PROCESS == "undefined") {
     dump(start + str);
   } else {
     dump(start + _XPCSHELL_PROCESS + ": " + str);
   }
 }
@@ -149,47 +158,53 @@ function _Timer(func, delay) {
     do_timeout(newDelay, this._func);
   }
 };
 
 function _do_main() {
   if (_quit)
     return;
 
-  _dump("TEST-INFO | (xpcshell/head.js) | running event loop\n");
+  _log("test_info",
+       {_message: "TEST-INFO | (xpcshell/head.js) | running event loop\n"});
 
   var thr = Components.classes["@mozilla.org/thread-manager;1"]
                       .getService().currentThread;
 
   while (!_quit)
     thr.processNextEvent(true);
 
   while (thr.hasPendingEvents())
     thr.processNextEvent(true);
 }
 
 function _do_quit() {
-  _dump("TEST-INFO | (xpcshell/head.js) | exiting test\n");
+  _log("test_info",
+       {_message: "TEST-INFO | (xpcshell/head.js) | exiting test\n"});
 
   _quit = true;
 }
 
-function _dump_exception_stack(stack) {
-  stack.split("\n").forEach(function(frame) {
-    if (!frame)
-      return;
-    // frame is of the form "fname(args)@file:line"
-    let frame_regexp = new RegExp("(.*)\\(.*\\)@(.*):(\\d*)", "g");
-    let parts = frame_regexp.exec(frame);
-    if (parts)
-        dump("JS frame :: " + parts[2] + " :: " + (parts[1] ? parts[1] : "anonymous")
-             + " :: line " + parts[3] + "\n");
-    else /* Could be a -e (command line string) style location. */
-        dump("JS frame :: " + frame + "\n");
-  });
+function _format_exception_stack(stack) {
+  // frame is of the form "fname(args)@file:line"
+  let frame_regexp = new RegExp("(.*)\\(.*\\)@(.*):(\\d*)", "g");
+  return stack.split("\n").reduce(function(stack_msg, frame) {
+    if (frame) {
+      let parts = frame_regexp.exec(frame);
+      if (parts) {
+        return stack_msg + "JS frame :: " + parts[2] + " :: " +
+          (parts[1] ? parts[1] : "anonymous") +
+          " :: line " + parts[3] + "\n";
+      }
+      else { /* Could be a -e (command line string) style location. */
+        return stack_msg + "JS frame :: " + frame + "\n";
+      }
+    }
+    return stack_msg;
+  }, "");
 }
 
 /**
  * Overrides idleService with a mock.  Idle is commonly used for maintenance
  * tasks, thus if a test uses a service that requires the idle service, it will
  * start handling them.
  * This behaviour would cause random failures and slowdown tests execution,
  * for example by running database vacuum or cleanups for each test.
@@ -336,33 +351,31 @@ function _execute_test() {
   } catch (e) {
     _passed = false;
     // do_check failures are already logged and set _quit to true and throw
     // NS_ERROR_ABORT. If both of those are true it is likely this exception
     // has already been logged so there is no need to log it again. It's
     // possible that this will mask an NS_ERROR_ABORT that happens after a
     // do_check failure though.
     if (!_quit || e != Components.results.NS_ERROR_ABORT) {
-      let msg = "TEST-UNEXPECTED-FAIL | ";
+      let msgObject = {};
       if (e.fileName) {
-        msg += e.fileName;
+        msgObject.source_file = e.fileName;
         if (e.lineNumber) {
-          msg += ":" + e.lineNumber;
+          msgObject.line_number = e.lineNumber;
         }
       } else {
-        msg += "xpcshell/head.js";
+        msgObject.source_file = "xpcshell/head.js";
       }
-      msg += " | " + e;
+      msgObject.diagnostic = _exception_message(e);
       if (e.stack) {
-        _dump(msg + " - See following stack:\n");
-        _dump_exception_stack(e.stack);
+        msgObject.diagnostic += " - See following stack:\n";
+        msgObject.stack = _format_exception_stack(e.stack);
       }
-      else {
-        _dump(msg + "\n");
-      }
+      _log("test_unexpected_fail", msgObject);
     }
   }
 
   // _TAIL_FILES is dynamically defined by <runxpcshelltests.py>.
   _load_files(_TAIL_FILES);
 
   // Execute all of our cleanup functions.
   var func;
@@ -372,34 +385,51 @@ function _execute_test() {
   // Restore idle service to avoid leaks.
   _fakeIdleService.deactivate();
 
   if (!_passed)
     return;
 
   var truePassedChecks = _passedChecks - _falsePassedChecks;
   if (truePassedChecks > 0) {
-    _dump("TEST-PASS | (xpcshell/head.js) | " + truePassedChecks + " (+ " +
-            _falsePassedChecks + ") check(s) passed\n");
-    _dump("TEST-INFO | (xpcshell/head.js) | " + _todoChecks +
-            " check(s) todo\n");
+    _log("test_pass",
+         {_message: "TEST-PASS | (xpcshell/head.js) | " + truePassedChecks + " (+ " +
+                    _falsePassedChecks + ") check(s) passed\n",
+          source_file: _TEST_FILE,
+          passed_checks: truePassedChecks});
+    _log("test_info",
+         {_message: "TEST-INFO | (xpcshell/head.js) | " + _todoChecks +
+                    " check(s) todo\n",
+          source_file: _TEST_FILE,
+          todo_checks: _todoChecks});
   } else {
     // ToDo: switch to TEST-UNEXPECTED-FAIL when all tests have been updated. (Bug 496443)
-    _dump("TEST-INFO | (xpcshell/head.js) | No (+ " + _falsePassedChecks + ") checks actually run\n");
+    _log("test_info",
+         {_message: "TEST-INFO | (xpcshell/head.js) | No (+ " + _falsePassedChecks +
+                    ") checks actually run\n",
+         source_file: _TEST_FILE});
   }
 }
 
 /**
  * Loads files.
  *
  * @param aFiles Array of files to load.
  */
 function _load_files(aFiles) {
   function loadTailFile(element, index, array) {
-    load(element);
+    try {
+      load(element);
+    } catch (e if e instanceof SyntaxError) {
+      _log("javascript_error",
+           {_message: "TEST-UNEXPECTED-FAIL | (xpcshell/head.js) | Source file " + element + " contains SyntaxError",
+            diagnostic: _exception_message(e),
+            source_file: element,
+            stack: _format_exception_stack(e.stack)});
+    }
   }
 
   aFiles.forEach(loadTailFile);
 }
 
 function _wrap_with_quotes_if_necessary(val) {
   return typeof val == "string" ? '"' + val + '"' : val;
 }
@@ -407,17 +437,20 @@ function _wrap_with_quotes_if_necessary(
 /************** Functions to be used from the tests **************/
 
 /**
  * Prints a message to the output log.
  */
 function do_print(msg) {
   var caller_stack = Components.stack.caller;
   msg = _wrap_with_quotes_if_necessary(msg);
-  _dump("TEST-INFO | " + caller_stack.filename + " | " + msg + "\n");
+  _log("test_info",
+       {source_file: caller_stack.filename,
+        diagnostic: msg});
+
 }
 
 /**
  * Calls the given function at least the specified number of milliseconds later.
  * The callback will not undershoot the given time, but it might overshoot --
  * don't expect precision!
  *
  * @param delay : uint
@@ -441,23 +474,25 @@ function do_execute_soon(callback, aName
         callback();
       } catch (e) {
         // do_check failures are already logged and set _quit to true and throw
         // NS_ERROR_ABORT. If both of those are true it is likely this exception
         // has already been logged so there is no need to log it again. It's
         // possible that this will mask an NS_ERROR_ABORT that happens after a
         // do_check failure though.
         if (!_quit || e != Components.results.NS_ERROR_ABORT) {
-          _dump("TEST-UNEXPECTED-FAIL | (xpcshell/head.js) | " + e);
           if (e.stack) {
-            dump(" - See following stack:\n");
-            _dump_exception_stack(e.stack);
-          }
-          else {
-            dump("\n");
+            _log("javascript_error",
+                 {source_file: "xpcshell/head.js",
+                  diagnostic: _exception_message(e) + " - See following stack:",
+                  stack: _format_exception_stack(e.stack)});
+          } else {
+            _log("javascript_error",
+                 {source_file: "xpcshell/head.js",
+                  diagnostic: _exception_message(e)});
           }
           _do_quit();
         }
       }
       finally {
         do_test_finished(funcName);
       }
     }
@@ -477,47 +512,44 @@ function do_throw(error, stack) {
   // otherwise get it from our call context
   stack = stack || error.stack || Components.stack.caller;
 
   if (stack instanceof Components.interfaces.nsIStackFrame)
     filename = stack.filename;
   else if (error.fileName)
     filename = error.fileName;
 
-  _dump_message_with_stack("TEST-UNEXPECTED-FAIL | " + filename + " | ", error, stack);
+  _log_message_with_stack("test_unexpected_fail",
+                          error, stack, filename);
 
   _passed = false;
   _do_quit();
   throw Components.results.NS_ERROR_ABORT;
 }
 
-function _dump_stack(stack) {
+function _format_stack(stack) {
   if (stack instanceof Components.interfaces.nsIStackFrame) {
+    let stack_msg = "";
     let frame = stack;
     while (frame != null) {
-      _dump(frame + "\n");
+      stack_msg += frame + "\n";
       frame = frame.caller;
     }
+    return stack_msg;
   }
-  else if (typeof stack == "string") {
-    let stackLines = stack.split("\n");
-    for (let line of stackLines) {
-      _dump(line + "\n");
-    }
-  }
+  return "" + stack;
 }
 
 function do_throw_todo(text, stack) {
   if (!stack)
     stack = Components.stack.caller;
 
   _passed = false;
-  _dump_message_with_stack("TEST-UNEXPECTED-PASS | " + stack.filename + " | ",
-      text, stack);
-
+  _log_message_with_stack("test_unexpected_pass",
+                          text, stack, stack.filename);
   _do_quit();
   throw Components.results.NS_ERROR_ABORT;
 }
 
 // Make a nice display string from an object that behaves
 // like Error
 function _exception_message(ex) {
   let message = "";
@@ -535,44 +567,49 @@ function _exception_message(ex) {
   }
   if (message !== "") {
     return message;
   }
   // Force ex to be stringified
   return "" + ex;
 }
 
-function _dump_message_with_stack(preamble, ex, stack) {
-  _dump(preamble + _exception_message(ex) +
-      (stack ? " - see following stack:\n" : "\n"));
+function _log_message_with_stack(action, ex, stack, filename, text) {
   if (stack) {
-    _dump_stack(stack);
+    _log(action,
+         {diagnostic: (text ? text : "") +
+                      _exception_message(ex) +
+                      " - See following stack:",
+          source_file: filename,
+          stack: _format_stack(stack)});
+  } else {
+    _log(action,
+         {diagnostic: (text ? text : "") +
+                      _exception_message(ex),
+          source_file: filename});
   }
 }
 
 function do_report_unexpected_exception(ex, text) {
   var caller_stack = Components.stack.caller;
   text = text ? text + " - " : "";
 
   _passed = false;
-  _dump_message_with_stack("TEST-UNEXPECTED-FAIL | " + caller_stack.filename +
-        " | " + text + "Unexpected exception ",
-        ex, ex.stack);
-
+  _log_message_with_stack("test_unexpected_fail", ex, ex.stack,
+                          caller_stack.filename, text + "Unexpected exception ");
   _do_quit();
   throw Components.results.NS_ERROR_ABORT;
 }
 
 function do_note_exception(ex, text) {
   var caller_stack = Components.stack.caller;
   text = text ? text + " - " : "";
 
-  _dump_message_with_stack("TEST-INFO | " + caller_stack.filename +
-        " | " + text + "Swallowed exception ",
-        ex, ex.stack);
+  _log_message_with_stack("test_info", ex, ex.stack,
+                          caller_stack.filename, text + "Swallowed exception ");
 }
 
 function _do_check_neq(left, right, stack, todo) {
   if (!stack)
     stack = Components.stack.caller;
 
   var text = _wrap_with_quotes_if_necessary(left) + " != " +
              _wrap_with_quotes_if_necessary(right);
@@ -594,24 +631,32 @@ function todo_check_neq(left, right, sta
 }
 
 function do_report_result(passed, text, stack, todo) {
   if (passed) {
     if (todo) {
       do_throw_todo(text, stack);
     } else {
       ++_passedChecks;
-      _dump("TEST-PASS | " + stack.filename + " | [" + stack.name + " : " +
-            stack.lineNumber + "] " + text + "\n");
+      _log("test_pass",
+           {source_file: stack.filename,
+            test_name: stack.name,
+            line_number: stack.lineNumber,
+            diagnostic: "[" + stack.name + " : " + stack.lineNumber + "] " +
+                        text + "\n"});
     }
   } else {
     if (todo) {
       ++_todoChecks;
-      _dump("TEST-KNOWN-FAIL | " + stack.filename + " | [" + stack.name +
-            " : " + stack.lineNumber + "] " + text +"\n");
+      _log("test_known_fail",
+           {source_file: stack.filename,
+            test_name: stack.name,
+            line_number: stack.lineNumber,
+            diagnostic: "[" + stack.name + " : " + stack.lineNumber + "] " +
+                        text + "\n"});
     } else {
       do_throw(text, stack);
     }
   }
 }
 
 function _do_check_eq(left, right, stack, todo) {
   if (!stack)
@@ -821,24 +866,27 @@ function format_pattern_match_failure(di
     a += format_pattern_match_failure(diagnosis.diagnosis, indent + "  ");
   }
   return indent + a;
 }
 
 function do_test_pending(aName) {
   ++_tests_pending;
 
-  _dump("TEST-INFO | (xpcshell/head.js) | test" + (aName ? " " + aName : "") +
-         " pending (" + _tests_pending + ")\n");
+  _log("test_pending",
+       {_message: "TEST-INFO | (xpcshell/head.js) | test" +
+                  (aName ? " " + aName : "") +
+                  " pending (" + _tests_pending + ")\n"});
 }
 
 function do_test_finished(aName) {
-  _dump("TEST-INFO | (xpcshell/head.js) | test" + (aName ? " " + aName : "") +
-         " finished (" + _tests_pending + ")\n");
-
+  _log("test_finish",
+       {_message: "TEST-INFO | (xpcshell/head.js) | test" +
+                  (aName ? " " + aName : "") +
+                  " finished (" + _tests_pending + ")\n"});
   if (--_tests_pending == 0)
     _do_quit();
 }
 
 function do_get_file(path, allowNonexistent) {
   try {
     let lf = Components.classes["@mozilla.org/file/directory_service;1"]
       .getService(Components.interfaces.nsIProperties)
@@ -853,19 +901,22 @@ function do_get_file(path, allowNonexist
           lf.append(bits[i]);
       }
     }
 
     if (!allowNonexistent && !lf.exists()) {
       // Not using do_throw(): caller will continue.
       _passed = false;
       var stack = Components.stack.caller;
-      _dump("TEST-UNEXPECTED-FAIL | " + stack.filename + " | [" +
-            stack.name + " : " + stack.lineNumber + "] " + lf.path +
-            " does not exist\n");
+      _log("test_unexpected_fail",
+           {source_file: stack.filename,
+            test_name: stack.name,
+            line_number: stack.lineNumber,
+            diagnostic: "[" + stack.name + " : " + stack.lineNumber + "] " +
+                        lf.path + " does not exist\n"});
     }
 
     return lf;
   }
   catch (ex) {
     do_throw(ex.toString(), Components.stack.caller);
   }
 
@@ -955,17 +1006,19 @@ function do_get_tempdir() {
 /**
  * Registers a directory with the profile service,
  * and return the directory as an nsILocalFile.
  *
  * @return nsILocalFile of the profile directory.
  */
 function do_get_profile() {
   if (!runningInParent) {
-    _dump("TEST-INFO | (xpcshell/head.js) | Ignoring profile creation from child process.\n");
+    _log("test_info",
+         {_message: "TEST-INFO | (xpcshell/head.js) | Ignoring profile creation from child process.\n"});
+
     return null;
   }
 
   if (!_profileInitialized) {
     // Since we have a profile, we will notify profile shutdown topics at
     // the end of the current test, to ensure correct cleanup on shutdown.
     do_register_cleanup(function() {
       let obsSvc = Components.classes["@mozilla.org/observer-service;1"].
@@ -1053,17 +1106,16 @@ function do_load_child_test_harness()
       + "const _TAIL_FILES=" + uneval(_TAIL_FILES) + "; "
       + "const _XPCSHELL_PROCESS='child';";
 
   if (this._TESTING_MODULES_DIR) {
     command += " const _TESTING_MODULES_DIR=" + uneval(_TESTING_MODULES_DIR) + ";";
   }
 
   command += " load(_HEAD_JS_PATH);";
-
   sendCommand(command);
 }
 
 /**
  * Runs an entire xpcshell unit test in a child process (rather than in chrome,
  * which is the default).
  *
  * This function returns immediately, before the test has completed.  
@@ -1078,19 +1130,19 @@ function run_test_in_child(testFile, opt
 {
   var callback = (typeof optionalCallback == 'undefined') ? 
                     do_test_finished : optionalCallback;
 
   do_load_child_test_harness();
 
   var testPath = do_get_file(testFile).path.replace(/\\/g, "/");
   do_test_pending("run in child");
-  sendCommand("_dump('CHILD-TEST-STARTED'); "
+  sendCommand("_log('child_test_start', {_message: 'CHILD-TEST-STARTED'}); "
               + "const _TEST_FILE=['" + testPath + "']; _execute_test(); "
-              + "_dump('CHILD-TEST-COMPLETED');", 
+              + "_log('child_test_end', {_message: 'CHILD-TEST-COMPLETED'});",
               callback);
 }
 
 
 /**
  * Add a test function to the list of tests that are to be run asynchronously.
  *
  * Each test function must call run_next_test() when it's done. Test files
--- a/testing/xpcshell/runxpcshelltests.py
+++ b/testing/xpcshell/runxpcshelltests.py
@@ -1,29 +1,34 @@
 #!/usr/bin/env python
 #
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import copy
-import re, sys, os, os.path, logging, shutil, math, time, traceback
+import json
+import math
+import os
+import os.path
+import random
+import shutil
+import signal
+import socket
+import sys
+import time
+import traceback
 import xml.dom.minidom
 from collections import deque
 from distutils import dir_util
-from glob import glob
 from multiprocessing import cpu_count
 from optparse import OptionParser
 from subprocess import Popen, PIPE, STDOUT
 from tempfile import mkdtemp, gettempdir
 from threading import Timer, Thread, Event, RLock
-import random
-import signal
-import socket
-import time
 
 try:
     import psutil
     HAVE_PSUTIL = True
 except ImportError:
     HAVE_PSUTIL = False
 
 from automation import Automation, getGlobalLog, resetGlobalLog
@@ -35,16 +40,28 @@ from automationutils import *
 # are intended to be continuous are protected by the same lock.
 LOG_MUTEX = RLock()
 
 HARNESS_TIMEOUT = 5 * 60
 
 # benchmarking on tbpl revealed that this works best for now
 NUM_THREADS = int(cpu_count() * 4)
 
+FAILURE_ACTIONS = set(['test_unexpected_fail',
+                       'test_unexpected_pass',
+                       'javascript_error'])
+ACTION_STRINGS = {
+    "test_unexpected_fail": "TEST-UNEXPECTED-FAIL",
+    "test_known_fail": "TEST-KNOWN-FAIL",
+    "test_unexpected_pass": "TEST-UNEXPECTED-PASS",
+    "javascript_error": "TEST-UNEXPECTED-FAIL",
+    "test_pass": "TEST-PASS",
+    "test_info": "TEST-INFO"
+}
+
 # --------------------------------------------------------------
 # TODO: this is a hack for mozbase without virtualenv, remove with bug 849900
 #
 here = os.path.dirname(__file__)
 mozbase = os.path.realpath(os.path.join(os.path.dirname(here), 'mozbase'))
 
 if os.path.isdir(mozbase):
     for package in os.listdir(mozbase):
@@ -105,16 +122,19 @@ class XPCShellTestThread(Thread):
         self.log = log
 
         # only one of these will be set to 1. adding them to the totals in
         # the harness
         self.passCount = 0
         self.todoCount = 0
         self.failCount = 0
 
+        self.output_lines = []
+        self.has_failure_output = False
+
         # event from main thread to signal work done
         self.event = event
 
     def run(self):
         try:
             self.run_test()
         except Exception as e:
             self.exception = e
@@ -324,25 +344,17 @@ class XPCShellTestThread(Thread):
         # setting up every test with its own plugins directory.
         if not self.pluginsPath:
             self.pluginsPath = os.path.join(self.appPath, 'plugins')
 
         self.pluginsDir = self.setupPluginsDir()
         if self.pluginsDir:
             self.xpcsCmd.extend(['-p', self.pluginsDir])
 
-    def print_stdout(self, stdout):
-        """Print stdout line-by-line to avoid overflowing buffers."""
-        self.log.info(">>>>>>>")
-        if stdout:
-            for line in stdout.splitlines():
-                self.log.info(line)
-        self.log.info("<<<<<<<")
-
-    def cleanupDir(self, directory, name, stdout, xunit_result):
+    def cleanupDir(self, directory, name, xunit_result):
         TRY_LIMIT = 25 # up to TRY_LIMIT attempts (one every second), because
                        # the Windows filesystem is slow to react to the changes
         try_count = 0
         while try_count < TRY_LIMIT:
             try:
                 self.removeDir(directory)
             except OSError:
                 self.log.info("TEST-INFO | Failed to remove directory: %s. Waiting." % directory)
@@ -359,27 +371,102 @@ class XPCShellTestThread(Thread):
             self.cleanup_dir_list.append(directory)
             return
 
         # we couldn't clean up the directory, and it's not the plugins dir,
         # which means that something wrong has probably happened
         with LOG_MUTEX:
             message = "TEST-UNEXPECTED-FAIL | %s | Failed to clean up directory: %s" % (name, sys.exc_info()[1])
             self.log.error(message)
-            self.print_stdout(stdout)
-            self.print_stdout(traceback.format_exc())
+            self.log_output(self.output_lines)
+            self.log_output(traceback.format_exc())
 
         self.failCount += 1
         xunit_result["passed"] = False
         xunit_result["failure"] = {
             "type": "TEST-UNEXPECTED-FAIL",
             "message": message,
             "text": "%s\n%s" % (stdout, traceback.format_exc())
         }
 
+    def append_message_from_line(self, line):
+        """Given a line of raw output, convert to message and append to
+        output buffer."""
+        if isinstance(line, basestring):
+            # This function has received unstructured output.
+            if line:
+                self.output_lines.append(line)
+                if 'TEST-UNEXPECTED-' in line:
+                    self.has_failure_output = True
+            return
+
+        msg = ['%s: ' % line['process'] if 'process' in line else '']
+
+        # Each call to the logger in head.js either specified '_message'
+        # or both 'source_file' and 'diagnostic'. If either of these are
+        # missing, they ended up being undefined as a result of the way
+        # the test was run.
+        if '_message' in line:
+            msg.append(line['_message'])
+        else:
+            msg.append('%s | %s | %s' % (ACTION_STRINGS[line['action']],
+                                         line.get('source_file', 'undefined'),
+                                         line.get('diagnostic', 'undefined')))
+
+        msg.append('\n%s' % line['stack'] if 'stack' in line else '')
+        self.output_lines.append(''.join(msg))
+
+    def parse_output(self, output):
+        """Parses process output for structured messages and saves output as it is
+        read. Sets self.has_failure_output in case of evidence of a failure"""
+        seen_proc_start = False
+        seen_proc_end = False
+        self.output_lines = []
+        for line_string in output.splitlines():
+            try:
+                line_object = json.loads(line_string)
+                if not isinstance(line_object, dict):
+                    self.append_message_from_line(line_string)
+                    continue
+            except ValueError:
+                self.append_message_from_line(line_string)
+                continue
+
+            if 'action' not in line_object:
+                # In case a test outputs something that happens to be valid
+                # JSON object.
+                self.append_message_from_line(line_string)
+                continue
+
+            action = line_object['action']
+            self.append_message_from_line(line_object)
+
+            if action in FAILURE_ACTIONS:
+                self.has_failure_output = True
+
+            elif action == 'child_test_start':
+                seen_proc_start = True
+            elif action == 'child_test_end':
+                seen_proc_end = True
+
+        if seen_proc_start and not seen_proc_end:
+            self.has_failure_output = True
+
+    def log_output(self, output):
+        """Prints given output line-by-line to avoid overflowing buffers."""
+        self.log.info(">>>>>>>")
+        if output:
+            if isinstance(output, basestring):
+                output = output.splitlines()
+            for part in output:
+                # For multi-line output, such as a stack trace
+                for line in part.splitlines():
+                    self.log.info(line)
+        self.log.info("<<<<<<<")
+
     def run_test(self):
         """Run an individual xpcshell test."""
         global gotSIGINT
 
         name = self.test_object['path']
 
         self.xunit_result = {'name': self.test_object['name'], 'classname': 'xpcshell'}
 
@@ -461,37 +548,28 @@ class XPCShellTestThread(Thread):
             if self.interactive:
                 # Not sure what else to do here...
                 self.keep_going = True
                 return
 
             if testTimer:
                 testTimer.cancel()
 
-            result = not ((self.getReturnCode(proc) != 0) or
-                          # if do_throw or do_check failed
-                          (stdout and re.search("^((parent|child): )?TEST-UNEXPECTED-",
-                                                stdout, re.MULTILINE)) or
-                          # if syntax error in xpcshell file
-                          (stdout and re.search(": SyntaxError:", stdout,
-                                                re.MULTILINE)) or
-                          # if e10s test started but never finished (child process crash)
-                          (stdout and re.search("^child: CHILD-TEST-STARTED",
-                                                stdout, re.MULTILINE)
-                                  and not re.search("^child: CHILD-TEST-COMPLETED",
-                                                    stdout, re.MULTILINE)))
+            self.parse_output(stdout)
+            result = not (self.has_failure_output or
+                          (self.getReturnCode(proc) != 0))
 
             if result != expected:
                 failureType = "TEST-UNEXPECTED-%s" % ("FAIL" if expected else "PASS")
                 message = "%s | %s | test failed (with xpcshell return code: %d), see following log:" % (
                               failureType, name, self.getReturnCode(proc))
 
                 with LOG_MUTEX:
                     self.log.error(message)
-                    self.print_stdout(stdout)
+                    self.log_output(self.output_lines)
 
                 self.failCount += 1
                 self.xunit_result["passed"] = False
 
                 self.xunit_result["failure"] = {
                   "type": failureType,
                   "message": message,
                   "text": stdout
@@ -499,17 +577,17 @@ class XPCShellTestThread(Thread):
             else:
                 now = time.time()
                 timeTaken = (now - startTime) * 1000
                 self.xunit_result["time"] = now - startTime
 
                 with LOG_MUTEX:
                     self.log.info("TEST-%s | %s | test passed (time: %.3fms)" % ("PASS" if expected else "KNOWN-FAIL", name, timeTaken))
                     if self.verbose:
-                        self.print_stdout(stdout)
+                        self.log_output(self.output_lines)
 
                 self.xunit_result["passed"] = True
 
                 if expected:
                     self.passCount = 1
                 else:
                     self.todoCount = 1
                     self.xunit_result["todo"] = True
@@ -529,37 +607,37 @@ class XPCShellTestThread(Thread):
 
         finally:
             # We can sometimes get here before the process has terminated, which would
             # cause removeDir() to fail - so check for the process & kill it it needed.
             if proc and self.poll(proc) is None:
                 with LOG_MUTEX:
                     message = "TEST-UNEXPECTED-FAIL | %s | Process still running after test!" % name
                     self.log.error(message)
-                    self.print_stdout(stdout)
+                    self.log_output(self.output_lines)
 
                 self.failCount = 1
                 self.xunit_result["passed"] = False
                 self.xunit_result["failure"] = {
                   "type": "TEST-UNEXPECTED-FAIL",
                   "message": message,
                   "text": stdout
                 }
                 self.kill(proc)
 
 
             # We don't want to delete the profile when running check-interactive
             # or check-one.
             if self.profileDir and not self.interactive and not self.singleFile:
-                self.cleanupDir(self.profileDir, name, stdout, self.xunit_result)
+                self.cleanupDir(self.profileDir, name, self.xunit_result)
 
-            self.cleanupDir(self.tempDir, name, stdout, self.xunit_result)
+            self.cleanupDir(self.tempDir, name, self.xunit_result)
 
             if self.pluginsDir:
-                self.cleanupDir(self.pluginsDir, name, stdout, self.xunit_result)
+                self.cleanupDir(self.pluginsDir, name, self.xunit_result)
 
         if gotSIGINT:
             self.xunit_result["passed"] = False
             self.xunit_result["time"] = "0.0"
             self.xunit_result["failure"] = {
                 "type": "SIGINT",
                 "message": "Received SIGINT",
                 "text": "Received SIGINT (control-C) during test execution."
--- a/testing/xpcshell/selftest.py
+++ b/testing/xpcshell/selftest.py
@@ -59,17 +59,17 @@ add_test(function test_child_simple () {
 '''
 
 CHILD_TEST_HANG = '''
 function run_test () { run_next_test(); }
 
 add_test(function test_child_simple () {
   do_test_pending("hang test");
   do_load_child_test_harness();
-  sendCommand("_dump('CHILD-TEST-STARTED'); " +
+  sendCommand("_log('child_test_start', {_message: 'CHILD-TEST-STARTED'}); " +
               + "const _TEST_FILE=['test_pass.js']; _execute_test(); ",
               do_test_finished);
   run_next_test();
 });
 '''
 
 ADD_TASK_SINGLE = '''
 Components.utils.import("resource://gre/modules/commonjs/sdk/core/promise.js");