Bug 956739 - Move marionette tests to structured logging.;r=mdas
author: Chris Manchester <cmanchester@mozilla.com>
Thu, 16 Jan 2014 14:58:56 +0000
changeset 192840 53a30a12d70ed9122fca579b0f4af9989d70a05a
parent 192839 b94c916122c80098144b9e10ee1323b07f8302da
child 192841 8df1affbbde7a3805ab195cd2a687c9d7aaa163f
push id: 27098
push user: kwierso@gmail.com
push date: Tue, 08 Jul 2014 22:38:20 +0000
treeherder: mozilla-central@e185bf9449f6 [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: mdas
bugs: 956739
milestone: 33.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 956739 - Move marionette tests to structured logging.;r=mdas
testing/marionette/client/marionette/marionette_test.py
testing/marionette/client/marionette/runner/base.py
testing/marionette/client/marionette/runner/mixins/b2g.py
testing/marionette/client/marionette/runtests.py
testing/marionette/client/requirements.txt
testing/marionette/client/setup.py
testing/marionette/mach_commands.py
--- a/testing/marionette/client/marionette/marionette_test.py
+++ b/testing/marionette/client/marionette/marionette_test.py
@@ -10,24 +10,25 @@ import sys
 import socket
 import time
 import types
 import unittest
 import weakref
 import warnings
 
 from errors import (
-        ErrorCodes, MarionetteException, InstallGeckoError, TimeoutException, InvalidResponseException, 
+        ErrorCodes, MarionetteException, InstallGeckoError, TimeoutException, InvalidResponseException,
         JavascriptException, NoSuchElementException, XPathLookupException, NoSuchWindowException,
         StaleElementException, ScriptTimeoutException, ElementNotVisibleException,
         NoSuchFrameException, InvalidElementStateException, NoAlertPresentException,
         InvalidCookieDomainException, UnableToSetCookieException, InvalidSelectorException,
         MoveTargetOutOfBoundsException, FrameSendNotInitializedError, FrameSendFailureError
         )
 from marionette import Marionette
+from mozlog.structured.structuredlog import get_default_logger
 
 class SkipTest(Exception):
     """
     Raise this exception in a test to skip it.
 
     Usually you can use TestResult.skip() or one of the skipping decorators
     instead of raising this directly.
     """
@@ -239,16 +240,23 @@ class CommonTestCase(unittest.TestCase):
     def test_name(self):
         if hasattr(self, 'jsFile'):
             return os.path.basename(self.jsFile)
         else:
             return '%s.py %s.%s' % (self.__class__.__module__,
                                     self.__class__.__name__,
                                     self._testMethodName)
 
+    def id(self):
+        # TBPL starring requires that the "test name" field of a failure message
+        # not differ over time. The test name to be used is passed to
+        # mozlog.structured via the test id, so this is overriden to maintain
+        # consistency.
+        return self.test_name
+
     def set_up_test_page(self, emulator, url="test.html", permissions=None):
         emulator.set_context("content")
         url = emulator.absolute_url(url)
         emulator.navigate(url)
 
         if not permissions:
             return
 
@@ -498,24 +506,24 @@ setReq.onerror = function() {
 
             self.assertTrue(not 'timeout' in self.jsFile,
                             'expected timeout not triggered')
 
             if 'fail' in self.jsFile:
                 self.assertTrue(results['failed'] > 0,
                                 "expected test failures didn't occur")
             else:
-                fails = []
+                logger = get_default_logger()
                 for failure in results['failures']:
-                    diag = "" if failure.get('diag') is None else "| %s " % failure['diag']
+                    diag = "" if failure.get('diag') is None else failure['diag']
                     name = "got false, expected true" if failure.get('name') is None else failure['name']
-                    fails.append('TEST-UNEXPECTED-FAIL | %s %s| %s' %
-                                 (os.path.basename(self.jsFile), diag, name))
+                    logger.test_status(self.test_name, name, 'FAIL',
+                                       message=diag)
                 self.assertEqual(0, results['failed'],
-                                 '%d tests failed:\n%s' % (results['failed'], '\n'.join(fails)))
+                                 '%d tests failed' % (results['failed']))
 
             self.assertTrue(results['passed'] + results['failed'] > 0,
                             'no tests run')
 
         except ScriptTimeoutException:
             if 'timeout' in self.jsFile:
                 # expected exception
                 pass
--- a/testing/marionette/client/marionette/runner/base.py
+++ b/testing/marionette/client/marionette/runner/base.py
@@ -1,64 +1,63 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
+from datetime import datetime
 from optparse import OptionParser
-from datetime import datetime
-import logging
+
+import json
+import mozinfo
+import moznetwork
 import os
-import unittest
+import random
 import socket
 import sys
 import time
 import traceback
-import random
-import re
-import mozinfo
-import moznetwork
+import unittest
 import xml.dom.minidom as dom
 
 from manifestparser import TestManifest
+from marionette import Marionette
+from mixins.b2g import B2GTestResultMixin, get_b2g_pid, get_dm
 from mozhttpd import MozHttpd
-from marionette import Marionette
+from moztest.adapters.unit import StructuredTestRunner, StructuredTestResult
 from moztest.results import TestResultCollection, TestResult, relevant_line
-from mixins.b2g import B2GTestResultMixin, get_b2g_pid, get_dm
-
 
 class MarionetteTest(TestResult):
 
     @property
     def test_name(self):
         if self.test_class is not None:
             return '%s.py %s.%s' % (self.test_class.split('.')[0],
                                     self.test_class,
                                     self.name)
         else:
             return self.name
 
-
-class MarionetteTestResult(unittest._TextTestResult, TestResultCollection):
+class MarionetteTestResult(StructuredTestResult, TestResultCollection):
 
     resultClass = MarionetteTest
 
     def __init__(self, *args, **kwargs):
         self.marionette = kwargs.pop('marionette')
         TestResultCollection.__init__(self, 'MarionetteTest')
         self.passed = 0
         self.testsRun = 0
         self.result_modifiers = [] # used by mixins to modify the result
         pid = kwargs.pop('b2g_pid')
         if pid:
             if B2GTestResultMixin not in self.__class__.__bases__:
                 bases = [b for b in self.__class__.__bases__]
                 bases.append(B2GTestResultMixin)
                 self.__class__.__bases__ = tuple(bases)
             B2GTestResultMixin.__init__(self, b2g_pid=pid)
-        unittest._TextTestResult.__init__(self, *args, **kwargs)
+        StructuredTestResult.__init__(self, *args, **kwargs)
 
     @property
     def skipped(self):
         return [t for t in self if t.result == 'SKIPPED']
 
     @skipped.setter
     def skipped(self, value):
         pass
@@ -126,67 +125,41 @@ class MarionetteTestResult(unittest._Tex
         t.finish(result_actual,
                  time_end=time.time() if test.start_time else 0,
                  reason=relevant_line(output),
                  output=output)
         self.append(t)
 
     def addError(self, test, err):
         self.add_test_result(test, output=self._exc_info_to_string(err, test), result_actual='ERROR')
-        self._mirrorOutput = True
-        if self.showAll:
-            self.stream.writeln("ERROR")
-        elif self.dots:
-            self.stream.write('E')
-            self.stream.flush()
+        super(MarionetteTestResult, self).addError(test, err)
 
     def addFailure(self, test, err):
         self.add_test_result(test, output=self._exc_info_to_string(err, test), result_actual='UNEXPECTED-FAIL')
-        self._mirrorOutput = True
-        if self.showAll:
-            self.stream.writeln("FAIL")
-        elif self.dots:
-            self.stream.write('F')
-            self.stream.flush()
+        super(MarionetteTestResult, self).addFailure(test, err)
 
     def addSuccess(self, test):
         self.passed += 1
         self.add_test_result(test, result_actual='PASS')
-        if self.showAll:
-            self.stream.writeln("ok")
-        elif self.dots:
-            self.stream.write('.')
-            self.stream.flush()
+        super(MarionetteTestResult, self).addSuccess(test)
 
     def addExpectedFailure(self, test, err):
         """Called when an expected failure/error occured."""
         self.add_test_result(test, output=self._exc_info_to_string(err, test),
-                        result_actual='KNOWN-FAIL')
-        if self.showAll:
-            self.stream.writeln("expected failure")
-        elif self.dots:
-            self.stream.write("x")
-            self.stream.flush()
+                             result_actual='KNOWN-FAIL')
+        super(MarionetteTestResult, self).addExpectedFailure(test, err)
 
     def addUnexpectedSuccess(self, test):
         """Called when a test was expected to fail, but succeed."""
         self.add_test_result(test, result_actual='UNEXPECTED-PASS')
-        if self.showAll:
-            self.stream.writeln("unexpected success")
-        elif self.dots:
-            self.stream.write("u")
-            self.stream.flush()
+        super(MarionetteTestResult, self).addUnexpectedSuccess(test)
 
     def addSkip(self, test, reason):
         self.add_test_result(test, output=reason, result_actual='SKIPPED')
-        if self.showAll:
-            self.stream.writeln("skipped {0!r}".format(reason))
-        elif self.dots:
-            self.stream.write("s")
-            self.stream.flush()
+        super(MarionetteTestResult, self).addSkip(test, reason)
 
     def getInfo(self, test):
         return test.test_name
 
     def getDescription(self, test):
         doc_first_line = test.shortDescription()
         if self.descriptions and doc_first_line:
             return '\n'.join((str(test), doc_first_line))
@@ -204,143 +177,63 @@ class MarionetteTestResult(unittest._Tex
                 skip_log = True
                 for line in testcase.loglines:
                     str_line = ' '.join(line)
                     if not 'TEST-END' in str_line and not 'TEST-START' in str_line:
                         skip_log = False
                         break
                 if skip_log:
                     return
-                self.stream.writeln('\nSTART LOG:')
+                self.logger.info('START LOG:')
                 for line in testcase.loglines:
-                    self.stream.writeln(' '.join(line).encode('ascii', 'replace'))
-                self.stream.writeln('END LOG:')
-
-    def printErrorList(self, flavour, errors):
-        TIMEOUT_MESSAGE = "ScriptTimeoutException: ScriptTimeoutException: timed out"
-        for error in errors:
-            err = error.output
-            self.stream.writeln(self.separator1)
-            self.stream.writeln("%s: %s" % (flavour, error.description))
-            self.stream.writeln(self.separator2)
-            lastline = None
-            fail_present = None
-            test_name = self.getInfo(error)
-            for line in err:
-                if not line.startswith('\t') and line != '':
-                    lastline = line
-                if 'TEST-UNEXPECTED-FAIL' in line:
-                    fail_present = True
-            for line in err:
-                if line != lastline or fail_present:
-                    if re.match('.*\.js', test_name):
-                        if error.reason != TIMEOUT_MESSAGE:
-                            self.stream.writeln("%s" % line)
-                    else:
-                        self.stream.writeln("%s" % line)
-
-                else:
-                    self.stream.writeln("TEST-UNEXPECTED-FAIL | %s | %s" %
-                                        (test_name, error.reason))
+                    self.logger.info(' '.join(line).encode('ascii', 'replace'))
+                self.logger.info('END LOG:')
 
     def stopTest(self, *args, **kwargs):
         unittest._TextTestResult.stopTest(self, *args, **kwargs)
         if self.marionette.check_for_crash():
             # this tells unittest.TestSuite not to continue running tests
             self.shouldStop = True
 
 
-class MarionetteTextTestRunner(unittest.TextTestRunner):
+class MarionetteTextTestRunner(StructuredTestRunner):
 
     resultclass = MarionetteTestResult
 
     def __init__(self, **kwargs):
-        self.marionette = kwargs['marionette']
+        self.marionette = kwargs.pop('marionette')
         self.capabilities = kwargs.pop('capabilities')
         self.pre_run_functions = []
         self.b2g_pid = None
-        del kwargs['marionette']
 
         if self.capabilities["device"] != "desktop" and self.capabilities["browserName"] == "B2G":
             def b2g_pre_run():
                 dm_type = os.environ.get('DM_TRANS', 'adb')
                 if dm_type == 'adb':
                     self.b2g_pid = get_b2g_pid(get_dm(self.marionette))
             self.pre_run_functions.append(b2g_pre_run)
 
-        unittest.TextTestRunner.__init__(self, **kwargs)
+        StructuredTestRunner.__init__(self, **kwargs)
+
 
     def _makeResult(self):
         return self.resultclass(self.stream,
                                 self.descriptions,
                                 self.verbosity,
                                 marionette=self.marionette,
-                                b2g_pid=self.b2g_pid)
+                                b2g_pid=self.b2g_pid,
+                                logger=self.logger)
 
     def run(self, test):
         "Run the given test case or test suite."
         for pre_run_func in self.pre_run_functions:
             pre_run_func()
-        result = self._makeResult()
-        if hasattr(self, 'failfast'):
-            result.failfast = self.failfast
-        if hasattr(self, 'buffer'):
-            result.buffer = self.buffer
-        startTime = time.time()
-        startTestRun = getattr(result, 'startTestRun', None)
-        if startTestRun is not None:
-            startTestRun()
-        try:
-            test(result)
-        finally:
-            stopTestRun = getattr(result, 'stopTestRun', None)
-            if stopTestRun is not None:
-                stopTestRun()
-        stopTime = time.time()
-        if hasattr(result, 'time_taken'):
-            result.time_taken = stopTime - startTime
+
+        result = super(MarionetteTextTestRunner, self).run(test)
         result.printLogs(test)
-        result.printErrors()
-        if hasattr(result, 'separator2'):
-            self.stream.writeln(result.separator2)
-        run = result.testsRun
-        self.stream.writeln("Ran %d test%s in %.3fs" %
-                            (run, run != 1 and "s" or "", result.time_taken))
-        self.stream.writeln()
-
-        expectedFails = unexpectedSuccesses = skipped = 0
-        try:
-            results = map(len, (result.expectedFailures,
-                                result.unexpectedSuccesses,
-                                result.skipped))
-        except AttributeError:
-            pass
-        else:
-            expectedFails, unexpectedSuccesses, skipped = results
-
-        infos = []
-        if not result.wasSuccessful():
-            self.stream.write("FAILED")
-            failed, errored = map(len, (result.failures, result.errors))
-            if failed:
-                infos.append("failures=%d" % failed)
-            if errored:
-                infos.append("errors=%d" % errored)
-        else:
-            self.stream.write("OK")
-        if skipped:
-            infos.append("skipped=%d" % skipped)
-        if expectedFails:
-            infos.append("expected failures=%d" % expectedFails)
-        if unexpectedSuccesses:
-            infos.append("unexpected successes=%d" % unexpectedSuccesses)
-        if infos:
-            self.stream.writeln(" (%s)" % (", ".join(infos),))
-        else:
-            self.stream.write("\n")
         return result
 
 
 class BaseMarionetteOptions(OptionParser):
     def __init__(self, **kwargs):
         OptionParser.__init__(self, **kwargs)
         self.parse_args_handlers = [] # Used by mixins
         self.verify_usage_handlers = [] # Used by mixins
@@ -418,17 +311,17 @@ class BaseMarionetteOptions(OptionParser
                         action='store',
                         help='application to use')
         self.add_option('--app-arg',
                         dest='app_args',
                         action='append',
                         default=[],
                         help='specify a command line argument to be passed onto the application')
         self.add_option('--binary',
-                        dest='bin',
+                        dest='binary',
                         action='store',
                         help='gecko executable to launch before running the test')
         self.add_option('--profile',
                         dest='profile',
                         action='store',
                         help='profile to use when launching the gecko process. if not passed, then a profile will be '
                              'constructed and used')
         self.add_option('--repeat',
@@ -453,20 +346,16 @@ class BaseMarionetteOptions(OptionParser
         self.add_option('--symbols-path',
                         dest='symbols_path',
                         action='store',
                         help='absolute path to directory containing breakpad symbols, or the url of a zip file containing symbols')
         self.add_option('--timeout',
                         dest='timeout',
                         type=int,
                         help='if a --timeout value is given, it will set the default page load timeout, search timeout and script timeout to the given value. If not passed in, it will use the default values of 30000ms for page load, 0ms for search timeout and 10000ms for script timeout')
-        self.add_option('--es-server',
-                        dest='es_servers',
-                        action='append',
-                        help='the ElasticSearch server to use for autolog submission')
         self.add_option('--shuffle',
                         action='store_true',
                         dest='shuffle',
                         default=False,
                         help='run tests in a random order')
         self.add_option('--shuffle-seed',
                         dest='shuffle_seed',
                         type=int,
@@ -490,41 +379,43 @@ class BaseMarionetteOptions(OptionParser
                         help='sets the web server\'s root directory to the given path')
         self.add_option('--gecko-log',
                         dest='gecko_log',
                         action='store',
                         help="Define the path to store log file. If the path is"
                              " a directory, the real log file will be created"
                              " given the format gecko-(timestamp).log. If it is"
                              " a file, if will be used directly. Default: 'gecko.log'")
+        self.add_option('--logger-name',
+                        dest='logger_name',
+                        action='store',
+                        default='Marionette-based Tests',
+                        help='Define the name to associate with the logger used')
+
 
     def parse_args(self, args=None, values=None):
         options, tests = OptionParser.parse_args(self, args, values)
         for handler in self.parse_args_handlers:
             handler(options, tests, args, values)
 
         return (options, tests)
 
     def verify_usage(self, options, tests):
         if not tests:
             print 'must specify one or more test files, manifests, or directories'
             sys.exit(1)
 
-        if not options.emulator and not options.address and not options.bin:
+        if not options.emulator and not options.address and not options.binary:
             print 'must specify --binary, --emulator or --address'
             sys.exit(1)
 
-        if options.emulator and options.bin:
+        if options.emulator and options.binary:
             print 'can\'t specify both --emulator and --binary'
             sys.exit(1)
 
-        if not options.es_servers:
-            options.es_servers = ['elasticsearch-zlb.dev.vlan81.phx.mozilla.com:9200',
-                                  'elasticsearch-zlb.webapp.scl3.mozilla.com:9200']
-
         # default to storing logcat output for emulator runs
         if options.emulator and not options.logdir:
             options.logdir = 'logcat'
 
         # check for valid resolution string, strip whitespaces
         try:
             if options.emulator_res:
                 dims = options.emulator_res.split('x')
@@ -555,34 +446,34 @@ class BaseMarionetteOptions(OptionParser
 
 
 class BaseMarionetteTestRunner(object):
 
     textrunnerclass = MarionetteTextTestRunner
 
     def __init__(self, address=None, emulator=None, emulator_binary=None,
                  emulator_img=None, emulator_res='480x800', homedir=None,
-                 app=None, app_args=None, bin=None, profile=None, autolog=False,
+                 app=None, app_args=None, binary=None, profile=None, autolog=False,
                  revision=None, logger=None, testgroup="marionette", no_window=False,
                  logdir=None, xml_output=None, repeat=0,
                  testvars=None, tree=None, type=None, device_serial=None,
-                 symbols_path=None, timeout=None, es_servers=None, shuffle=False,
+                 symbols_path=None, timeout=None, shuffle=False,
                  shuffle_seed=random.randint(0, sys.maxint), sdcard=None,
                  this_chunk=1, total_chunks=1, sources=None, server_root=None,
                  gecko_log=None,
                  **kwargs):
         self.address = address
         self.emulator = emulator
         self.emulator_binary = emulator_binary
         self.emulator_img = emulator_img
         self.emulator_res = emulator_res
         self.homedir = homedir
         self.app = app
         self.app_args = app_args or []
-        self.bin = bin
+        self.bin = binary
         self.profile = profile
         self.autolog = autolog
         self.testgroup = testgroup
         self.revision = revision
         self.logger = logger
         self.no_window = no_window
         self.httpd = None
         self.marionette = None
@@ -594,52 +485,45 @@ class BaseMarionetteTestRunner(object):
         self.tree = tree
         self.type = type
         self.device_serial = device_serial
         self.symbols_path = symbols_path
         self.timeout = timeout
         self._device = None
         self._capabilities = None
         self._appName = None
-        self.es_servers = es_servers
         self.shuffle = shuffle
         self.shuffle_seed = shuffle_seed
         self.sdcard = sdcard
         self.sources = sources
         self.server_root = server_root
         self.this_chunk = this_chunk
         self.total_chunks = total_chunks
         self.gecko_log = gecko_log
         self.mixin_run_tests = []
         self.manifest_skipped_tests = []
         self.tests = []
 
         if testvars:
             if not os.path.exists(testvars):
                 raise IOError('--testvars file does not exist')
 
-            import json
             try:
                 with open(testvars) as f:
                     self.testvars = json.loads(f.read())
             except ValueError as e:
                 json_path = os.path.abspath(testvars)
                 raise Exception("JSON file (%s) is not properly "
                                 "formatted: %s" % (json_path, e.message))
 
         # set up test handlers
         self.test_handlers = []
 
         self.reset_test_stats()
 
-        if self.logger is None:
-            self.logger = logging.getLogger('Marionette')
-            self.logger.setLevel(logging.INFO)
-            self.logger.addHandler(logging.StreamHandler())
-
         if self.logdir:
             if not os.access(self.logdir, os.F_OK):
                 os.mkdir(self.logdir)
 
         # for XML output
         self.testvars['xml_output'] = self.xml_output
         self.results = []
 
@@ -741,60 +625,16 @@ class BaseMarionetteTestRunner(object):
                 'no_window': self.no_window,
                 'sdcard': self.sdcard,
             })
         return kwargs
 
     def start_marionette(self):
         self.marionette = Marionette(**self._build_kwargs())
 
-    def post_to_autolog(self, elapsedtime):
-        self.logger.info('posting results to autolog')
-
-        logfile = None
-        if self.emulator:
-            filename = os.path.join(os.path.abspath(self.logdir),
-                                    "emulator-%d.log" % self.marionette.emulator.port)
-            if os.access(filename, os.F_OK):
-                logfile = filename
-
-        for es_server in self.es_servers:
-
-            # This is all autolog stuff.
-            # See: https://wiki.mozilla.org/Auto-tools/Projects/Autolog
-            from mozautolog import RESTfulAutologTestGroup
-            testgroup = RESTfulAutologTestGroup(
-                testgroup=self.testgroup,
-                os='android',
-                platform='emulator',
-                harness='marionette',
-                server=es_server,
-                restserver=None,
-                machine=socket.gethostname(),
-                logfile=logfile)
-
-            testgroup.set_primary_product(
-                tree=self.tree,
-                buildtype='opt',
-                revision=self.revision)
-
-            testgroup.add_test_suite(
-                testsuite='b2g emulator testsuite',
-                elapsedtime=elapsedtime.seconds,
-                cmdline='',
-                passed=self.passed,
-                failed=self.failed,
-                todo=self.todo)
-
-            # Add in the test failures.
-            for f in self.failures:
-                testgroup.add_test_failure(test=f[0], text=f[1], status=f[2])
-
-            testgroup.submit()
-
     def run_tests(self, tests):
         self.reset_test_stats()
         starttime = datetime.utcnow()
 
         need_external_ip = True
         if not self.marionette:
             self.start_marionette()
             if self.emulator:
@@ -803,53 +643,54 @@ class BaseMarionetteTestRunner(object):
             if not self._capabilities:
                 self.capabilities
             # if we're working against a desktop version, we usually don't need
             # an external ip
             if self._capabilities['device'] == "desktop":
                 need_external_ip = False
 
         if not self.httpd:
-            print "starting httpd"
+            self.logger.info("starting httpd")
             self.start_httpd(need_external_ip)
 
         for test in tests:
             self.add_test(test)
 
+        self.logger.suite_start(self.tests)
+
         counter = self.repeat
         while counter >=0:
             round = self.repeat - counter
             if round > 0:
                 self.logger.info('\nREPEAT %d\n-------' % round)
             self.run_test_sets()
             counter -= 1
+
         self.logger.info('\nSUMMARY\n-------')
         self.logger.info('passed: %d' % self.passed)
         if self.unexpected_successes == 0:
             self.logger.info('failed: %d' % self.failed)
         else:
-            self.logger.info('failed: %d (unexpected sucesses: %d)', self.failed, self.unexpected_successes)
+            self.logger.info('failed: %d (unexpected sucesses: %d)' % (self.failed, self.unexpected_successes))
         if self.skipped == 0:
-            self.logger.info('todo: %d', self.todo)
+            self.logger.info('todo: %d' % self.todo)
         else:
-            self.logger.info('todo: %d (skipped: %d)', self.todo, self.skipped)
+            self.logger.info('todo: %d (skipped: %d)' % (self.todo, self.skipped))
 
         if self.failed > 0:
             self.logger.info('\nFAILED TESTS\n-------')
             for failed_test in self.failures:
                 self.logger.info('%s' % failed_test[0])
 
         try:
             self.marionette.check_for_crash()
         except:
             traceback.print_exc()
 
         self.elapsedtime = datetime.utcnow() - starttime
-        if self.autolog:
-            self.post_to_autolog(self.elapsedtime)
 
         if self.xml_output:
             xml_dir = os.path.dirname(os.path.abspath(self.xml_output))
             if not os.path.exists(xml_dir):
                 os.makedirs(xml_dir)
             with open(self.xml_output, 'w') as f:
                 f.write(self.generate_xml(self.results))
 
@@ -859,16 +700,18 @@ class BaseMarionetteTestRunner(object):
 
         self.marionette.cleanup()
 
         for run_tests in self.mixin_run_tests:
             run_tests(tests)
         if self.shuffle:
             self.logger.info("Using seed where seed is:%d" % self.shuffle_seed)
 
+        self.logger.suite_end()
+
     def add_test(self, test, expected='pass', oop=None):
         filepath = os.path.abspath(test)
 
         if os.path.isdir(filepath):
             for root, dirs, files in os.walk(filepath):
                 for filename in files:
                     if ((filename.startswith('test_') or filename.startswith('browser_')) and
                         (filename.endswith('.py') or filename.endswith('.js'))):
@@ -967,17 +810,16 @@ class BaseMarionetteTestRunner(object):
             # This test is added by directory enumeration or directly specified
             # in argument list.  We have no manifest information here so we just
             # respect the "--type=[+-]oop" argument here.
             oop = file_ext == '.js' and testarg_oop == 'true'
 
         self.tests.append({'filepath': filepath, 'expected': expected, 'oop': oop})
 
     def run_test(self, filepath, expected, oop):
-        self.logger.info('TEST-START %s' % os.path.basename(filepath))
 
         testloader = unittest.TestLoader()
         suite = unittest.TestSuite()
         self.test_kwargs['expected'] = expected
         self.test_kwargs['oop'] = oop
         mod_name = os.path.splitext(os.path.split(filepath)[-1])[0]
         for handler in self.test_handlers:
             if handler.match(os.path.basename(filepath)):
@@ -986,34 +828,34 @@ class BaseMarionetteTestRunner(object):
                                            suite,
                                            testloader,
                                            self.marionette,
                                            self.testvars,
                                            **self.test_kwargs)
                 break
 
         if suite.countTestCases():
-            runner = self.textrunnerclass(verbosity=3,
+            runner = self.textrunnerclass(logger=self.logger,
                                           marionette=self.marionette,
                                           capabilities=self.capabilities)
             results = runner.run(suite)
             self.results.append(results)
 
             self.failed += len(results.failures) + len(results.errors)
             if hasattr(results, 'skipped'):
                 self.skipped += len(results.skipped)
                 self.todo += len(results.skipped)
             self.passed += results.passed
             for failure in results.failures + results.errors:
                 self.failures.append((results.getInfo(failure), failure.output, 'TEST-UNEXPECTED-FAIL'))
             if hasattr(results, 'unexpectedSuccesses'):
                 self.failed += len(results.unexpectedSuccesses)
                 self.unexpected_successes += len(results.unexpectedSuccesses)
                 for failure in results.unexpectedSuccesses:
-                    self.failures.append((results.getInfo(failure), 'TEST-UNEXPECTED-PASS'))
+                    self.failures.append((results.getInfo(failure), failure.output, 'TEST-UNEXPECTED-PASS'))
             if hasattr(results, 'expectedFailures'):
                 self.todo += len(results.expectedFailures)
 
     def run_test_set(self, tests):
         if self.shuffle:
             random.seed(self.shuffle_seed)
             random.shuffle(tests)
 
@@ -1127,9 +969,8 @@ class BaseMarionetteTestRunner(object):
             for result in results.tests_passed:
                 _extract_xml_from_result(result)
 
         for test in self.manifest_skipped_tests:
             _extract_xml_from_skipped_manifest_test(test)
 
         doc.appendChild(testsuite)
         return doc.toprettyxml(encoding='utf-8')
-
--- a/testing/marionette/client/marionette/runner/mixins/b2g.py
+++ b/testing/marionette/client/marionette/runner/mixins/b2g.py
@@ -51,53 +51,58 @@ class B2GTestCaseMixin(object):
 
 
 class B2GTestResultMixin(object):
 
     def __init__(self, *args, **kwargs):
         self.result_modifiers.append(self.b2g_output_modifier)
         self.b2g_pid = kwargs.pop('b2g_pid')
 
-    def b2g_output_modifier(self, test, result_expected, result_actual, output, context):
+    def _diagnose_socket(self):
         # This function will check if b2g is running and report any recent errors. This is
         # used in automation since a plain timeout error doesn't tell you
         # much information about what actually is going on
-        def diagnose_socket(output):
-            dm_type = os.environ.get('DM_TRANS', 'adb')
-            if dm_type == 'adb':
-                device_manager = get_dm(self.marionette)
-                pid = get_b2g_pid(device_manager)
-                if pid:
-                    # find recent errors
-                    message = ""
-                    error_re = re.compile(r"""[\s\S]*(exception|error)[\s\S]*""", flags=re.IGNORECASE)
-                    logcat = device_manager.getLogcat()
-                    latest = []
-                    iters = len(logcat) - 1
-                    # reading from the latest line
-                    while len(latest) < 5 and iters >= 0:
-                        line = logcat[iters]
-                        error_log_line = error_re.match(line)
-                        if error_log_line is not None:
-                            latest.append(line)
-                        iters -= 1
-                    message += "\nMost recent errors/exceptions are:\n"
-                    for line in reversed(latest):
-                        message += "%s" % line
-                    b2g_status = ""
-                    if pid != self.b2g_pid:
-                        b2g_status = "The B2G process has restarted after crashing during the tests so "
-                    else:
-                        b2g_status = "B2G is still running but "
-                    output += "%s\n%sMarionette can't respond due to either a Gecko, Gaia or Marionette error. " \
-                              "Above, the 5 most recent errors are " \
-                              "listed. Check logcat for all errors if these errors are not the cause " \
-                              "of the failure." % (message, b2g_status)
+
+        extra_output = None
+        dm_type = os.environ.get('DM_TRANS', 'adb')
+        if dm_type == 'adb':
+            device_manager = get_dm(self.marionette)
+            pid = get_b2g_pid(device_manager)
+            if pid:
+                # find recent errors
+                message = ""
+                error_re = re.compile(r"""[\s\S]*(exception|error)[\s\S]*""",
+                                      flags=re.IGNORECASE)
+                logcat = device_manager.getLogcat()
+                latest = []
+                iters = len(logcat) - 1
+                # reading from the latest line
+                while len(latest) < 5 and iters >= 0:
+                    line = logcat[iters]
+                    error_log_line = error_re.match(line)
+                    if error_log_line is not None:
+                        latest.append(line)
+                    iters -= 1
+                message += "\nMost recent errors/exceptions are:\n"
+                for line in reversed(latest):
+                    message += "%s" % line
+                b2g_status = ""
+                if pid != self.b2g_pid:
+                    b2g_status = "The B2G process has restarted after crashing during the tests so "
                 else:
-                    output += "B2G process has died"
-            return output
+                    b2g_status = "B2G is still running but "
+                extra_output = ("%s\n%sMarionette can't respond due to either a Gecko, Gaia or Marionette error. "
+                                "Above, the 5 most recent errors are listed. "
+                                "Check logcat for all errors if these errors are not the cause "
+                                "of the failure." % (message, b2g_status))
+            else:
+                extra_output = "B2G process has died"
+        return extra_output
+
+    def b2g_output_modifier(self, test, result_expected, result_actual, output, context):
         # output is the actual string output from the test, so we have to do string comparison
-        if "Broken pipe" in output:
-            output = diagnose_socket(output)
-        elif "Connection timed out" in output:
-            output = diagnose_socket(output)
+        if "Broken pipe" in output or "Connection timed out" in output:
+            extra_output = self._diagnose_socket()
+            if extra_output:
+                self.logger.error(extra_output)
+                output += extra_output
+
         return result_expected, result_actual, output, context
-
--- a/testing/marionette/client/marionette/runtests.py
+++ b/testing/marionette/client/marionette/runtests.py
@@ -1,34 +1,68 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import sys
 
 from marionette_test import MarionetteTestCase, MarionetteJSTestCase
+from mozlog import structured
 from runner import BaseMarionetteTestRunner, BaseMarionetteOptions
 
+class MarionetteTbplFormatter(structured.formatters.TbplFormatter):
+    """Formatter that logs failures in a format that agrees with legacy
+    log data used by tbpl."""
+
+    def test_end(self, data):
+        # TBPL ui expects relevant info about an exception to appear in the first
+        # line of the log message, however tracebacks provided by marionette
+        # put this information on the last line.
+        if "message" in data:
+            message_lines = [line for line in data["message"].splitlines() if line != ""]
+            if "Traceback" in message_lines[0]:
+                exc_msg_index = None
+                for index, line in enumerate(message_lines):
+                    if "Error: " in line or "Exception: " in line:
+                        exc_msg_index = index
+                        break
+                if exc_msg_index:
+                    message_lines = (message_lines[exc_msg_index:] +
+                                     message_lines[:exc_msg_index])
+            data["message"] = "\n".join(message_lines)
+        return super(MarionetteTbplFormatter, self).test_end(data)
 
 class MarionetteTestRunner(BaseMarionetteTestRunner):
     def __init__(self, **kwargs):
         BaseMarionetteTestRunner.__init__(self, **kwargs)
         self.test_handlers = [MarionetteTestCase, MarionetteJSTestCase]
 
+def startTestRunner(runner_class, options, tests):
 
-def startTestRunner(runner_class, options, tests):
     runner = runner_class(**vars(options))
     runner.run_tests(tests)
     return runner
 
-
 def cli(runner_class=MarionetteTestRunner, parser_class=BaseMarionetteOptions):
     parser = parser_class(usage='%prog [options] test_file_or_dir <test_file_or_dir> ...')
+    structured.commandline.add_logging_group(parser)
     options, tests = parser.parse_args()
     parser.verify_usage(options, tests)
 
+    logger = structured.commandline.setup_logging(options.logger_name,
+                                                  options,
+                                                  {})
+
+    # Only add the tbpl logger if a handler isn't already logging to stdout
+    has_stdout_logger = any([h.stream == sys.stdout for h in logger.handlers])
+    if not has_stdout_logger:
+        formatter = MarionetteTbplFormatter()
+        handler = structured.handlers.StreamHandler(sys.stdout, formatter)
+        logger.add_handler(handler)
+
+    options.logger = logger
+
     runner = startTestRunner(runner_class, options, tests)
     if runner.failed > 0:
         sys.exit(10)
 
 if __name__ == "__main__":
     cli()
-
--- a/testing/marionette/client/requirements.txt
+++ b/testing/marionette/client/requirements.txt
@@ -1,12 +1,13 @@
 marionette-transport == 0.2
 manifestparser
 mozhttpd >= 0.5
 mozinfo >= 0.7
 mozprocess >= 0.9
 mozrunner >= 6.0
 mozdevice >= 0.37
+mozlog >= 2.0
 moznetwork >= 0.21
 mozcrash >= 0.5
 mozprofile >= 0.7
 moztest >= 0.1
 mozversion >= 0.2
--- a/testing/marionette/client/setup.py
+++ b/testing/marionette/client/setup.py
@@ -1,13 +1,13 @@
 import os
 from setuptools import setup, find_packages
 import sys
 
-version = '0.7.11'
+version = '0.8'
 
 # dependencies
 with open('requirements.txt') as f:
     deps = f.read().splitlines()
 
 setup(name='marionette_client',
       version=version,
       description="Marionette test automation client",
--- a/testing/marionette/mach_commands.py
+++ b/testing/marionette/mach_commands.py
@@ -1,14 +1,18 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import unicode_literals
+
 import os
+import sys
+
+from mozlog import structured
 
 from mozbuild.base import (
     MachCommandBase,
     MachCommandConditions as conditions,
 )
 
 from mach.decorators import (
     CommandArgument,
@@ -21,17 +25,17 @@ The %s command requires a Marionette-ena
 
 Please create an engineering build, which has Marionette enabled.  You can do
 this by ommitting the VARIANT variable when building, or using:
 
 VARIANT=eng ./build.sh
 '''
 
 def run_marionette(tests, b2g_path=None, emulator=None, testtype=None,
-    address=None, bin=None, topsrcdir=None):
+    address=None, binary=None, topsrcdir=None):
     from marionette.runtests import (
         MarionetteTestRunner,
         BaseMarionetteOptions,
         startTestRunner
     )
 
     parser = BaseMarionetteOptions()
     options, args = parser.parse_args()
@@ -41,23 +45,27 @@ def run_marionette(tests, b2g_path=None,
                     'testing/marionette/client/marionette/tests/unit-tests.ini')]
 
     options.type = testtype
     if b2g_path:
         options.homedir = b2g_path
         if emulator:
             options.emulator = emulator
     else:
-        options.bin = bin
-        path, exe = os.path.split(options.bin)
+        options.binary = binary
+        path, exe = os.path.split(options.binary)
 
     options.address = address
 
     parser.verify_usage(options, tests)
 
+    options.logger = structured.commandline.setup_logging("Marionette Unit Tests",
+                                                          options,
+                                                          {"mach": sys.stdout})
+
     runner = startTestRunner(MarionetteTestRunner, options, tests)
     if runner.failed > 0:
         return 1
 
     return 0
 
 @CommandProvider
 class B2GCommands(MachCommandBase):
@@ -98,11 +106,11 @@ class MachCommands(MachCommandBase):
     @CommandArgument('--address',
         help='host:port of running Gecko instance to connect to.')
     @CommandArgument('--type', dest='testtype',
         help='Test type, usually one of: browser, b2g, b2g-qemu.',
         default='browser')
     @CommandArgument('tests', nargs='*', metavar='TESTS',
         help='Path to test(s) to run.')
     def run_marionette_test(self, tests, address=None, testtype=None):
-        bin = self.get_binary_path('app')
-        return run_marionette(tests, bin=bin, testtype=testtype,
+        binary = self.get_binary_path('app')
+        return run_marionette(tests, binary=binary, testtype=testtype,
             topsrcdir=self.topsrcdir, address=address)