Backed out changeset e4c57f58a0ad (bug 956739) for changing Marionette failure output in a TBPL-unfriendly way.
author      Ryan VanderMeulen <ryanvm@gmail.com>
date        Thu, 26 Jun 2014 12:00:55 -0400
changeset   191043  09e5168bf8afe9dc240e0dc8dc24a402955b71ae
parent      191042  39192ab47239c73817b4ec8090f4ef498b2110e2
child       191044  4fc85d544dab801764455cf356e50bc22c19be78
push id     27024
push user   ryanvm@gmail.com
push date   Thu, 26 Jun 2014 21:13:08 +0000
treeherder  mozilla-central@3519e987aa3b
bugs        956739
milestone   33.0a1
backs out   e4c57f58a0adb749b6f6b1a7b308be85ce9cdcd6
testing/marionette/client/marionette/runner/base.py
testing/marionette/client/marionette/runner/mixins/b2g.py
testing/marionette/client/marionette/runtests.py
testing/marionette/client/requirements.txt
testing/marionette/client/setup.py
testing/marionette/mach_commands.py
testing/mozbase/mozlog/mozlog/structured/formatters/tbplformatter.py
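
For context: TBPL-style log parsers key on literal TEST-UNEXPECTED-* tokens in
failure lines, which is why reshaping those lines (as the backed-out changeset
did) stops failures from being highlighted. The sketch below is illustrative
only; the regex and names are assumptions, not TBPL's actual parser. It matches
the legacy line shape restored in printErrorList further down.

import re

# Assumed shape of a TBPL-recognizable failure line, per the legacy output:
#   TEST-UNEXPECTED-FAIL | <test name> | <reason>
FAILURE_RE = re.compile(r"TEST-UNEXPECTED-(?:FAIL|PASS|ERROR)"
                        r" \| (?P<test>[^|]+) \| (?P<message>.*)")

line = "TEST-UNEXPECTED-FAIL | test_foo.py TestFoo.test_bar | timed out"
match = FAILURE_RE.search(line)
if match:
    print("failure in %s: %s" % (match.group("test").strip(),
                                 match.group("message")))
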
--- a/testing/marionette/client/marionette/runner/base.py
+++ b/testing/marionette/client/marionette/runner/base.py
@@ -1,65 +1,64 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
+from optparse import OptionParser
 from datetime import datetime
-from optparse import OptionParser
-
-import json
-import mozinfo
-import moznetwork
+import logging
 import os
-import random
-import re
+import unittest
 import socket
 import sys
 import time
 import traceback
-import unittest
+import random
+import re
+import mozinfo
+import moznetwork
 import xml.dom.minidom as dom
 
 from manifestparser import TestManifest
+from mozhttpd import MozHttpd
 from marionette import Marionette
+from moztest.results import TestResultCollection, TestResult, relevant_line
 from mixins.b2g import B2GTestResultMixin, get_b2g_pid, get_dm
-from mozhttpd import MozHttpd
-from moztest.adapters.unit import StructuredTestRunner, StructuredTestResult
-from moztest.results import TestResultCollection, TestResult, relevant_line
+
 
 class MarionetteTest(TestResult):
 
     @property
     def test_name(self):
         if self.test_class is not None:
             return '%s.py %s.%s' % (self.test_class.split('.')[0],
                                     self.test_class,
                                     self.name)
         else:
             return self.name
 
 
-class MarionetteTestResult(StructuredTestResult, TestResultCollection):
+class MarionetteTestResult(unittest._TextTestResult, TestResultCollection):
 
     resultClass = MarionetteTest
 
     def __init__(self, *args, **kwargs):
         self.marionette = kwargs.pop('marionette')
         TestResultCollection.__init__(self, 'MarionetteTest')
         self.passed = 0
         self.testsRun = 0
         self.result_modifiers = [] # used by mixins to modify the result
         pid = kwargs.pop('b2g_pid')
         if pid:
             if B2GTestResultMixin not in self.__class__.__bases__:
                 bases = [b for b in self.__class__.__bases__]
                 bases.append(B2GTestResultMixin)
                 self.__class__.__bases__ = tuple(bases)
             B2GTestResultMixin.__init__(self, b2g_pid=pid)
-        StructuredTestResult.__init__(self, *args, **kwargs)
+        unittest._TextTestResult.__init__(self, *args, **kwargs)
 
     @property
     def skipped(self):
         return [t for t in self if t.result == 'SKIPPED']
 
     @skipped.setter
     def skipped(self, value):
         pass
@@ -127,41 +126,67 @@ class MarionetteTestResult(StructuredTes
         t.finish(result_actual,
                  time_end=time.time() if test.start_time else 0,
                  reason=relevant_line(output),
                  output=output)
         self.append(t)
 
     def addError(self, test, err):
         self.add_test_result(test, output=self._exc_info_to_string(err, test), result_actual='ERROR')
-        super(MarionetteTestResult, self).addError(test, err)
+        self._mirrorOutput = True
+        if self.showAll:
+            self.stream.writeln("ERROR")
+        elif self.dots:
+            self.stream.write('E')
+            self.stream.flush()
 
     def addFailure(self, test, err):
         self.add_test_result(test, output=self._exc_info_to_string(err, test), result_actual='UNEXPECTED-FAIL')
-        super(MarionetteTestResult, self).addFailure(test, err)
+        self._mirrorOutput = True
+        if self.showAll:
+            self.stream.writeln("FAIL")
+        elif self.dots:
+            self.stream.write('F')
+            self.stream.flush()
 
     def addSuccess(self, test):
         self.passed += 1
         self.add_test_result(test, result_actual='PASS')
-        super(MarionetteTestResult, self).addSuccess(test)
+        if self.showAll:
+            self.stream.writeln("ok")
+        elif self.dots:
+            self.stream.write('.')
+            self.stream.flush()
 
     def addExpectedFailure(self, test, err):
         """Called when an expected failure/error occured."""
         self.add_test_result(test, output=self._exc_info_to_string(err, test),
                         result_actual='KNOWN-FAIL')
-        super(MarionetteTestResult, self).addExpectedFailure(test, err)
+        if self.showAll:
+            self.stream.writeln("expected failure")
+        elif self.dots:
+            self.stream.write("x")
+            self.stream.flush()
 
     def addUnexpectedSuccess(self, test):
         """Called when a test was expected to fail, but succeed."""
         self.add_test_result(test, result_actual='UNEXPECTED-PASS')
-        super(MarionetteTestResult, self).addUnexpectedSuccess(test)
+        if self.showAll:
+            self.stream.writeln("unexpected success")
+        elif self.dots:
+            self.stream.write("u")
+            self.stream.flush()
 
     def addSkip(self, test, reason):
         self.add_test_result(test, output=reason, result_actual='SKIPPED')
-        super(MarionetteTestResult, self).addSkip(test, reason)
+        if self.showAll:
+            self.stream.writeln("skipped {0!r}".format(reason))
+        elif self.dots:
+            self.stream.write("s")
+            self.stream.flush()
 
     def getInfo(self, test):
         return test.test_name
 
     def getDescription(self, test):
         doc_first_line = test.shortDescription()
         if self.descriptions and doc_first_line:
             return '\n'.join((str(test), doc_first_line))
@@ -179,85 +204,144 @@ class MarionetteTestResult(StructuredTes
                 skip_log = True
                 for line in testcase.loglines:
                     str_line = ' '.join(line)
                     if not 'TEST-END' in str_line and not 'TEST-START' in str_line:
                         skip_log = False
                         break
                 if skip_log:
                     return
-                self.logger.info('START LOG:')
+                self.stream.writeln('\nSTART LOG:')
                 for line in testcase.loglines:
-                    self.logger.info(' '.join(line).encode('ascii', 'replace'))
-                self.logger.info('END LOG:')
+                    self.stream.writeln(' '.join(line).encode('ascii', 'replace'))
+                self.stream.writeln('END LOG:')
 
     def printErrorList(self, flavour, errors):
         TIMEOUT_MESSAGE = "ScriptTimeoutException: ScriptTimeoutException: timed out"
         for error in errors:
             err = error.output
-            self.logger.error("%s: %s" % (flavour, error.description))
+            self.stream.writeln(self.separator1)
+            self.stream.writeln("%s: %s" % (flavour, error.description))
+            self.stream.writeln(self.separator2)
             lastline = None
             fail_present = None
             test_name = self.getInfo(error)
             for line in err:
                 if not line.startswith('\t') and line != '':
                     lastline = line
                 if 'TEST-UNEXPECTED-FAIL' in line:
                     fail_present = True
             for line in err:
                 if line != lastline or fail_present:
                     if re.match('.*\.js', test_name):
                         if error.reason != TIMEOUT_MESSAGE:
-                            self.logger.error(line)
+                            self.stream.writeln("%s" % line)
                     else:
-                        self.logger.error(line)
+                        self.stream.writeln("%s" % line)
+
                 else:
-                    self.logger.error("%s | %s" %
-                                      (test_name, error.reason))
+                    self.stream.writeln("TEST-UNEXPECTED-FAIL | %s | %s" %
+                                        (test_name, error.reason))
 
     def stopTest(self, *args, **kwargs):
         unittest._TextTestResult.stopTest(self, *args, **kwargs)
         if self.marionette.check_for_crash():
             # this tells unittest.TestSuite not to continue running tests
             self.shouldStop = True
 
 
-class MarionetteTextTestRunner(StructuredTestRunner):
+class MarionetteTextTestRunner(unittest.TextTestRunner):
 
     resultclass = MarionetteTestResult
 
     def __init__(self, **kwargs):
-        self.marionette = kwargs.pop('marionette')
+        self.marionette = kwargs['marionette']
         self.capabilities = kwargs.pop('capabilities')
         self.pre_run_functions = []
         self.b2g_pid = None
+        del kwargs['marionette']
 
         if self.capabilities["device"] != "desktop" and self.capabilities["browserName"] == "B2G":
             def b2g_pre_run():
                 dm_type = os.environ.get('DM_TRANS', 'adb')
                 if dm_type == 'adb':
                     self.b2g_pid = get_b2g_pid(get_dm(self.marionette))
             self.pre_run_functions.append(b2g_pre_run)
 
-        StructuredTestRunner.__init__(self, **kwargs)
-
+        unittest.TextTestRunner.__init__(self, **kwargs)
 
     def _makeResult(self):
         return self.resultclass(self.stream,
                                 self.descriptions,
                                 self.verbosity,
                                 marionette=self.marionette,
-                                b2g_pid=self.b2g_pid,
-                                logger=self.logger)
+                                b2g_pid=self.b2g_pid)
 
     def run(self, test):
         "Run the given test case or test suite."
         for pre_run_func in self.pre_run_functions:
             pre_run_func()
-        return super(MarionetteTextTestRunner, self).run(test)
+        result = self._makeResult()
+        if hasattr(self, 'failfast'):
+            result.failfast = self.failfast
+        if hasattr(self, 'buffer'):
+            result.buffer = self.buffer
+        startTime = time.time()
+        startTestRun = getattr(result, 'startTestRun', None)
+        if startTestRun is not None:
+            startTestRun()
+        try:
+            test(result)
+        finally:
+            stopTestRun = getattr(result, 'stopTestRun', None)
+            if stopTestRun is not None:
+                stopTestRun()
+        stopTime = time.time()
+        if hasattr(result, 'time_taken'):
+            result.time_taken = stopTime - startTime
+        result.printLogs(test)
+        result.printErrors()
+        if hasattr(result, 'separator2'):
+            self.stream.writeln(result.separator2)
+        run = result.testsRun
+        self.stream.writeln("Ran %d test%s in %.3fs" %
+                            (run, run != 1 and "s" or "", result.time_taken))
+        self.stream.writeln()
+
+        expectedFails = unexpectedSuccesses = skipped = 0
+        try:
+            results = map(len, (result.expectedFailures,
+                                result.unexpectedSuccesses,
+                                result.skipped))
+        except AttributeError:
+            pass
+        else:
+            expectedFails, unexpectedSuccesses, skipped = results
+
+        infos = []
+        if not result.wasSuccessful():
+            self.stream.write("FAILED")
+            failed, errored = map(len, (result.failures, result.errors))
+            if failed:
+                infos.append("failures=%d" % failed)
+            if errored:
+                infos.append("errors=%d" % errored)
+        else:
+            self.stream.write("OK")
+        if skipped:
+            infos.append("skipped=%d" % skipped)
+        if expectedFails:
+            infos.append("expected failures=%d" % expectedFails)
+        if unexpectedSuccesses:
+            infos.append("unexpected successes=%d" % unexpectedSuccesses)
+        if infos:
+            self.stream.writeln(" (%s)" % (", ".join(infos),))
+        else:
+            self.stream.write("\n")
+        return result
 
 
 class BaseMarionetteOptions(OptionParser):
     def __init__(self, **kwargs):
         OptionParser.__init__(self, **kwargs)
         self.parse_args_handlers = [] # Used by mixins
         self.verify_usage_handlers = [] # Used by mixins
         self.add_option('--autolog',
@@ -334,17 +418,17 @@ class BaseMarionetteOptions(OptionParser
                         action='store',
                         help='application to use')
         self.add_option('--app-arg',
                         dest='app_args',
                         action='append',
                         default=[],
                         help='specify a command line argument to be passed onto the application')
         self.add_option('--binary',
-                        dest='binary',
+                        dest='bin',
                         action='store',
                         help='gecko executable to launch before running the test')
         self.add_option('--profile',
                         dest='profile',
                         action='store',
                         help='profile to use when launching the gecko process. if not passed, then a profile will be '
                              'constructed and used')
         self.add_option('--repeat',
@@ -369,16 +453,20 @@ class BaseMarionetteOptions(OptionParser
         self.add_option('--symbols-path',
                         dest='symbols_path',
                         action='store',
                         help='absolute path to directory containing breakpad symbols, or the url of a zip file containing symbols')
         self.add_option('--timeout',
                         dest='timeout',
                         type=int,
                         help='if a --timeout value is given, it will set the default page load timeout, search timeout and script timeout to the given value. If not passed in, it will use the default values of 30000ms for page load, 0ms for search timeout and 10000ms for script timeout')
+        self.add_option('--es-server',
+                        dest='es_servers',
+                        action='append',
+                        help='the ElasticSearch server to use for autolog submission')
         self.add_option('--shuffle',
                         action='store_true',
                         dest='shuffle',
                         default=False,
                         help='run tests in a random order')
         self.add_option('--shuffle-seed',
                         dest='shuffle_seed',
                         type=int,
@@ -415,24 +503,28 @@ class BaseMarionetteOptions(OptionParser
 
         return (options, tests)
 
     def verify_usage(self, options, tests):
         if not tests:
             print 'must specify one or more test files, manifests, or directories'
             sys.exit(1)
 
-        if not options.emulator and not options.address and not options.binary:
+        if not options.emulator and not options.address and not options.bin:
             print 'must specify --binary, --emulator or --address'
             sys.exit(1)
 
-        if options.emulator and options.binary:
+        if options.emulator and options.bin:
             print 'can\'t specify both --emulator and --binary'
             sys.exit(1)
 
+        if not options.es_servers:
+            options.es_servers = ['elasticsearch-zlb.dev.vlan81.phx.mozilla.com:9200',
+                                  'elasticsearch-zlb.webapp.scl3.mozilla.com:9200']
+
         # default to storing logcat output for emulator runs
         if options.emulator and not options.logdir:
             options.logdir = 'logcat'
 
         # check for valid resolution string, strip whitespaces
         try:
             if options.emulator_res:
                 dims = options.emulator_res.split('x')
@@ -463,34 +555,34 @@ class BaseMarionetteOptions(OptionParser
 
 
 class BaseMarionetteTestRunner(object):
 
     textrunnerclass = MarionetteTextTestRunner
 
     def __init__(self, address=None, emulator=None, emulator_binary=None,
                  emulator_img=None, emulator_res='480x800', homedir=None,
-                 app=None, app_args=None, binary=None, profile=None, autolog=False,
+                 app=None, app_args=None, bin=None, profile=None, autolog=False,
                  revision=None, logger=None, testgroup="marionette", no_window=False,
                  logdir=None, xml_output=None, repeat=0,
                  testvars=None, tree=None, type=None, device_serial=None,
-                 symbols_path=None, timeout=None, shuffle=False,
+                 symbols_path=None, timeout=None, es_servers=None, shuffle=False,
                  shuffle_seed=random.randint(0, sys.maxint), sdcard=None,
                  this_chunk=1, total_chunks=1, sources=None, server_root=None,
                  gecko_log=None,
                  **kwargs):
         self.address = address
         self.emulator = emulator
         self.emulator_binary = emulator_binary
         self.emulator_img = emulator_img
         self.emulator_res = emulator_res
         self.homedir = homedir
         self.app = app
         self.app_args = app_args or []
-        self.bin = binary
+        self.bin = bin
         self.profile = profile
         self.autolog = autolog
         self.testgroup = testgroup
         self.revision = revision
         self.logger = logger
         self.no_window = no_window
         self.httpd = None
         self.marionette = None
@@ -502,45 +594,52 @@ class BaseMarionetteTestRunner(object):
         self.tree = tree
         self.type = type
         self.device_serial = device_serial
         self.symbols_path = symbols_path
         self.timeout = timeout
         self._device = None
         self._capabilities = None
         self._appName = None
+        self.es_servers = es_servers
         self.shuffle = shuffle
         self.shuffle_seed = shuffle_seed
         self.sdcard = sdcard
         self.sources = sources
         self.server_root = server_root
         self.this_chunk = this_chunk
         self.total_chunks = total_chunks
         self.gecko_log = gecko_log
         self.mixin_run_tests = []
         self.manifest_skipped_tests = []
         self.tests = []
 
         if testvars:
             if not os.path.exists(testvars):
                 raise IOError('--testvars file does not exist')
 
+            import json
             try:
                 with open(testvars) as f:
                     self.testvars = json.loads(f.read())
             except ValueError as e:
                 json_path = os.path.abspath(testvars)
                 raise Exception("JSON file (%s) is not properly "
                                 "formatted: %s" % (json_path, e.message))
 
         # set up test handlers
         self.test_handlers = []
 
         self.reset_test_stats()
 
+        if self.logger is None:
+            self.logger = logging.getLogger('Marionette')
+            self.logger.setLevel(logging.INFO)
+            self.logger.addHandler(logging.StreamHandler())
+
         if self.logdir:
             if not os.access(self.logdir, os.F_OK):
                 os.mkdir(self.logdir)
 
         # for XML output
         self.testvars['xml_output'] = self.xml_output
         self.results = []
 
@@ -642,16 +741,60 @@ class BaseMarionetteTestRunner(object):
                 'no_window': self.no_window,
                 'sdcard': self.sdcard,
             })
         return kwargs
 
     def start_marionette(self):
         self.marionette = Marionette(**self._build_kwargs())
 
+    def post_to_autolog(self, elapsedtime):
+        self.logger.info('posting results to autolog')
+
+        logfile = None
+        if self.emulator:
+            filename = os.path.join(os.path.abspath(self.logdir),
+                                    "emulator-%d.log" % self.marionette.emulator.port)
+            if os.access(filename, os.F_OK):
+                logfile = filename
+
+        for es_server in self.es_servers:
+
+            # This is all autolog stuff.
+            # See: https://wiki.mozilla.org/Auto-tools/Projects/Autolog
+            from mozautolog import RESTfulAutologTestGroup
+            testgroup = RESTfulAutologTestGroup(
+                testgroup=self.testgroup,
+                os='android',
+                platform='emulator',
+                harness='marionette',
+                server=es_server,
+                restserver=None,
+                machine=socket.gethostname(),
+                logfile=logfile)
+
+            testgroup.set_primary_product(
+                tree=self.tree,
+                buildtype='opt',
+                revision=self.revision)
+
+            testgroup.add_test_suite(
+                testsuite='b2g emulator testsuite',
+                elapsedtime=elapsedtime.seconds,
+                cmdline='',
+                passed=self.passed,
+                failed=self.failed,
+                todo=self.todo)
+
+            # Add in the test failures.
+            for f in self.failures:
+                testgroup.add_test_failure(test=f[0], text=f[1], status=f[2])
+
+            testgroup.submit()
+
     def run_tests(self, tests):
         self.reset_test_stats()
         starttime = datetime.utcnow()
 
         need_external_ip = True
         if not self.marionette:
             self.start_marionette()
             if self.emulator:
@@ -660,54 +803,53 @@ class BaseMarionetteTestRunner(object):
             if not self._capabilities:
                 self.capabilities
             # if we're working against a desktop version, we usually don't need
             # an external ip
             if self._capabilities['device'] == "desktop":
                 need_external_ip = False
 
         if not self.httpd:
-            self.logger.info("starting httpd")
+            print "starting httpd"
             self.start_httpd(need_external_ip)
 
         for test in tests:
             self.add_test(test)
 
-        self.logger.suite_start(self.tests)
-
         counter = self.repeat
         while counter >=0:
             round = self.repeat - counter
             if round > 0:
                 self.logger.info('\nREPEAT %d\n-------' % round)
             self.run_test_sets()
             counter -= 1
-
         self.logger.info('\nSUMMARY\n-------')
         self.logger.info('passed: %d' % self.passed)
         if self.unexpected_successes == 0:
             self.logger.info('failed: %d' % self.failed)
         else:
-            self.logger.info('failed: %d (unexpected successes: %d)' % (self.failed, self.unexpected_successes))
+            self.logger.info('failed: %d (unexpected successes: %d)', self.failed, self.unexpected_successes)
         if self.skipped == 0:
-            self.logger.info('todo: %d' % self.todo)
+            self.logger.info('todo: %d', self.todo)
         else:
-            self.logger.info('todo: %d (skipped: %d)' % (self.todo, self.skipped))
+            self.logger.info('todo: %d (skipped: %d)', self.todo, self.skipped)
 
         if self.failed > 0:
             self.logger.info('\nFAILED TESTS\n-------')
             for failed_test in self.failures:
                 self.logger.info('%s' % failed_test[0])
 
         try:
             self.marionette.check_for_crash()
         except:
             traceback.print_exc()
 
         self.elapsedtime = datetime.utcnow() - starttime
+        if self.autolog:
+            self.post_to_autolog(self.elapsedtime)
 
         if self.xml_output:
             xml_dir = os.path.dirname(os.path.abspath(self.xml_output))
             if not os.path.exists(xml_dir):
                 os.makedirs(xml_dir)
             with open(self.xml_output, 'w') as f:
                 f.write(self.generate_xml(self.results))
 
@@ -717,18 +859,16 @@ class BaseMarionetteTestRunner(object):
 
         self.marionette.cleanup()
 
         for run_tests in self.mixin_run_tests:
             run_tests(tests)
         if self.shuffle:
             self.logger.info("Using seed where seed is:%d" % self.shuffle_seed)
 
-        self.logger.suite_end()
-
     def add_test(self, test, expected='pass', oop=None):
         filepath = os.path.abspath(test)
 
         if os.path.isdir(filepath):
             for root, dirs, files in os.walk(filepath):
                 for filename in files:
                     if ((filename.startswith('test_') or filename.startswith('browser_')) and
                         (filename.endswith('.py') or filename.endswith('.js'))):
@@ -827,16 +967,17 @@ class BaseMarionetteTestRunner(object):
             # This test is added by directory enumeration or directly specified
             # in argument list.  We have no manifest information here so we just
             # respect the "--type=[+-]oop" argument here.
             oop = file_ext == '.js' and testarg_oop == 'true'
 
         self.tests.append({'filepath': filepath, 'expected': expected, 'oop': oop})
 
     def run_test(self, filepath, expected, oop):
+        self.logger.info('TEST-START %s' % os.path.basename(filepath))
 
         testloader = unittest.TestLoader()
         suite = unittest.TestSuite()
         self.test_kwargs['expected'] = expected
         self.test_kwargs['oop'] = oop
         mod_name = os.path.splitext(os.path.split(filepath)[-1])[0]
         for handler in self.test_handlers:
             if handler.match(os.path.basename(filepath)):
@@ -845,34 +986,34 @@ class BaseMarionetteTestRunner(object):
                                            suite,
                                            testloader,
                                            self.marionette,
                                            self.testvars,
                                            **self.test_kwargs)
                 break
 
         if suite.countTestCases():
-            runner = self.textrunnerclass(logger=self.logger,
+            runner = self.textrunnerclass(verbosity=3,
                                           marionette=self.marionette,
                                           capabilities=self.capabilities)
             results = runner.run(suite)
             self.results.append(results)
 
             self.failed += len(results.failures) + len(results.errors)
             if hasattr(results, 'skipped'):
                 self.skipped += len(results.skipped)
                 self.todo += len(results.skipped)
             self.passed += results.passed
             for failure in results.failures + results.errors:
                 self.failures.append((results.getInfo(failure), failure.output, 'TEST-UNEXPECTED-FAIL'))
             if hasattr(results, 'unexpectedSuccesses'):
                 self.failed += len(results.unexpectedSuccesses)
                 self.unexpected_successes += len(results.unexpectedSuccesses)
                 for failure in results.unexpectedSuccesses:
-                    self.failures.append((results.getInfo(failure), failure.output, 'TEST-UNEXPECTED-PASS'))
+                    self.failures.append((results.getInfo(failure), 'TEST-UNEXPECTED-PASS'))
             if hasattr(results, 'expectedFailures'):
                 self.todo += len(results.expectedFailures)
 
     def run_test_set(self, tests):
         if self.shuffle:
             random.seed(self.shuffle_seed)
             random.shuffle(tests)
 
@@ -986,8 +1127,9 @@ class BaseMarionetteTestRunner(object):
             for result in results.tests_passed:
                 _extract_xml_from_result(result)
 
         for test in self.manifest_skipped_tests:
             _extract_xml_from_skipped_manifest_test(test)
 
         doc.appendChild(testsuite)
         return doc.toprettyxml(encoding='utf-8')
+
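
Aside: the restored MarionetteTestResult.__init__ splices B2GTestResultMixin
into the class's __bases__ at runtime when a B2G pid is present. A minimal,
self-contained sketch of that pattern follows; all names are illustrative,
and CPython accepts the __bases__ assignment as long as the class layout
stays compatible.

class ResultBase(object):
    pass

class DeviceMixin(object):
    def init_device(self, pid):
        self.device_pid = pid

class Result(ResultBase):
    def __init__(self, pid=None):
        if pid is not None and DeviceMixin not in self.__class__.__bases__:
            # Appending to __bases__ at runtime makes the mixin's methods
            # available on every instance of Result from here on.
            self.__class__.__bases__ += (DeviceMixin,)
        if pid is not None:
            self.init_device(pid)

print(Result(pid=1234).device_pid)  # -> 1234
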
--- a/testing/marionette/client/marionette/runner/mixins/b2g.py
+++ b/testing/marionette/client/marionette/runner/mixins/b2g.py
@@ -51,58 +51,53 @@ class B2GTestCaseMixin(object):
 
 
 class B2GTestResultMixin(object):
 
     def __init__(self, *args, **kwargs):
         self.result_modifiers.append(self.b2g_output_modifier)
         self.b2g_pid = kwargs.pop('b2g_pid')
 
-    def _diagnose_socket(self):
+    def b2g_output_modifier(self, test, result_expected, result_actual, output, context):
         # This function will check if b2g is running and report any recent errors. This is
         # used in automation since a plain timeout error doesn't tell you
         # much information about what actually is going on
+        def diagnose_socket(output):
+            dm_type = os.environ.get('DM_TRANS', 'adb')
+            if dm_type == 'adb':
+                device_manager = get_dm(self.marionette)
+                pid = get_b2g_pid(device_manager)
+                if pid:
+                    # find recent errors
+                    message = ""
+                    error_re = re.compile(r"""[\s\S]*(exception|error)[\s\S]*""", flags=re.IGNORECASE)
+                    logcat = device_manager.getLogcat()
+                    latest = []
+                    iters = len(logcat) - 1
+                    # reading from the latest line
+                    while len(latest) < 5 and iters >= 0:
+                        line = logcat[iters]
+                        error_log_line = error_re.match(line)
+                        if error_log_line is not None:
+                            latest.append(line)
+                        iters -= 1
+                    message += "\nMost recent errors/exceptions are:\n"
+                    for line in reversed(latest):
+                        message += "%s" % line
+                    b2g_status = ""
+                    if pid != self.b2g_pid:
+                        b2g_status = "The B2G process has restarted after crashing during the tests so "
+                    else:
+                        b2g_status = "B2G is still running but "
+                    output += "%s\n%sMarionette can't respond due to either a Gecko, Gaia or Marionette error. " \
+                              "Above, the 5 most recent errors are " \
+                              "listed. Check logcat for all errors if these errors are not the cause " \
+                              "of the failure." % (message, b2g_status)
+                else:
+                    output += "B2G process has died"
+            return output
+        # output is the actual string output from the test, so we have to do string comparison
+        if "Broken pipe" in output:
+            output = diagnose_socket(output)
+        elif "Connection timed out" in output:
+            output = diagnose_socket(output)
+        return result_expected, result_actual, output, context
 
-        extra_output = None
-        dm_type = os.environ.get('DM_TRANS', 'adb')
-        if dm_type == 'adb':
-            device_manager = get_dm(self.marionette)
-            pid = get_b2g_pid(device_manager)
-            if pid:
-                # find recent errors
-                message = ""
-                error_re = re.compile(r"""[\s\S]*(exception|error)[\s\S]*""",
-                                      flags=re.IGNORECASE)
-                logcat = device_manager.getLogcat()
-                latest = []
-                iters = len(logcat) - 1
-                # reading from the latest line
-                while len(latest) < 5 and iters >= 0:
-                    line = logcat[iters]
-                    error_log_line = error_re.match(line)
-                    if error_log_line is not None:
-                        latest.append(line)
-                    iters -= 1
-                message += "\nMost recent errors/exceptions are:\n"
-                for line in reversed(latest):
-                    message += "%s" % line
-                b2g_status = ""
-                if pid != self.b2g_pid:
-                    b2g_status = "The B2G process has restarted after crashing during  the tests so "
-                else:
-                    b2g_status = "B2G is still running but "
-                extra_output = ("%s\n%sMarionette can't respond due to either a Gecko, Gaia or Marionette error. "
-                                "Above, the 5 most recent errors are listed. "
-                                "Check logcat for all errors if these errors are not the cause "
-                                "of the failure." % (message, b2g_status))
-            else:
-                extra_output = "B2G process has died"
-        return extra_output
-
-    def b2g_output_modifier(self, test, result_expected, result_actual, output, context):
-        # output is the actual string output from the test, so we have to do string comparison
-        if "Broken pipe" in output or "Connection timed out" in output:
-            extra_output = self._diagnose_socket()
-            if extra_output:
-                self.logger.error(extra_output)
-                output += extra_output
-
-        return result_expected, result_actual, output, context
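
Aside: the diagnose_socket helper restored above walks logcat from the newest
line backwards, collecting up to five lines that look like errors. Reduced to
a standalone sketch (logcat is any list of log lines; names are illustrative):

import re

def recent_errors(logcat, limit=5):
    error_re = re.compile(r"(exception|error)", re.IGNORECASE)
    latest = []
    for line in reversed(logcat):      # newest lines first
        if error_re.search(line):
            latest.append(line)
            if len(latest) == limit:
                break
    return list(reversed(latest))      # back to chronological order

print(recent_errors(["boot ok", "E/Gecko: error 1", "E/Gaia: Exception!"]))
# -> ['E/Gecko: error 1', 'E/Gaia: Exception!']
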
--- a/testing/marionette/client/marionette/runtests.py
+++ b/testing/marionette/client/marionette/runtests.py
@@ -1,38 +1,34 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 import sys
 
 from marionette_test import MarionetteTestCase, MarionetteJSTestCase
-from mozlog import structured
 from runner import BaseMarionetteTestRunner, BaseMarionetteOptions
 
 
 class MarionetteTestRunner(BaseMarionetteTestRunner):
     def __init__(self, **kwargs):
         BaseMarionetteTestRunner.__init__(self, **kwargs)
         self.test_handlers = [MarionetteTestCase, MarionetteJSTestCase]
 
+
 def startTestRunner(runner_class, options, tests):
-
     runner = runner_class(**vars(options))
     runner.run_tests(tests)
     return runner
 
+
 def cli(runner_class=MarionetteTestRunner, parser_class=BaseMarionetteOptions):
     parser = parser_class(usage='%prog [options] test_file_or_dir <test_file_or_dir> ...')
-    structured.commandline.add_logging_group(parser)
     options, tests = parser.parse_args()
     parser.verify_usage(options, tests)
 
-    options.logger = structured.commandline.setup_logging("Marionette Unit Tests",
-                                                          options,
-                                                          {"tbpl": sys.stdout})
-
     runner = startTestRunner(runner_class, options, tests)
     if runner.failed > 0:
         sys.exit(10)
 
 if __name__ == "__main__":
     cli()
+
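
Aside: the mozlog wiring deleted from cli() above can be reassembled as below.
This is a hedged sketch of the mozlog 2.x-era structured API as used by the
backed-out changeset; treat the details as assumptions.

import sys
from optparse import OptionParser
from mozlog import structured

parser = OptionParser()
structured.commandline.add_logging_group(parser)  # adds the --log-* options
options, args = parser.parse_args()

# Default to TBPL-formatted output on stdout, as the deleted code did.
logger = structured.commandline.setup_logging("Marionette Unit Tests",
                                              options, {"tbpl": sys.stdout})
logger.suite_start([])  # list of tests, as in BaseMarionetteTestRunner
logger.suite_end()
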
--- a/testing/marionette/client/requirements.txt
+++ b/testing/marionette/client/requirements.txt
@@ -1,13 +1,12 @@
 marionette-transport == 0.2
 manifestparser
 mozhttpd >= 0.5
 mozinfo >= 0.7
 mozprocess >= 0.9
 mozrunner >= 6.0
 mozdevice >= 0.37
-mozlog >= 2.0
 moznetwork >= 0.21
 mozcrash >= 0.5
 mozprofile >= 0.7
 moztest >= 0.1
 mozversion >= 0.2
--- a/testing/marionette/client/setup.py
+++ b/testing/marionette/client/setup.py
@@ -1,13 +1,13 @@
 import os
 from setuptools import setup, find_packages
 import sys
 
-version = '0.8'
+version = '0.7.10'
 
 # dependencies
 with open('requirements.txt') as f:
     deps = f.read().splitlines()
 
 setup(name='marionette_client',
       version=version,
       description="Marionette test automation client",
--- a/testing/marionette/mach_commands.py
+++ b/testing/marionette/mach_commands.py
@@ -1,18 +1,14 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import unicode_literals
-
 import os
-import sys
-
-from mozlog import structured
 
 from mozbuild.base import (
     MachCommandBase,
     MachCommandConditions as conditions,
 )
 
 from mach.decorators import (
     CommandArgument,
@@ -25,17 +21,17 @@ The %s command requires a Marionette-ena
 
 Please create an engineering build, which has Marionette enabled.  You can do
 this by omitting the VARIANT variable when building, or using:
 
 VARIANT=eng ./build.sh
 '''
 
 def run_marionette(tests, b2g_path=None, emulator=None, testtype=None,
-    address=None, binary=None, topsrcdir=None):
+    address=None, bin=None, topsrcdir=None):
     from marionette.runtests import (
         MarionetteTestRunner,
         BaseMarionetteOptions,
         startTestRunner
     )
 
     parser = BaseMarionetteOptions()
     options, args = parser.parse_args()
@@ -45,27 +41,23 @@ def run_marionette(tests, b2g_path=None,
                     'testing/marionette/client/marionette/tests/unit-tests.ini')]
 
     options.type = testtype
     if b2g_path:
         options.homedir = b2g_path
         if emulator:
             options.emulator = emulator
     else:
-        options.binary = binary
-        path, exe = os.path.split(options.binary)
+        options.bin = bin
+        path, exe = os.path.split(options.bin)
 
     options.address = address
 
     parser.verify_usage(options, tests)
 
-    options.logger = structured.commandline.setup_logging("Marionette Unit Tests",
-                                                          options,
-                                                          {"mach": sys.stdout})
-
     runner = startTestRunner(MarionetteTestRunner, options, tests)
     if runner.failed > 0:
         return 1
 
     return 0
 
 @CommandProvider
 class B2GCommands(MachCommandBase):
@@ -106,11 +98,11 @@ class MachCommands(MachCommandBase):
     @CommandArgument('--address',
         help='host:port of running Gecko instance to connect to.')
     @CommandArgument('--type', dest='testtype',
         help='Test type, usually one of: browser, b2g, b2g-qemu.',
         default='browser')
     @CommandArgument('tests', nargs='*', metavar='TESTS',
         help='Path to test(s) to run.')
     def run_marionette_test(self, tests, address=None, testtype=None):
-        binary = self.get_binary_path('app')
-        return run_marionette(tests, binary=binary, testtype=testtype,
+        bin = self.get_binary_path('app')
+        return run_marionette(tests, bin=bin, testtype=testtype,
             topsrcdir=self.topsrcdir, address=address)
--- a/testing/mozbase/mozlog/mozlog/structured/formatters/tbplformatter.py
+++ b/testing/mozbase/mozlog/mozlog/structured/formatters/tbplformatter.py
@@ -43,17 +43,17 @@ class TbplFormatter(BaseMachFormatter):
             return "TEST-%s | %s | %s | %s\n" % (
                 data["status"], self.id_str(data["test"]), data["subtest"], data.get("message", ""))
 
     def test_end(self, data):
         start_time = self.test_start_times.pop(self.test_id(data["test"]))
         time = data["time"] - start_time
 
         if "expected" in data:
-            return "TEST-UNEXPECTED-%s TEST-END | %s | expected %s | %s | took %ims\n" % (
+            return "TEST-END UNEXPECTED-%s | %s | expected %s | %s | took %ims\n" % (
                 data["status"], self.id_str(data["test"]), data["expected"],
                 data.get("message", ""), time)
         else:
             return "TEST-END %s | %s | took %ims\n" % (
                 data["status"], self.id_str(data["test"]), time)
 
     def suite_end(self, data):
         start_time = self.suite_start_time