author Andrew Halberstadt <>
Wed, 06 Dec 2017 14:11:55 -0500
changeset 449438 419ba12cc0c838d661b8ae6d47bbf328242773fc
parent 449364 9dcfb0365f3c758f9edbea67d94614cd582c1559
child 449458 4f37a0996fc6d1d0b6e02844714657afc39f7d52
permissions -rw-r--r--
Bug 1421799 - [reftest] Log 'GECKO(<pid>)' instead of 'ProcessReader' for process_output log actions r=gbrown MozReview-Commit-ID: FsKg5Gtwx9Z

# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

"""
Runs the reftest test harness.
"""

import collections
import copy
import itertools
import json
import multiprocessing
import os
import platform
import re
import shutil
import signal
import subprocess
import sys
import threading
from datetime import datetime, timedelta

SCRIPT_DIRECTORY = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))
if SCRIPT_DIRECTORY not in sys.path:
    sys.path.insert(0, SCRIPT_DIRECTORY)

import mozcrash
import mozdebug
import mozinfo
import mozleak
import mozlog
import mozprocess
import mozprofile
import mozrunner
from mozrunner.utils import get_stack_fixer_function, test_environment
from mozscreenshot import printstatus, dump_screen

try:
    from marionette_driver.addons import Addons
    from marionette_harness import Marionette
except ImportError, e:
    # Defer ImportError until attempt to use Marionette
    def reraise(*args, **kwargs):
        raise e

    Marionette = reraise

from output import OutputHandler, ReftestFormatter
import reftestcommandline

here = os.path.abspath(os.path.dirname(__file__))

try:
    from mozbuild.base import MozbuildObject
    build_obj = MozbuildObject.from_environment(cwd=here)
except ImportError:
    build_obj = None

def categoriesToRegex(categoryList):
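    # e.g. for the 'Successful' categories below this builds
    # "\((?P<pass>\d+) pass, (?P<loadOnly>\d+) load only\)",
    # which matches summary text like "(1234 pass, 2 load only)".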
    return "\\(" + ', '.join(["(?P<%s>\\d+) %s" % c for c in categoryList]) + "\\)"

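# Maps each summary heading to the (group-name, description) pairs that
# ReftestThread.chunkForMergedOutput uses to parse and aggregate the
# "REFTEST INFO | Result summary:" output of each worker process.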
summaryLines = [('Successful', [('pass', 'pass'), ('loadOnly', 'load only')]),
                ('Unexpected', [('fail', 'unexpected fail'),
                                ('pass', 'unexpected pass'),
                                ('asserts', 'unexpected asserts'),
                                ('fixedAsserts', 'unexpected fixed asserts'),
                                ('failedLoad', 'failed load'),
                                ('exception', 'exception')]),
                ('Known problems', [('knownFail', 'known fail'),
                                    ('knownAsserts', 'known asserts'),
                                    ('random', 'random'),
                                    ('skipped', 'skipped'),
                                    ('slow', 'slow')])]

def update_mozinfo():
    """walk up directories to find mozinfo.json update the info"""
    # TODO: This should go in a more generic place, e.g. mozinfo

    dirs = set()
    while path != os.path.expanduser('~'):
        if path in dirs:
        path = os.path.split(path)[0]

# Python's print is not threadsafe.
printLock = threading.Lock()

class ReftestThread(threading.Thread):
    def __init__(self, cmdargs):
        threading.Thread.__init__(self)
        self.cmdargs = cmdargs
        self.summaryMatches = {}
        self.retcode = -1
        for text, _ in summaryLines:
            self.summaryMatches[text] = None

    def run(self):
        with printLock:
            print "Starting thread with", self.cmdargs
        process = subprocess.Popen(self.cmdargs, stdout=subprocess.PIPE)
        for chunk in self.chunkForMergedOutput(process.stdout):
            with printLock:
                print chunk,
        self.retcode = process.wait()

    def chunkForMergedOutput(self, logsource):
        """Gather lines together that should be printed as one atomic unit.
        Individual test results--anything between 'REFTEST TEST-START' and
        'REFTEST TEST-END' lines--are an atomic unit.  Lines with data from
        summaries are parsed and the data stored for later aggregation.
        Other lines are considered their own atomic units and are permitted
        to intermix freely."""
        testStartRegex = re.compile("^REFTEST TEST-START")
        testEndRegex = re.compile("^REFTEST TEST-END")
        summaryHeadRegex = re.compile("^REFTEST INFO \\| Result summary:")
        summaryRegexFormatString = "^REFTEST INFO \\| (?P<message>{text}): (?P<total>\\d+) {regex}"
        summaryRegexStrings = [summaryRegexFormatString.format(text=text,
                                                               regex=categoriesToRegex(categories))
                               for (text, categories) in summaryLines]
        summaryRegexes = [re.compile(regex) for regex in summaryRegexStrings]

        for line in logsource:
            if testStartRegex.search(line) is not None:
                chunkedLines = [line]
                for lineToBeChunked in logsource:
                    chunkedLines.append(lineToBeChunked)
                    if testEndRegex.search(lineToBeChunked) is not None:
                        break
                yield ''.join(chunkedLines)
                continue

            haveSuppressedSummaryLine = False
            for regex in summaryRegexes:
                match = regex.search(line)
                if match is not None:
                    self.summaryMatches[match.group('message')] = match
                    haveSuppressedSummaryLine = True
                    break
            if haveSuppressedSummaryLine:
                continue

            if summaryHeadRegex.search(line) is None:
                yield line

class ReftestResolver(object):
    def defaultManifest(self, suite):
        return {"reftest": "reftest.list",
                "crashtest": "crashtests.list",
                "jstestbrowser": "jstests.list"}[suite]

    def directoryManifest(self, suite, path):
        return os.path.join(path, self.defaultManifest(suite))

    def findManifest(self, suite, test_file, subdirs=True):
        """Return a tuple of (manifest-path, filter-string) for running test_file.

        test_file is a path to a test or a manifest file
        rv = []
        default_manifest = self.defaultManifest(suite)
        if not os.path.isabs(test_file):
            test_file = self.absManifestPath(test_file)

        if os.path.isdir(test_file):
            for dirpath, dirnames, filenames in os.walk(test_file):
                if default_manifest in filenames:
                    rv.append((os.path.join(dirpath, default_manifest), None))
                    # We keep recursing into subdirectories which means that in the case
                    # of include directives we get the same manifest multiple times.
                    # However reftest.js will only read each manifest once

        elif test_file.endswith('.list'):
            if os.path.exists(test_file):
                rv = [(test_file, None)]
        else:
            dirname, pathname = os.path.split(test_file)
            found = True
            while not os.path.exists(os.path.join(dirname, default_manifest)):
                dirname, suffix = os.path.split(dirname)
                pathname = os.path.join(suffix, pathname)
                if os.path.dirname(dirname) == dirname:
                    found = False
                    break
            if found:
                rv = [(os.path.join(dirname, default_manifest),
                       r".*(?:/|\\)%s(?:[#?].*)?$" % pathname)]

        return rv

    def absManifestPath(self, path):
        return os.path.normpath(os.path.abspath(path))

    def manifestURL(self, options, path):
        return "file://%s" % path

    def resolveManifests(self, options, tests):
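        # Maps each manifest URL to a '|'-joined filter string; a value of
        # None means the whole manifest runs unfiltered.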
        suite = options.suite
        manifests = {}
        for testPath in tests:
            for manifest, filter_str in self.findManifest(suite, testPath):
                manifest = self.manifestURL(options, manifest)
                if manifest not in manifests:
                    manifests[manifest] = set()
                manifests[manifest].add(filter_str)

        for key in manifests.iterkeys():
            if None in manifests[key]:
                manifests[key] = None
            else:
                manifests[key] = "|".join(list(manifests[key]))
        return manifests

class RefTest(object):
    TEST_SEEN_INITIAL = 'reftest'
    TEST_SEEN_FINAL = 'Main app process exited normally'
    use_marionette = True
    oldcwd = os.getcwd()
    resolver_cls = ReftestResolver

    def __init__(self):
        self.lastTestSeen = self.TEST_SEEN_INITIAL
        self.haveDumpedScreen = False
        self.resolver = self.resolver_cls()
        self.log = None

    def _populate_logger(self, options):
        if self.log:
            return

        self.log = getattr(options, 'log', None)
        if self.log:
            return

        mozlog.commandline.log_formatters["tbpl"] = (ReftestFormatter,
                                                     "Reftest specific formatter for the"
                                                     "benefit of legacy log parsers and"
                                                     "tools such as the reftest analyzer")
        fmt_options = {}
        if not options.log_tbpl_level and os.environ.get('MOZ_REFTEST_VERBOSE'):
            options.log_tbpl_level = fmt_options['level'] = 'debug'
        self.log = mozlog.commandline.setup_logging(
            "reftest harness", options, {"tbpl": sys.stdout}, fmt_options)

    def getFullPath(self, path):
        "Get an absolute path relative to self.oldcwd."
        return os.path.normpath(os.path.join(self.oldcwd, os.path.expanduser(path)))

    def createReftestProfile(self, options, manifests, server='localhost', port=0,
                             profile_to_clone=None, startAfter=None):
        """Sets up a profile for reftest.

        :param options: Object containing command line options
        :param manifests: Dictionary of the form {manifest_path: [filters]}
        :param server: Server name to use for http tests
        :param profile_to_clone: Path to a profile to use as the basis for the
                                 test profile
        """

        locations = mozprofile.permissions.ServerLocations()
        locations.add_host(server, scheme='http', port=port)
        locations.add_host(server, scheme='https', port=port)

        # Set preferences for communication between our command line arguments
        # and the reftest harness.  Preferences that are required for reftest
        # to work should instead be set in reftest-preferences.js .
        prefs = {}
        prefs['reftest.timeout'] = options.timeout * 1000
        if options.totalChunks:
            prefs['reftest.totalChunks'] = options.totalChunks
        if options.thisChunk:
            prefs['reftest.thisChunk'] = options.thisChunk
        if options.logFile:
            prefs['reftest.logFile'] = options.logFile
        if options.ignoreWindowSize:
            prefs['reftest.ignoreWindowSize'] = True
        if options.shuffle:
            prefs['reftest.shuffle'] = True
        if options.repeat:
            prefs['reftest.repeat'] = options.repeat
        if options.runUntilFailure:
            prefs['reftest.runUntilFailure'] = True
        if options.cleanupCrashes:
            prefs['reftest.cleanupPendingCrashes'] = True
        prefs['reftest.focusFilterMode'] = options.focusFilterMode
        prefs['reftest.logLevel'] = options.log_tbpl_level or 'info'
        prefs['reftest.manifests'] = json.dumps(manifests)
        prefs['reftest.suite'] = options.suite

        if startAfter not in (None, self.TEST_SEEN_INITIAL, self.TEST_SEEN_FINAL):
  "Setting reftest.startAfter to %s" % startAfter)
            prefs['reftest.startAfter'] = startAfter

        # Unconditionally update the e10s pref.
        if options.e10s:
            prefs['browser.tabs.remote.autostart'] = True
        else:
            prefs['browser.tabs.remote.autostart'] = False

        # Bug 1262954: For winXP + e10s disable acceleration
        if platform.system() in ("Windows", "Microsoft") and \
           '5.1' in platform.version() and options.e10s:
            prefs['layers.acceleration.disabled'] = True

        sandbox_whitelist_paths = options.sandboxReadWhitelist
        if (platform.system() == "Linux" or
            platform.system() in ("Windows", "Microsoft")):
            # Trailing slashes are needed to indicate directories on Linux and Windows
            sandbox_whitelist_paths = map(lambda p: os.path.join(p, ""),
                                          sandbox_whitelist_paths)

        # Bug 1300355: Disable canvas cache for win7 as it uses
        # too much memory and causes OOMs.
        if platform.system() in ("Windows", "Microsoft") and \
           '6.1' in platform.version():
            prefs['reftest.nocache'] = True

        if options.marionette:
            port = options.marionette.split(':')[1]
            prefs['marionette.defaultPrefs.port'] = int(port)

            # Enable tracing output for detailed failures in case of
            # failing connection attempts, and hangs (bug 1397201)
            prefs['marionette.logging'] = "TRACE"

        preference_file = os.path.join(here, 'reftest-preferences.js')
        prefs.update(mozprofile.Preferences.read_prefs(preference_file))

        for v in options.extraPrefs:
            thispref = v.split('=')
            if len(thispref) < 2:
                print "Error: syntax error in --setpref=" + v
            prefs[thispref[0]] = thispref[1].strip()

        addons = []
        if not self.use_marionette:
            addons.append(options.reftestExtensionPath)

        if options.specialPowersExtensionPath is not None:
            if not self.use_marionette:
                addons.append(options.specialPowersExtensionPath)

        for pref in prefs:
            prefs[pref] = mozprofile.Preferences.cast(prefs[pref])

        # Install distributed extensions, if application has any.
        distExtDir = os.path.join(options.app[:options.app.rfind(os.sep)],
                                  "distribution", "extensions")
        if os.path.isdir(distExtDir):
            for f in os.listdir(distExtDir):
                addons.append(os.path.join(distExtDir, f))

        # Install custom extensions.
        for f in options.extensionsToInstall:
            addons.append(self.getFullPath(f))

        kwargs = {'addons': addons,
                  'preferences': prefs,
                  'locations': locations,
                  'whitelistpaths': sandbox_whitelist_paths}
        if profile_to_clone:
            profile = mozprofile.Profile.clone(profile_to_clone, **kwargs)
        else:
            profile = mozprofile.Profile(**kwargs)

        if os.path.join(here, 'chrome') not in options.extraProfileFiles:
            options.extraProfileFiles.append(os.path.join(here, 'chrome'))

        self.copyExtraFilesToProfile(options, profile)
        return profile

    def environment(self, **kwargs):
        kwargs['log'] = self.log
        return test_environment(**kwargs)

    def buildBrowserEnv(self, options, profileDir):
        browserEnv = self.environment(
            xrePath=options.xrePath, debugger=options.debugger)
        browserEnv["XPCOM_DEBUG_BREAK"] = "stack"
        if hasattr(options, "topsrcdir"):
            browserEnv["MOZ_DEVELOPER_REPO_DIR"] = options.topsrcdir
        if hasattr(options, "topobjdir"):
            browserEnv["MOZ_DEVELOPER_OBJ_DIR"] = options.topobjdir

        if mozinfo.info.get("asan"):
            # Disable leak checking for reftests for now
            if "ASAN_OPTIONS" in browserEnv:
                browserEnv["ASAN_OPTIONS"] += ":detect_leaks=0"
            else:
                browserEnv["ASAN_OPTIONS"] = "detect_leaks=0"

        for v in options.environment:
            ix = v.find("=")
            if ix <= 0:
                print "Error: syntax error in --setenv=" + v
                return None
            browserEnv[v[:ix]] = v[ix + 1:]

        # Enable leaks detection to its own log file.
        self.leakLogFile = os.path.join(profileDir, "runreftest_leaks.log")
        browserEnv["XPCOM_MEM_BLOAT_LOG"] = self.leakLogFile
        return browserEnv

    def cleanup(self, profileDir):
        if profileDir:
            shutil.rmtree(profileDir, True)

    def verifyTests(self, tests, options):
        """
        Support --verify mode: Run test(s) many times in a variety of
        configurations/environments in an effort to find intermittent
        failures.
        """

        # Number of times to repeat test(s) when running with --repeat
        VERIFY_REPEAT = 10
        # Number of times to repeat test(s) when running test in separate browser
        VERIFY_REPEAT_SINGLE_BROWSER = 5

        def step1():
            stepOptions = copy.deepcopy(options)
            stepOptions.repeat = VERIFY_REPEAT
            stepOptions.runUntilFailure = True
            result = self.runTests(tests, stepOptions)
            return result

        def step2():
            stepOptions = copy.deepcopy(options)
            for i in xrange(VERIFY_REPEAT_SINGLE_BROWSER):
                result = self.runTests(tests, stepOptions)
                if result != 0:
                    break
            return result

        def step3():
            stepOptions = copy.deepcopy(options)
            stepOptions.repeat = VERIFY_REPEAT
            stepOptions.runUntilFailure = True
            # Run this step in chaos mode (exact MOZ_CHAOSMODE value assumed)
            stepOptions.environment.append("MOZ_CHAOSMODE=3")
            result = self.runTests(tests, stepOptions)
            return result

        def step4():
            stepOptions = copy.deepcopy(options)
            # Run this step in chaos mode (exact MOZ_CHAOSMODE value assumed)
            stepOptions.environment.append("MOZ_CHAOSMODE=3")
            for i in xrange(VERIFY_REPEAT_SINGLE_BROWSER):
                result = self.runTests(tests, stepOptions)
                if result != 0:
                    break
            return result

        steps = [
            ("1. Run each test %d times in one browser." % VERIFY_REPEAT,
            ("2. Run each test %d times in a new browser each time." %
            ("3. Run each test %d times in one browser, in chaos mode." % VERIFY_REPEAT,
            ("4. Run each test %d times in a new browser each time, in chaos mode." %

        stepResults = {}
        for (descr, step) in steps:
            stepResults[descr] = "not run / incomplete"

        startTime = datetime.now()
        maxTime = timedelta(seconds=options.verify_max_time)
        finalResult = "PASSED"
        for (descr, step) in steps:
            if (datetime.now() - startTime) > maxTime:
                self.log.info("::: Test verification is taking too long: Giving up!")
                self.log.info("::: So far, all checks passed, but not all checks were run.")
                break
            self.log.info(':::')
            self.log.info('::: Running test verification step "%s"...' % descr)
            self.log.info(':::')
            result = step()
            if result != 0:
                stepResults[descr] = "FAIL"
                finalResult = "FAILED!"
                break
            stepResults[descr] = "Pass"

        self.log.info(':::')
        self.log.info('::: Test verification summary for:')
        self.log.info(':::')
        for test in tests:
            self.log.info('::: ' + test)
        self.log.info(':::')
        for descr in sorted(stepResults.keys()):
            self.log.info('::: %s : %s' % (descr, stepResults[descr]))
        self.log.info(':::')
        self.log.info('::: Test verification %s' % finalResult)
        self.log.info(':::')

        return result

    def runTests(self, tests, options, cmdargs=None):
        cmdargs = cmdargs or []
        self._populate_logger(options)

        if options.cleanupCrashes:
            mozcrash.cleanup_pending_crash_reports()

        manifests = self.resolver.resolveManifests(options, tests)
        if options.filter:
            manifests[""] = options.filter

        if not getattr(options, 'runTestsInParallel', False):
            return self.runSerialTests(manifests, options, cmdargs)

        cpuCount = multiprocessing.cpu_count()

        # We have the directive, technology, and machine to run multiple test instances.
        # Experimentation says that reftests are not overly CPU-intensive, so we can run
        # multiple jobs per CPU core.
        # Our Windows machines in automation seem to get upset when we run a lot of
        # simultaneous tests on them, so tone things down there.
        if sys.platform == 'win32':
            jobsWithoutFocus = cpuCount
        else:
            jobsWithoutFocus = 2 * cpuCount

        totalJobs = jobsWithoutFocus + 1
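        # One extra job is reserved for the needs-focus tests, which run
        # serially after the chunked non-needs-focus jobs have finished.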
        perProcessArgs = [sys.argv[:] for i in range(0, totalJobs)]

        host = 'localhost'
        port = 2828
        if options.marionette:
            host, port = options.marionette.split(':')
            port = int(port)

        # First job is only needs-focus tests.  Remaining jobs are
        # non-needs-focus and chunked.
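        # e.g. with the default host and port, the second chunked job gets:
        #   --focus-filter-mode=non-needs-focus --total-chunks=<jobsWithoutFocus>
        #   --this-chunk=2 --marionette=localhost:2829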
        perProcessArgs[0].insert(-1, "--focus-filter-mode=needs-focus")
        for (chunkNumber, jobArgs) in enumerate(perProcessArgs[1:], start=1):
            jobArgs[-1:-1] = ["--focus-filter-mode=non-needs-focus",
                              "--total-chunks=%d" % jobsWithoutFocus,
                              "--this-chunk=%d" % chunkNumber,
                              "--marionette=%s:%d" % (host, port)]
            port += 1

        for jobArgs in perProcessArgs:
            jobArgs[0:0] = [sys.executable, "-u"]

        threads = [ReftestThread(args) for args in perProcessArgs[1:]]
        for t in threads:
            t.start()

        while True:
            # The test harness in each individual thread will be doing timeout
            # handling on its own, so we shouldn't need to worry about any of
            # the threads hanging for arbitrarily long.
            for t in threads:
                t.join(10)
            if not any(t.is_alive() for t in threads):
                break

        # Run the needs-focus tests serially after the other ones, so we don't
        # have to worry about races between the needs-focus tests *actually*
        # needing focus and the dummy windows in the non-needs-focus tests
        # trying to focus themselves.
        focusThread = ReftestThread(perProcessArgs[0])
        focusThread.start()
        focusThread.join()

        # Output the summaries that the ReftestThread filters suppressed.
        summaryObjects = [collections.defaultdict(int) for s in summaryLines]
        for t in threads:
            for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
                threadMatches = t.summaryMatches[text]
                for (attribute, description) in categories:
                    amount = int(
                        threadMatches.group(attribute) if threadMatches else 0)
                    summaryObj[attribute] += amount
                amount = int(
                    threadMatches.group('total') if threadMatches else 0)
                summaryObj['total'] += amount

        print 'REFTEST INFO | Result summary:'
        for (summaryObj, (text, categories)) in zip(summaryObjects, summaryLines):
            details = ', '.join(["%d %s" % (summaryObj[attribute], description) for (
                attribute, description) in categories])
            print 'REFTEST INFO | ' + text + ': ' + str(summaryObj['total']) + ' (' + details + ')'

        return int(any(t.retcode != 0 for t in threads))

    def handleTimeout(self, timeout, proc, utilityPath, debuggerInfo):
        """handle process output timeout"""
        # TODO: bug 913975 : _processOutput should call self.processOutputLine
        # one more time on timeout (I think)
        self.log.error("%s | application timed out after %d seconds with no output" % (
                       self.lastTestSeen, int(timeout)))
        self.log.error("Force-terminating active process(es).")
        self.killAndGetStack(
            proc, utilityPath, debuggerInfo, dump_screen=not debuggerInfo)

    def dumpScreen(self, utilityPath):
        if self.haveDumpedScreen:
  "Not taking screenshot here: see the one that was previously logged")
        self.haveDumpedScreen = True
        dump_screen(utilityPath, self.log)

    def killAndGetStack(self, process, utilityPath, debuggerInfo, dump_screen=False):
        """
        Kill the process, preferably in a way that gets us a stack trace.
        Also attempts to obtain a screenshot before killing the process
        if specified.
        """
        if dump_screen:
            self.dumpScreen(utilityPath)

        if mozinfo.info.get('crashreporter', True) and not debuggerInfo:
            if mozinfo.isWin:
                # We should have a "crashinject" program in our utility path
                crashinject = os.path.normpath(
                    os.path.join(utilityPath, "crashinject.exe"))
                if os.path.exists(crashinject):
                    status = subprocess.Popen(
                        [crashinject, str(process.pid)]).wait()
                    printstatus("crashinject", status)
                    if status == 0:
                        return
            else:
                try:
                    process.kill(sig=signal.SIGABRT)
                except OSError:
                    self.log.info("Can't trigger Breakpad, process no longer exists")
                return

        self.log.info("Can't trigger Breakpad, just killing process")
        process.kill()

    def runApp(self, profile, binary, cmdargs, env,
               timeout=None, debuggerInfo=None,
               symbolsPath=None, options=None,
               valgrindPath=None, valgrindArgs=None, valgrindSuppFiles=None):

        def timeoutHandler():
            self.handleTimeout(
                timeout, proc, options.utilityPath, debuggerInfo)

        interactive = False
        debug_args = None
        if debuggerInfo:
            interactive = debuggerInfo.interactive
            debug_args = [debuggerInfo.path] + debuggerInfo.args

        def record_last_test(message):
            """Records the last test seen by this harness for the benefit of crash logging."""
            if message['action'] == 'test_start':
                if " " in message['test']:
                    self.lastTestSeen = message['test'].split(" ")[0]
                else:
                    self.lastTestSeen = message['test']

        self.log.add_handler(record_last_test)

        outputHandler = OutputHandler(self.log, options.utilityPath, symbolsPath=symbolsPath)

        kp_kwargs = {
            'kill_on_timeout': False,
            'cwd': SCRIPT_DIRECTORY,
            'onTimeout': [timeoutHandler],
            'processOutputLine': [outputHandler],
        }

        if mozinfo.isWin:
            # Prevents log interleaving on Windows at the expense of losing
            # true log order. See bug 798300 and bug 1324961 for more details.
            kp_kwargs['processStderrLine'] = [outputHandler]

        if interactive:
            # If an interactive debugger is attached,
            # don't use timeouts, and don't capture ctrl-c.
            timeout = None
            signal.signal(signal.SIGINT, lambda sigid, frame: None)

        runner_cls = mozrunner.runners.get(mozinfo.info.get('appname', 'firefox'),
                                           mozrunner.Runner)
        runner = runner_cls(profile=profile, binary=binary, cmdargs=cmdargs, env=env,
                            process_class=mozprocess.ProcessHandlerMixin,
                            process_args=kp_kwargs)
        runner.start(debug_args=debug_args, interactive=interactive,
                     outputTimeout=timeout)
        proc = runner.process_handler
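        # Name the child's output stream 'GECKO(<pid>)' so process_output log
        # actions carry the pid instead of the default 'ProcessReader' name
        # (bug 1421799).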
        outputHandler.proc_name = 'GECKO({})'.format(proc.pid)

        # Used to defer a possible IOError exception from Marionette
        marionette_exception = None

        if self.use_marionette:
            marionette_args = {
                'socket_timeout': options.marionette_socket_timeout,
                'startup_timeout': options.marionette_startup_timeout,
                'symbols_path': options.symbolsPath,
            }
            if options.marionette:
                host, port = options.marionette.split(':')
                marionette_args['host'] = host
                marionette_args['port'] = int(port)

            try:
                marionette = Marionette(**marionette_args)
                marionette.start_session()

                addons = Addons(marionette)
                if options.specialPowersExtensionPath:
                    addons.install(options.specialPowersExtensionPath, temp=True)

                addons.install(options.reftestExtensionPath, temp=True)

            except IOError:
                # Any IOError as thrown by Marionette means that something is
                # wrong with the process, like a crash or the socket is no
                # longer open. We defer raising this specific error so that
                # post-test checks for leaks and crashes are performed and
                # reported first.
                marionette_exception = sys.exc_info()

        status = runner.wait()
        runner.process_handler = None
        outputHandler.proc_name = None

        if status:
            msg = "TEST-UNEXPECTED-FAIL | %s | application terminated with exit code %s" % \
                (self.lastTestSeen, status)
            # use process_output so message is logged verbatim
            self.log.process_output(None, msg)
        else:
            self.lastTestSeen = self.TEST_SEEN_FINAL

        crashed = mozcrash.log_crashes(self.log, os.path.join(profile.profile, 'minidumps'),
                                       symbolsPath, test=self.lastTestSeen)
        if not status and crashed:
            status = 1

        runner.cleanup()

        if marionette_exception is not None:
            exc, value, tb = marionette_exception
            raise exc, value, tb

        return status, self.lastTestSeen

    def runSerialTests(self, manifests, options, cmdargs=None):
        debuggerInfo = None
        if options.debugger:
            debuggerInfo = mozdebug.get_debugger_info(options.debugger, options.debuggerArgs,
                                                      options.debuggerInteractive)
        profileDir = None
        startAfter = None  # When the previous run crashed, we skip the tests we ran before
        prevStartAfter = None
        for i in itertools.count():
            try:
                if cmdargs is None:
                    cmdargs = []

                if self.use_marionette:
                    cmdargs.append('-marionette')

                profile = self.createReftestProfile(options,
                                                    manifests,
                                                    startAfter=startAfter)
                profileDir = profile.profile  # name makes more sense

                # browser environment
                browserEnv = self.buildBrowserEnv(options, profileDir)

      "Running with e10s: {}".format(options.e10s))
                status, startAfter = self.runApp(profile,
                                                 # We generally want the JS harness or marionette
                                                 # to handle timeouts if they can.
                                                 # The default JS harness timeout is currently
                                                 # 300 seconds (default options.timeout).
                                                 # The default Marionette socket timeout is
                                                 # currently 360 seconds.
                                                 # Give the JS harness extra time to deal with
                                                 # its own timeouts and try to usually exceed
                                                 # the 360 second marionette socket timeout.
                                                 # See bug 479518 and bug 1414063.
                                                 timeout=options.timeout + 70.0,
                                                 symbolsPath=options.symbolsPath,
                                                 options=options,
                                                 debuggerInfo=debuggerInfo)
                self.log.info("Process mode: {}".format('e10s' if options.e10s else 'non-e10s'))
                if status == 0:
                    break

                if startAfter == self.TEST_SEEN_FINAL:
          "Finished running all tests, skipping resume "
                                  "despite non-zero status code: %s" % status)

                if startAfter is not None and options.shuffle:
                    self.log.error("Can not resume from a crash with --shuffle "
                                   "enabled. Please consider disabling --shuffle")
                if startAfter is not None and options.maxRetries <= i:
                    self.log.error("Hit maximum number of allowed retries ({}) "
                                   "in the test run".format(options.maxRetries))
                if startAfter == prevStartAfter:
                    # If we are stuck on the same test, or the crashed test
                    # appeared more than once, stop.
                    self.log.error("Force stop because we keep running into "
                                   "test \"{}\"".format(startAfter))
                    break
                prevStartAfter = startAfter
                # TODO: we need to emit a SUITE-END log if it crashed
            finally:
                self.cleanup(profileDir)
        return status

    def copyExtraFilesToProfile(self, options, profile):
        "Copy extra files or dirs specified on the command line to the testing profile."
        profileDir = profile.profile
        for f in options.extraProfileFiles:
            abspath = self.getFullPath(f)
            if os.path.isfile(abspath):
                if os.path.basename(abspath) == 'user.js':
                    extra_prefs = mozprofile.Preferences.read_prefs(abspath)
                    profile.set_preferences(extra_prefs)
                elif os.path.basename(abspath).endswith('.dic'):
                    hyphDir = os.path.join(profileDir, "hyphenation")
                    if not os.path.exists(hyphDir):
                        os.makedirs(hyphDir)
                    shutil.copy2(abspath, hyphDir)
                else:
                    shutil.copy2(abspath, profileDir)
            elif os.path.isdir(abspath):
                dest = os.path.join(profileDir, os.path.basename(abspath))
                shutil.copytree(abspath, dest)
            else:
                self.log.warning(
                    "runreftest.py | Failed to copy %s to profile" % abspath)

def run_test_harness(parser, options):
    reftest = RefTest()
    parser.validate(options, reftest)

    # We have to validate options.app here for the case when the mach
    # command is able to find it after argument parsing. This can happen
    # when running from a tests.zip.
    if not options.app:
        parser.error("could not find the application path, --appname must be specified")

    options.app = reftest.getFullPath(options.app)
    if not os.path.exists(options.app):
        parser.error("Error: Path %(app)s doesn't exist. Are you executing "
                     "$objdir/_tests/reftest/runreftest.py?" % {"app": options.app})

    if options.xrePath is None:
        options.xrePath = os.path.dirname(options.app)

    if options.verify:
        result = reftest.verifyTests(options.tests, options)
    else:
        result = reftest.runTests(options.tests, options)

    return result

if __name__ == "__main__":
    parser = reftestcommandline.DesktopArgumentsParser()
    options = parser.parse_args()
    sys.exit(run_test_harness(parser, options))