author | Andrew Halberstadt <ahalberstadt@mozilla.com> |
date | Fri, 05 Jun 2015 13:28:29 -0400 |
changeset 249632 | 92faa039f31b16d6de95cc3355688d8f492e8bae |
parent 249631 | 656ea3bfbb4ebc7273001d31d8d00fce04fa83e2 |
child 249633 | 43e3ece52f5572b6a11b39d15d89eb4d6620a580 |
push id | 61298 |
push user | ahalberstadt@mozilla.com |
push date | Thu, 18 Jun 2015 20:28:24 +0000 |
treeherder | mozilla-inbound@43e3ece52f55 |
reviewers | chmanchester |
bugs | 1171971 |
milestone | 41.0a1 |
first release with | nightly linux32, linux64, mac, win32, win64 |
last release without | nightly linux32, linux64, mac, win32, win64 |
--- a/testing/config/mozharness/b2g_emulator_config.py
+++ b/testing/config/mozharness/b2g_emulator_config.py
@@ -67,17 +67,17 @@ config = {
             "--xre-path=%(xre_path)s",
             "--symbols-path=%(symbols_path)s",
             "--busybox=%(busybox)s",
             "--total-chunks=%(total_chunks)s",
             "--this-chunk=%(this_chunk)s",
             "--quiet",
             "--log-raw=%(raw_log_file)s",
             "--certificate-path=%(certificate_path)s",
-            "--test-path=%(test_path)s"
+            "%(test_path)s"
         ],
         "run_filename": "runtestsb2g.py",
         "testsdir": "mochitest"
     },
     "mochitest-chrome": {
         "options": [
             "--adbpath=%(adbpath)s",
             "--b2gpath=%(b2gpath)s",
@@ -88,17 +88,17 @@ config = {
             "--symbols-path=%(symbols_path)s",
             "--busybox=%(busybox)s",
             "--total-chunks=%(total_chunks)s",
             "--this-chunk=%(this_chunk)s",
             "--quiet",
             "--chrome",
             "--log-raw=%(raw_log_file)s",
             "--certificate-path=%(certificate_path)s",
-            "--test-path=%(test_path)s"
+            "%(test_path)s"
         ],
         "run_filename": "runtestsb2g.py",
         "testsdir": "mochitest"
     },
     "reftest": {
         "options": [
             "--adbpath=%(adbpath)s",
             "--b2gpath=%(b2gpath)s",
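The mozharness config above now hands the test location to runtestsb2g.py as a bare positional argument rather than via --test-path. As a minimal sketch of how such an option template behaves (the concrete values here are hypothetical; mozharness substitutes its own at run time), the entries are plain %-interpolated strings:

    # Hypothetical expansion of an option template like the ones above.
    options = [
        "--total-chunks=%(total_chunks)s",
        "--this-chunk=%(this_chunk)s",
        "%(test_path)s",  # bare positional path instead of --test-path=...
    ]
    values = {"total_chunks": 4, "this_chunk": 1, "test_path": "dom/media/tests"}
    print([opt % values for opt in options])
    # ['--total-chunks=4', '--this-chunk=1', 'dom/media/tests']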
--- a/testing/mochitest/bisection.py
+++ b/testing/mochitest/bisection.py
@@ -5,18 +5,16 @@ import mozinfo
 class Bisect(object):
 
     "Class for creating, bisecting and summarizing for --bisect-chunk option."
 
     def __init__(self, harness):
         super(Bisect, self).__init__()
         self.summary = []
         self.contents = {}
-        self.testRoot = harness.testRoot
-        self.testRootAbs = harness.testRootAbs
         self.repeat = 10
         self.failcount = 0
         self.max_failures = 3
 
     def setup(self, tests):
         "This method is used to initialize various variables that are required for test bisection"
         status = 0
         self.contents.clear()
--- a/testing/mochitest/browser-harness.xul
+++ b/testing/mochitest/browser-harness.xul
@@ -95,24 +95,17 @@
         if (this.fileLogger)
           this.fileLogger.close();
       }
     }
 
     function TestStart() {
       gConfig = readConfig();
 
-      // If MochiTest was started with the --test-path flag specifying a subset
-      // of tests to run, put that path in the label of the "Run Tests" button
-      // so the tester knows which tests will run when they press that button.
-      if (gConfig.testPath)
-        document.getElementById("runTestsButton").label =
-          "Run " + gConfig.testPath + " tests";
-
-      // Similarly, update the title for --start-at and --end-at.
+      // Update the title for --start-at and --end-at.
       if (gConfig.startAt || gConfig.endAt)
         document.getElementById("runTestsButton").label =
          "Run subset of tests";
 
       if (gConfig.autorun)
         setTimeout(runTests, 0);
     }
--- a/testing/mochitest/browser-test.js
+++ b/testing/mochitest/browser-test.js
@@ -345,17 +345,17 @@ Tester.prototype = {
         this.dumper.structuredLogger.info("Browser Chrome Test Summary");
         this.dumper.structuredLogger.info("Passed: " + passCount);
         this.dumper.structuredLogger.info("Failed: " + failCount);
         this.dumper.structuredLogger.info("Todo: " + todoCount);
       } else {
         this.dumper.structuredLogger.testEnd("browser-test.js",
                                              "FAIL",
                                              "PASS",
-                                             "No tests to run. Did you pass an invalid --test-path?");
+                                             "No tests to run. Did you pass invalid test_paths?");
       }
       this.dumper.structuredLogger.info("*** End BrowserChrome Test Results ***");
 
       this.dumper.done();
 
       // Tests complete, notify the callback and return
       this.callback(this.tests);
       this.callback = null;
--- a/testing/mochitest/chrome-harness.js +++ b/testing/mochitest/chrome-harness.js @@ -52,72 +52,16 @@ function getResolvedURI(url) { function getChromeDir(resolvedURI) { var fileHandler = Components.classes["@mozilla.org/network/protocol;1?name=file"]. getService(Components.interfaces.nsIFileProtocolHandler); var chromeDir = fileHandler.getFileFromURLSpec(resolvedURI.spec); return chromeDir.parent.QueryInterface(Components.interfaces.nsILocalFile); } -/** - * basePath: the URL base path to search from such as chrome://mochikit/content/a11y - * testPath: the optional testPath passed into the test such as dom/tests/mochitest - * dir: the test dir to append to the uri after getting a directory interface - * srvScope: loaded javascript to server.js so we have aComponents.classesess to the list() function - * - * return value: - * single test: [json object, path to test] - * list of tests: [json object, null] <- directory [heirarchy] - */ -function getFileListing(basePath, testPath, dir, srvScope) -{ - var uri = getResolvedURI(basePath); - var chromeDir = getChromeDir(uri); - chromeDir.appendRelativePath(dir); - basePath += '/' + dir.replace(/\\/g, '/'); - - if (testPath == "false" || testPath == false) { - testPath = ""; - } - testPath = testPath.replace(/\\\\/g, '\\').replace(/\\/g, '/'); - - var ioSvc = Components.classes["@mozilla.org/network/io-service;1"]. - getService(Components.interfaces.nsIIOService); - var testsDirURI = ioSvc.newFileURI(chromeDir); - var testsDir = ioSvc.newURI(testPath, null, testsDirURI) - .QueryInterface(Components.interfaces.nsIFileURL).file; - - if (testPath != undefined) { - var extraPath = testPath; - - var fileNameRegexp = /(browser|test)_.+\.(xul|html|js)$/; - - // Invalid testPath... - if (!testsDir.exists()) - return null; - - if (testsDir.isFile()) { - if (fileNameRegexp.test(testsDir.leafName)) { - var singlePath = basePath + '/' + testPath; - var links = {}; - links[singlePath] = true; - return links; - } - // We were passed a file that's not a test... - return null; - } - - // otherwise, we were passed a directory of tests - basePath += "/" + testPath; - } - var [links, count] = srvScope.list(basePath, testsDir, true); - return links; -} - - //used by tests to determine their directory based off window.location.path function getRootDirectory(path, chromeURI) { if (chromeURI === undefined) { chromeURI = getChromeURI(path); } var myURL = chromeURI.QueryInterface(Components.interfaces.nsIURL); var mydir = myURL.directory; @@ -308,24 +252,11 @@ function getTestList(params, callback) { config[p] = true; } else if (params[p] == 0) { config[p] = false; } else { config[p] = params[p]; } } params = config; - if (params.manifestFile) { - getTestManifest("http://mochi.test:8888/" + params.manifestFile, params, callback); - return; - } - - var links = {}; - // load server.js in so we can share template functions - var scriptLoader = Cc["@mozilla.org/moz/jssubscript-loader;1"]. - getService(Ci.mozIJSSubScriptLoader); - var srvScope = {}; - scriptLoader.loadSubScript('chrome://mochikit/content/server.js', - srvScope); - - links = getFileListing(baseurl, params.testPath, params.testRoot, srvScope); - callback(links); + getTestManifest("http://mochi.test:8888/" + params.manifestFile, params, callback); + return; }
--- a/testing/mochitest/jetpack-addon-harness.js
+++ b/testing/mochitest/jetpack-addon-harness.js
@@ -192,17 +192,17 @@ function testInit() {
 }
 
 let passed = 0;
 let failed = 0;
 
 function finish() {
   if (passed + failed == 0) {
     dump("TEST-UNEXPECTED-FAIL | jetpack-addon-harness.js | " +
-         "No tests to run. Did you pass an invalid --test-path?\n");
+         "No tests to run. Did you pass invalid test_paths?\n");
   } else {
     dump("Jetpack Addon Test Summary\n");
     dump("\tPassed: " + passed + "\n" +
          "\tFailed: " + failed + "\n" +
          "\tTodo: 0\n");
   }
--- a/testing/mochitest/jetpack-package-harness.js
+++ b/testing/mochitest/jetpack-package-harness.js
@@ -213,17 +213,17 @@ function testInit() {
   // Wait until the add-on window is ready
   require("sdk/addon/window").ready.then(() => {
     let passed = 0;
     let failed = 0;
 
     function finish() {
       if (passed + failed == 0) {
         dump("TEST-UNEXPECTED-FAIL | jetpack-package-harness.js | " +
-             "No tests to run. Did you pass an invalid --test-path?\n");
+             "No tests to run. Did you pass invalid test_paths?\n");
       } else {
         dump("Jetpack Package Test Summary\n");
         dump("\tPassed: " + passed + "\n" +
              "\tFailed: " + failed + "\n" +
              "\tTodo: 0\n");
       }
--- a/testing/mochitest/mach_commands.py +++ b/testing/mochitest/mach_commands.py @@ -231,20 +231,16 @@ class MochitestRunner(MozbuildObject): 'testing', 'mochitest') self.bin_dir = os.path.join(self.topobjdir, 'dist', 'bin') def resolve_tests(self, test_paths, test_objects=None, cwd=None): if test_objects: return test_objects - # Ensure test paths are relative to topobjdir or topsrcdir. - test_paths = test_paths or [] - test_paths = [self._wrap_path_argument(tp).relpath() for tp in test_paths] - from mozbuild.testing import TestResolver resolver = self._spawn(TestResolver) tests = list(resolver.resolve_tests(paths=test_paths, cwd=cwd)) return tests def run_b2g_test(self, context, tests=None, suite='mochitest', **kwargs): """Runs a b2g mochitest.""" if kwargs.get('desktop'): @@ -435,22 +431,17 @@ class MachCommands(MachCommandBase): @Command('mochitest', category='testing', conditions=[is_buildapp_in(*SUPPORTED_APPS)], description='Run any flavor of mochitest (integration test).', parser=setup_argument_parser) @CommandArgument('-f', '--flavor', metavar='{{{}}}'.format(', '.join(CANONICAL_FLAVORS)), choices=SUPPORTED_FLAVORS, help='Only run tests of this flavor.') - @CommandArgument('test_paths', nargs='*', metavar='TEST', default=None, - help='Test to run. Can be a single test file or a directory of tests ' - '(to run recursively). If omitted, the entire suite is run.') - def run_mochitest_general(self, test_paths, flavor=None, test_objects=None, - **kwargs): - + def run_mochitest_general(self, flavor=None, test_objects=None, **kwargs): buildapp = None for app in SUPPORTED_APPS: if is_buildapp_in(app)(self): buildapp = app break flavors = None if flavor: @@ -464,29 +455,32 @@ class MachCommands(MachCommandBase): flavors = [f for f, v in ALL_FLAVORS.iteritems() if buildapp in v['enabled_apps']] from mozbuild.controller.building import BuildDriver self._ensure_state_subdir_exists('.') driver = self._spawn(BuildDriver) driver.install_tests(remove=False) + test_paths = kwargs['test_paths'] + kwargs['test_paths'] = [] + if test_paths and buildapp == 'b2g': # In B2G there is often a 'gecko' directory, though topsrcdir is actually # elsewhere. This little hack makes test paths like 'gecko/dom' work, even if # GECKO_PATH is set in the .userconfig gecko_path = mozpath.abspath(mozpath.join(kwargs['b2gPath'], 'gecko')) if gecko_path != self.topsrcdir: - old_paths = test_paths[:] - test_paths = [] - for tp in old_paths: + new_paths = [] + for tp in test_paths: if mozpath.abspath(tp).startswith(gecko_path): - test_paths.append(mozpath.relpath(tp, gecko_path)) + new_paths.append(mozpath.relpath(tp, gecko_path)) else: - test_paths.append(tp) + new_paths.append(tp) + test_paths = new_paths mochitest = self._spawn(MochitestRunner) tests = mochitest.resolve_tests(test_paths, test_objects, cwd=self._mach_context.cwd) subsuite = kwargs.get('subsuite') if subsuite == 'default': kwargs['subsuite'] = None @@ -571,24 +565,20 @@ class MachCommands(MachCommandBase): @CommandProvider class RobocopCommands(MachCommandBase): @Command('robocop', category='testing', conditions=[conditions.is_android], description='Run a Robocop test.', parser=setup_argument_parser) - @CommandArgument('test_paths', nargs='*', metavar='TEST', default=None, - help='Test to run. Can be a single Robocop test file (like "testLoad.java") ' - ' or a directory of tests ' - '(to run recursively). 
If omitted, the entire Robocop suite is run.') @CommandArgument('--serve', default=False, action='store_true', help='Run no tests but start the mochi.test web server and launch ' 'Fennec with a test profile.') - def run_robocop(self, test_paths, serve=False, **kwargs): + def run_robocop(self, serve=False, **kwargs): if serve: kwargs['autorun'] = False if not kwargs.get('robocopIni'): kwargs['robocopIni'] = os.path.join(self.topobjdir, '_tests', 'testing', 'mochitest', 'robocop.ini') if not kwargs.get('robocopApk'): @@ -596,16 +586,19 @@ class RobocopCommands(MachCommandBase): 'robocop', 'robocop-debug.apk') from mozbuild.controller.building import BuildDriver self._ensure_state_subdir_exists('.') driver = self._spawn(BuildDriver) driver.install_tests(remove=False) + test_paths = kwargs['test_paths'] + kwargs['test_paths'] = [] + from mozbuild.testing import TestResolver resolver = self._spawn(TestResolver) tests = list(resolver.resolve_tests(paths=test_paths, cwd=self._mach_context.cwd, flavor='instrumentation', subsuite='robocop')) mochitest = self._spawn(MochitestRunner) return mochitest.run_android_test(self._mach_context, tests, 'robocop', **kwargs)
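The mach commands above no longer declare their own test_paths argument; it now comes from the shared mochitest argument parser (see mochitest_options.py below), arrives in **kwargs, and is popped out before the harness is spawned so that only the resolved test objects are handed on. A rough sketch of that handoff pattern, with placeholder names for everything not shown in the diff:

    # Rough sketch of the kwargs handoff used by run_mochitest_general and
    # run_robocop above. resolve() stands in for the real TestResolver call.
    def resolve(paths):
        return ["resolved:%s" % p for p in paths]

    def run_suite(**kwargs):
        test_paths = kwargs['test_paths']   # provided by the shared parser
        kwargs['test_paths'] = []           # the harness works from resolved tests
        return resolve(test_paths), kwargs

    tests, kwargs = run_suite(test_paths=['dom/base/test'], flavor='plain')
    print(tests)   # ['resolved:dom/base/test']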
--- a/testing/mochitest/mochitest_options.py
+++ b/testing/mochitest/mochitest_options.py
@@ -53,16 +53,23 @@ class ArgumentContainer():
 class MochitestArguments(ArgumentContainer):
     """General mochitest arguments."""
 
     LOG_LEVELS = ("DEBUG", "INFO", "WARNING", "ERROR", "FATAL")
     LEVEL_STRING = ", ".join(LOG_LEVELS)
 
     args = [
+        [["test_paths"],
+         {"nargs": "*",
+          "metavar": "TEST",
+          "default": [],
+          "help": "Test to run. Can be a single test file or a directory of tests "
+                  "(to run recursively). If omitted, the entire suite is run.",
+          }],
         [["--keep-open"],
          {"action": "store_false",
           "dest": "closeWhenDone",
           "default": True,
           "help": "Always keep the browser open after tests complete.",
          }],
         [["--appname"],
          {"dest": "app",
@@ -153,23 +160,16 @@ class MochitestArguments(ArgumentContain
          }],
         [["--ipcplugins"],
          {"action": "store_true",
          "dest": "ipcplugins",
          "help": "Run ipcplugins mochitests.",
          "default": False,
          "suppress": True,
          }],
-        [["--test-path"],
-         {"dest": "testPath",
-          "default": "",
-          "help": "Run the given test or recursively run the given directory of tests.",
-          # if running from mach, a test_paths arg is exposed instead
-          "suppress": build_obj is not None,
-          }],
         [["--bisect-chunk"],
          {"dest": "bisectChunk",
          "default": None,
          "help": "Specify the failing test name to find the previous tests that may be "
                  "causing the failure.",
          }],
         [["--start-at"],
          {"dest": "startAt",
@@ -547,16 +547,19 @@ class MochitestArguments(ArgumentContain
             gmp_modules = (
                 ('gmp-fake', '1.0'),
                 ('gmp-clearkey', '0.1'),
                 ('gmp-fakeopenh264', '1.0')
             )
             options.gmp_path = os.pathsep.join(
                 os.path.join(build_obj.bindir, *p) for p in gmp_modules)
 
+        if options.ipcplugins:
+            options.test_paths.append('dom/plugins/test/mochitest')
+
         if options.totalChunks is not None and options.thisChunk is None:
             parser.error(
                 "thisChunk must be specified when totalChunks is specified")
 
         if options.totalChunks:
             if not 1 <= options.thisChunk <= options.totalChunks:
                 parser.error("thisChunk must be between 1 and totalChunks")
@@ -723,16 +726,25 @@ class MochitestArguments(ArgumentContain
             "geckomediaplugin": 20000,
         }
 
         # Bug 1091917 - We exit early in tab processes on Windows, so we don't
        # get leak logs yet.
        if mozinfo.isWin:
            options.ignoreMissingLeaks.append("tab")
 
+        # XXX We can't normalize test_paths in the non build_obj case here,
+        # because testRoot depends on the flavor, which is determined by the
+        # mach command and therefore not finalized yet. Conversely, test paths
+        # need to be normalized here for the mach case.
+        if options.test_paths and build_obj:
+            # Normalize test paths so they are relative to test root
+            options.test_paths = [build_obj._wrap_path_argument(p).relpath()
+                                  for p in options.test_paths]
+
         return options
 
 
 class B2GArguments(ArgumentContainer):
     """B2G specific arguments."""
 
     args = [
         [["--b2gpath"],
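The --test-path flag is replaced by a positional, repeatable test_paths argument. A self-contained argparse sketch of the same shape (this mirrors the new args entry above but is not the harness parser itself):

    # Minimal sketch of the positional TEST argument that replaces --test-path.
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument("test_paths", nargs="*", metavar="TEST", default=[],
                        help="Test to run. Can be a single test file or a "
                             "directory of tests (to run recursively).")

    print(parser.parse_args([]).test_paths)                        # []
    print(parser.parse_args(["dom/base/test", "layout"]).test_paths)
    # ['dom/base/test', 'layout']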
--- a/testing/mochitest/runtests.py +++ b/testing/mochitest/runtests.py @@ -8,17 +8,16 @@ Runs the Mochitest test harness. from __future__ import with_statement import os import sys SCRIPT_DIR = os.path.abspath(os.path.realpath(os.path.dirname(__file__))) sys.path.insert(0, SCRIPT_DIR) from argparse import Namespace -from urlparse import urlparse import ctypes import glob import json import mozcrash import mozdebug import mozinfo import mozprocess import mozrunner @@ -46,34 +45,29 @@ from automationutils import ( ) from datetime import datetime from manifestparser import TestManifest from manifestparser.filters import ( chunk_by_dir, chunk_by_runtime, chunk_by_slice, + pathprefix, subsuite, tags, ) -from mochitest_options import MochitestArgumentParser +from mochitest_options import MochitestArgumentParser, build_obj from mozprofile import Profile, Preferences from mozprofile.permissions import ServerLocations from urllib import quote_plus as encodeURIComponent from mozlog.structured.formatters import TbplFormatter from mozlog.structured import commandline here = os.path.abspath(os.path.dirname(__file__)) -try: - from mozbuild.base import MozbuildObject - build_obj = MozbuildObject.from_environment(cwd=here) -except ImportError: - build_obj = None - ########################### # Option for NSPR logging # ########################### # Set the desired log modules you want an NSPR log be produced by a try run for, or leave blank to disable the feature. # This will be passed to NSPR_LOG_MODULES environment variable. Try run will then put a download link for the log file # on tbpl.mozilla.org. @@ -633,24 +627,24 @@ class MochitestUtilsMixin(object): self.urlOpts.append("shuffle=1") if "MOZ_HIDE_RESULTS_TABLE" in env and env[ "MOZ_HIDE_RESULTS_TABLE"] == "1": self.urlOpts.append("hideResultsTable=1") if options.runUntilFailure: self.urlOpts.append("runUntilFailure=1") if options.repeat: self.urlOpts.append("repeat=%d" % options.repeat) - if os.path.isfile( + if len(options.test_paths) == 1 and options.repeat > 0 and os.path.isfile( os.path.join( self.oldcwd, os.path.dirname(__file__), self.TEST_PATH, - options.testPath)) and options.repeat > 0: - self.urlOpts.append("testname=%s" % - ("/").join([self.TEST_PATH, options.testPath])) + options.test_paths[0])): + self.urlOpts.append("testname=%s" % "/".join( + [self.TEST_PATH, options.test_paths[0]])) if options.manifestFile: self.urlOpts.append("manifestFile=%s" % options.manifestFile) if options.failureFile: self.urlOpts.append( "failureFile=%s" % self.getFullPath( options.failureFile)) if options.runSlower: @@ -711,60 +705,53 @@ class MochitestUtilsMixin(object): if not allow_js_css and (".js" in filename or ".css" in filename): return False pathPieces = filename.split("/") return (testPattern.match(pathPieces[-1]) and not re.search(r'\^headers\^$', filename)) - def getTestPath(self, options): - if options.ipcplugins: - return "dom/plugins/test/mochitest" - else: - return options.testPath - def setTestRoot(self, options): - if hasattr(self, "testRoot"): - return self.testRoot, self.testRootAbs + if options.browserChrome: + if options.immersiveMode: + self.testRoot = 'metro' + else: + self.testRoot = 'browser' + elif options.jetpackPackage: + self.testRoot = 'jetpack-package' + elif options.jetpackAddon: + self.testRoot = 'jetpack-addon' + elif options.a11y: + self.testRoot = 'a11y' + elif options.webapprtChrome: + self.testRoot = 'webapprtChrome' + elif options.webapprtContent: + self.testRoot = 'webapprtContent' + elif options.chrome: + 
self.testRoot = 'chrome' else: - if options.browserChrome: - if options.immersiveMode: - self.testRoot = 'metro' - else: - self.testRoot = 'browser' - elif options.jetpackPackage: - self.testRoot = 'jetpack-package' - elif options.jetpackAddon: - self.testRoot = 'jetpack-addon' - elif options.a11y: - self.testRoot = 'a11y' - elif options.webapprtChrome: - self.testRoot = 'webapprtChrome' - elif options.webapprtContent: - self.testRoot = 'webapprtContent' - elif options.chrome: - self.testRoot = 'chrome' - else: - self.testRoot = self.TEST_PATH - self.testRootAbs = os.path.join(SCRIPT_DIR, self.testRoot) + self.testRoot = self.TEST_PATH + self.testRootAbs = os.path.join(SCRIPT_DIR, self.testRoot) def buildTestURL(self, options): testHost = "http://mochi.test:8888" - testPath = self.getTestPath(options) - testURL = "/".join([testHost, self.TEST_PATH, testPath]) - if os.path.isfile( - os.path.join( - self.oldcwd, - os.path.dirname(__file__), - self.TEST_PATH, - testPath)) and options.repeat > 0: - testURL = "/".join([testHost, - self.TEST_PATH, - os.path.dirname(testPath)]) + testURL = "/".join([testHost, self.TEST_PATH]) + + if len(options.test_paths) == 1 : + if options.repeat > 0 and os.path.isfile( + os.path.join( + self.oldcwd, + os.path.dirname(__file__), + self.TEST_PATH, + options.test_paths[0])): + testURL = "/".join([testURL, os.path.dirname(options.test_paths[0])]) + else: + testURL = "/".join([testURL, options.test_paths[0]]) + if options.chrome or options.a11y: testURL = "/".join([testHost, self.CHROME_PATH]) elif options.browserChrome or options.jetpackPackage or options.jetpackAddon: testURL = "about:blank" if options.nested_oop: testURL = "/".join([testHost, self.NESTED_OOP_TEST_PATH]) return testURL @@ -1693,17 +1680,16 @@ class Mochitest(MochitestUtilsMixin): extraArgs, utilityPath, debuggerInfo=None, symbolsPath=None, timeout=-1, onLaunch=None, detectShutdownLeaks=False, screenshotOnFail=False, - testPath=None, bisectChunk=None, quiet=False): """ Run the app, log the duration it took to execute, return the status code. Kills the app if it runs for longer than |maxTime| seconds, or outputs nothing for |timeout| seconds. """ # configure the message logger buffering @@ -1770,18 +1756,17 @@ class Mochitest(MochitestUtilsMixin): def timeoutHandler(): browserProcessId = outputHandler.browserProcessId self.handleTimeout( timeout, proc, utilityPath, debuggerInfo, - browserProcessId, - testPath) + browserProcessId) kp_kwargs = {'kill_on_timeout': False, 'cwd': SCRIPT_DIR, 'onTimeout': [timeoutHandler]} kp_kwargs['processOutputLine'] = [outputHandler] # create mozrunner instance and start the system under test process self.lastTestSeen = self.test_name startTime = datetime.now() @@ -1891,117 +1876,109 @@ class Mochitest(MochitestUtilsMixin): flavor = 'plain' base = 'mochitest' if options.e10s: base = '{}-e10s'.format(base) return os.path.join(data_dir, '{}-{}.runtimes.json'.format( base, flavor)) + def normalize_paths(self, paths): + # Normalize test paths so they are relative to test root + norm_paths = [] + for p in paths: + abspath = os.path.abspath(os.path.join(self.oldcwd, p)) + if abspath.startswith(self.testRootAbs): + norm_paths.append(os.path.relpath(abspath, self.testRootAbs)) + else: + norm_paths.append(p) + return norm_paths def getActiveTests(self, options, disabled=True): """ This method is used to parse the manifest and return active filtered tests. 
""" if self._active_tests: return self._active_tests - self.setTestRoot(options) manifest = self.getTestManifest(options) if manifest: info = mozinfo.info - # Bug 883858 - return all tests including disabled tests - testPath = self.getTestPath(options) - testPath = testPath.replace('\\', '/') - if testPath.endswith('.html') or \ - testPath.endswith('.xhtml') or \ - testPath.endswith('.xul') or \ - testPath.endswith('.js'): - # In the case where we have a single file, we don't want to - # filter based on options such as subsuite. - tests = manifest.active_tests( - exists=False, disabled=disabled, **info) - for test in tests: - if 'disabled' in test: - del test['disabled'] - - else: - # Bug 1089034 - imptest failure expectations are encoded as - # test manifests, even though they aren't tests. This gross - # hack causes several problems in automation including - # throwing off the chunking numbers. Remove them manually - # until bug 1089034 is fixed. - def remove_imptest_failure_expectations(tests, values): - return (t for t in tests - if 'imptests/failures' not in t['path']) - - filters = [ - remove_imptest_failure_expectations, - subsuite(options.subsuite), - ] - - # Add chunking filters if specified - if options.totalChunks: - if options.chunkByRuntime: - runtime_file = self.resolve_runtime_file(options, info) - if not os.path.exists(runtime_file): - self.log.warning("runtime file %s not found; defaulting to chunk-by-dir" % - runtime_file) - options.chunkByRuntime = None - flavor = self.getTestFlavor(options) - if flavor in ('browser-chrome', 'devtools-chrome'): - # these values match current mozharness configs - options.chunkbyDir = 5 - else: - options.chunkByDir = 4 - - if options.chunkByDir: - filters.append(chunk_by_dir(options.thisChunk, - options.totalChunks, - options.chunkByDir)) - elif options.chunkByRuntime: - with open(runtime_file, 'r') as f: - runtime_data = json.loads(f.read()) - runtimes = runtime_data['runtimes'] - default = runtime_data['excluded_test_average'] - filters.append( - chunk_by_runtime(options.thisChunk, - options.totalChunks, - runtimes, - default_runtime=default)) - else: - filters.append(chunk_by_slice(options.thisChunk, - options.totalChunks)) - - if options.test_tags: - filters.append(tags(options.test_tags)) - - tests = manifest.active_tests( - exists=False, disabled=disabled, filters=filters, **info) - - if len(tests) == 0: - self.log.error("no tests to run using specified " - "combination of filters: {}".format( - manifest.fmt_filters())) + # Bug 1089034 - imptest failure expectations are encoded as + # test manifests, even though they aren't tests. This gross + # hack causes several problems in automation including + # throwing off the chunking numbers. Remove them manually + # until bug 1089034 is fixed. 
+ def remove_imptest_failure_expectations(tests, values): + return (t for t in tests + if 'imptests/failures' not in t['path']) + + filters = [ + remove_imptest_failure_expectations, + subsuite(options.subsuite), + ] + + if options.test_tags: + filters.append(tags(options.test_tags)) + + if options.test_paths: + options.test_paths = self.normalize_paths(options.test_paths) + filters.append(pathprefix(options.test_paths)) + + # Add chunking filters if specified + if options.totalChunks: + if options.chunkByRuntime: + runtime_file = self.resolve_runtime_file(options, info) + if not os.path.exists(runtime_file): + self.log.warning("runtime file %s not found; defaulting to chunk-by-dir" % + runtime_file) + options.chunkByRuntime = None + flavor = self.getTestFlavor(options) + if flavor in ('browser-chrome', 'devtools-chrome'): + # these values match current mozharness configs + options.chunkbyDir = 5 + else: + options.chunkByDir = 4 + + if options.chunkByDir: + filters.append(chunk_by_dir(options.thisChunk, + options.totalChunks, + options.chunkByDir)) + elif options.chunkByRuntime: + with open(runtime_file, 'r') as f: + runtime_data = json.loads(f.read()) + runtimes = runtime_data['runtimes'] + default = runtime_data['excluded_test_average'] + filters.append( + chunk_by_runtime(options.thisChunk, + options.totalChunks, + runtimes, + default_runtime=default)) + else: + filters.append(chunk_by_slice(options.thisChunk, + options.totalChunks)) + + tests = manifest.active_tests( + exists=False, disabled=disabled, filters=filters, **info) + + if len(tests) == 0: + self.log.error("no tests to run using specified " + "combination of filters: {}".format( + manifest.fmt_filters())) paths = [] - for test in tests: if len(tests) == 1 and 'disabled' in test: del test['disabled'] pathAbs = os.path.abspath(test['path']) assert pathAbs.startswith(self.testRootAbs) tp = pathAbs[len(self.testRootAbs):].replace('\\', '/').strip('/') - # Filter out tests if we are using --test-path - if testPath and not tp.startswith(testPath): - continue - if not self.isTest(options, tp): self.log.warning( 'Warning: %s from manifest %s is not a valid test' % (test['name'], test['manifest'])) continue testob = {'path': tp} if 'disabled' in test: @@ -2127,21 +2104,17 @@ class Mochitest(MochitestUtilsMixin): testsToRun = self.getTestsToRun(options) if not options.runByDir: return self.runMochitests(options, testsToRun, onLaunch) # code for --run-by-dir dirs = self.getDirectories(options) result = 1 # default value, if no tests are run. - inputTestPath = self.getTestPath(options) for d in dirs: - if inputTestPath and not inputTestPath.startswith(d): - continue - print "dir: %s" % d tests_in_dir = [t for t in testsToRun if os.path.dirname(t) == d] # If we are using --run-by-dir, we should not use the profile path (if) provided # by the user, since we need to create a new directory for each run. We would face problems # if we use the directory provided by the user. 
result = self.runMochitests(options, tests_in_dir, onLaunch) @@ -2291,17 +2264,16 @@ class Mochitest(MochitestUtilsMixin): extraArgs=options.browserArgs, utilityPath=options.utilityPath, debuggerInfo=debuggerInfo, symbolsPath=options.symbolsPath, timeout=timeout, onLaunch=onLaunch, detectShutdownLeaks=detectShutdownLeaks, screenshotOnFail=options.screenshotOnFail, - testPath=options.testPath, bisectChunk=options.bisectChunk, quiet=options.quiet ) except KeyboardInterrupt: self.log.info("runtests.py | Received keyboard interrupt.\n") status = -1 except: traceback.print_exc() @@ -2332,27 +2304,22 @@ class Mochitest(MochitestUtilsMixin): return status def handleTimeout( self, timeout, proc, utilityPath, debuggerInfo, - browserProcessId, - testPath=None): + browserProcessId): """handle process output timeout""" # TODO: bug 913975 : _processOutput should call self.processOutputLine # one more time one timeout (I think) - if testPath: - error_message = "TEST-UNEXPECTED-TIMEOUT | %s | application timed out after %d seconds with no output on %s" % ( - self.lastTestSeen, int(timeout), testPath) - else: - error_message = "TEST-UNEXPECTED-TIMEOUT | %s | application timed out after %d seconds with no output" % ( - self.lastTestSeen, int(timeout)) + error_message = "TEST-UNEXPECTED-TIMEOUT | %s | application timed out after %d seconds with no output" % ( + self.lastTestSeen, int(timeout)) self.message_logger.dump_buffered() self.message_logger.buffering = False self.log.info(error_message) browserProcessId = browserProcessId or proc.pid self.killAndGetStack( browserProcessId, @@ -2558,17 +2525,16 @@ class Mochitest(MochitestUtilsMixin): def trackShutdownLeaks(self, message): if self.shutdownLeaks: self.shutdownLeaks.log(message) return message def makeTestConfig(self, options): "Creates a test configuration file for customizing test execution." options.logFile = options.logFile.replace("\\", "\\\\") - options.testPath = options.testPath.replace("\\", "\\\\") if "MOZ_HIDE_RESULTS_TABLE" in os.environ and os.environ[ "MOZ_HIDE_RESULTS_TABLE"] == "1": options.hideResultsTable = True # strip certain unnecessary items to avoid serialization errors in json.dumps() d = dict((k, v) for k, v in options.__dict__.items() if (v is None) or isinstance(v,(basestring,numbers.Number)))
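runtests.py drops getTestPath() and the per-call testPath plumbing in favour of a pathprefix manifest filter plus a small normalize_paths() helper that rewrites user-supplied paths relative to the test root. A standalone sketch of that normalisation, with made-up directories (POSIX paths shown):

    # Standalone sketch of the normalize_paths() behaviour added above: paths
    # under the test root are rewritten relative to it, anything else passes
    # through untouched.
    import os

    def normalize_paths(paths, oldcwd, test_root_abs):
        norm_paths = []
        for p in paths:
            abspath = os.path.abspath(os.path.join(oldcwd, p))
            if abspath.startswith(test_root_abs):
                norm_paths.append(os.path.relpath(abspath, test_root_abs))
            else:
                norm_paths.append(p)
        return norm_paths

    print(normalize_paths(["tests/dom/base/test_foo.html", "other/thing"],
                          oldcwd="/src", test_root_abs="/src/tests"))
    # ['dom/base/test_foo.html', 'other/thing']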
--- a/testing/mochitest/runtestsb2g.py
+++ b/testing/mochitest/runtestsb2g.py
@@ -116,16 +116,18 @@ class B2GMochitest(MochitestUtilsMixin):
         # TODO bug 839108 - mozprofile should probably handle this
         manifest = self.addChromeToProfile(options)
         self.copyExtraFilesToProfile(options)
         return manifest
 
     def run_tests(self, options):
         """ Prepare, configure, run tests and cleanup """
+        self.setTestRoot(options)
+
         manifest = self.build_profile(options)
         self.logPreamble(self.getActiveTests(options))
 
         # configuring the message logger's buffering
         self.message_logger.buffering = options.quiet
 
         if options.debugger or not options.autorun:
             timeout = None
--- a/testing/mochitest/runtestsremote.py
+++ b/testing/mochitest/runtestsremote.py
@@ -542,23 +542,23 @@ def run_test_harness(options):
         options.extraPrefs.append('extensions.autoupdate.enabled=false')
 
     if (options.dm_trans == 'adb' and options.robocopApk):
         dm._checkCmd(["install", "-r", options.robocopApk])
 
     if not options.autorun:
         # Force a single loop iteration. The iteration will start Fennec and
         # the httpd server, but not actually run a test.
-        options.testPath = robocop_tests[0]['name']
+        options.test_paths = [robocop_tests[0]['name']]
 
     retVal = None
 
     # Filtering tests
     active_tests = []
     for test in robocop_tests:
-        if options.testPath and options.testPath != test['name']:
+        if options.test_paths and test['name'] not in options.test_paths:
             continue
 
         if 'disabled' in test:
             log.info(
                 'TEST-INFO | skipping %s | %s' %
                 (test['name'], test['disabled']))
             continue
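For robocop, a requested test now has to be a member of options.test_paths instead of matching a single --test-path string, which allows several named tests per run. A toy version of the filtering loop above, with invented test entries:

    # Toy version of the robocop filtering: keep requested, non-disabled tests.
    robocop_tests = [{'name': 'testLoad'},
                     {'name': 'testBookmark', 'disabled': 'bug 0'}]
    test_paths = ['testLoad']

    active_tests = [t for t in robocop_tests
                    if (not test_paths or t['name'] in test_paths)
                    and 'disabled' not in t]
    print([t['name'] for t in active_tests])  # ['testLoad']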
--- a/testing/mozbase/manifestparser/manifestparser/filters.py
+++ b/testing/mozbase/manifestparser/manifestparser/filters.py
@@ -334,16 +334,44 @@
             if 'tags' not in test:
                 continue
 
             test_tags = [t.strip() for t in test['tags'].split()]
             if any(t in self.tags for t in test_tags):
                 yield test
 
 
+class pathprefix(InstanceFilter):
+    """
+    Removes tests that don't start with any of the given test paths.
+
+    :param paths: A list of test paths to filter on
+    """
+
+    def __init__(self, paths):
+        InstanceFilter.__init__(self, paths)
+        if isinstance(paths, basestring):
+            paths = [paths]
+        self.paths = paths
+
+    def __call__(self, tests, values):
+        for test in tests:
+            for tp in self.paths:
+                tp = os.path.normpath(tp)
+                if not os.path.normpath(test['relpath']).startswith(tp):
+                    continue
+
+                # any test path that points to a single file will be run no
+                # matter what, even if it's disabled
+                if 'disabled' in test and os.path.normpath(test['relpath']) == tp:
+                    del test['disabled']
+                yield test
+                break
+
+
 # filter container
 DEFAULT_FILTERS = (
     skip_if,
     run_if,
     fail_if,
 )
 """
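The new pathprefix filter is what replaces the old harness-side --test-path string comparison: a test survives if its manifest relpath starts with any requested path, and naming a single test file exactly also clears its disabled flag so it runs anyway. A standalone re-implementation of that behaviour on invented test entries (real harness code should use manifestparser.filters.pathprefix itself):

    # Standalone illustration of the pathprefix semantics added above.
    import os

    def pathprefix_filter(tests, paths):
        for test in tests:
            for tp in (os.path.normpath(p) for p in paths):
                relpath = os.path.normpath(test['relpath'])
                if not relpath.startswith(tp):
                    continue
                if 'disabled' in test and relpath == tp:
                    del test['disabled']   # exact file match overrides disabled
                yield test
                break

    tests = [{'relpath': 'dom/base/test_a.html'},
             {'relpath': 'dom/media/test_b.html', 'disabled': 'bug 123'},
             {'relpath': 'layout/test_c.html'}]
    print(list(pathprefix_filter(tests, ['dom/media/test_b.html'])))
    # [{'relpath': 'dom/media/test_b.html'}]  -- disabled flag dropped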
--- a/testing/taskcluster/tasks/tests/b2g_emulator_mochitest_media.yml
+++ b/testing/taskcluster/tasks/tests/b2g_emulator_mochitest_media.yml
@@ -13,20 +13,20 @@ task:
       - >
         python ./mozharness/scripts/b2g_emulator_unittest.py
         --config-file ./mozharness/configs/b2g/emulator_automation_config.py
         --config-file ./mozharness_configs/gaia_integration_override.py
         --config-file ./mozharness_configs/emulator_override.py
         --config-file ./mozharness_configs/remove_executables.py
         --download-symbols ondemand
         --test-suite mochitest
-        --test-path dom/media/tests/
         --installer-url {{build_url}}
         --test-url {{tests_url}}
         --xre-url https://queue.taskcluster.net/v1/task/wXAHAaxDQpqxoWF1iljJjg/runs/0/artifacts/public/cache/xulrunner-sdk-40.zip
+        dom/media/tests
     artifacts:
       'public/build':
         type: directory
         path: '/home/worker/artifacts/'
         expires: '{{#from_now}}1 year{{/from_now}}'
   extra:
     treeherder:
--- a/testing/testsuite-targets.mk
+++ b/testing/testsuite-targets.mk
@@ -1,21 +1,20 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 
-# Shortcut for mochitest* and xpcshell-tests targets,
-# replaces 'EXTRA_TEST_ARGS=--test-path=...'.
+# Shortcut for mochitest* and xpcshell-tests targets
 ifdef TEST_PATH
-TEST_PATH_ARG := --test-path='$(TEST_PATH)'
-IPCPLUGINS_PATH_ARG := --test-path='$(TEST_PATH)'
+TEST_PATH_ARG := '$(TEST_PATH)'
+IPCPLUGINS_PATH_ARG := '$(TEST_PATH)'
 else
 TEST_PATH_ARG :=
-IPCPLUGINS_PATH_ARG := --test-path=dom/plugins/test
+IPCPLUGINS_PATH_ARG := dom/plugins/test
 endif
 
 # include automation-build.mk to get the path to the binary
 TARGET_DEPTH = $(DEPTH)
 include $(topsrcdir)/build/binary-location.mk
 
 SYMBOLS_PATH := --symbols-path=$(DIST)/crashreporter-symbols
@@ -28,51 +27,51 @@ TEST_PACKAGE_NAME := $(ANDROID_PACKAGE_N
 endif
 
 RUN_MOCHITEST_B2G_DESKTOP = \
   rm -f ./$@.log && \
   $(PYTHON) _tests/testing/mochitest/runtestsb2g.py \
     --log-tbpl=./$@.log \
     --desktop --profile ${GAIA_PROFILE_DIR} \
     --failure-file=$(abspath _tests/testing/mochitest/makefailures.json) \
-    $(TEST_PATH_ARG) $(EXTRA_TEST_ARGS)
+    $(EXTRA_TEST_ARGS) $(TEST_PATH_ARG)
 
 RUN_MOCHITEST = \
   rm -f ./$@.log && \
   $(PYTHON) _tests/testing/mochitest/runtests.py \
     --log-tbpl=./$@.log \
     --failure-file=$(abspath _tests/testing/mochitest/makefailures.json) \
     --testing-modules-dir=$(abspath _tests/modules) \
-    $(SYMBOLS_PATH) $(TEST_PATH_ARG) $(EXTRA_TEST_ARGS)
+    $(SYMBOLS_PATH) $(EXTRA_TEST_ARGS) $(TEST_PATH_ARG)
 
 RERUN_MOCHITEST = \
   rm -f ./$@.log && \
   $(PYTHON) _tests/testing/mochitest/runtests.py \
     --log-tbpl=./$@.log \
     --run-only-tests=makefailures.json \
     --testing-modules-dir=$(abspath _tests/modules) \
-    $(SYMBOLS_PATH) $(TEST_PATH_ARG) $(EXTRA_TEST_ARGS)
+    $(SYMBOLS_PATH) $(EXTRA_TEST_ARGS) $(TEST_PATH_ARG)
 
 RUN_MOCHITEST_REMOTE = \
   rm -f ./$@.log && \
   $(PYTHON) _tests/testing/mochitest/runtestsremote.py \
     --log-tbpl=./$@.log $(DM_FLAGS) --dm_trans=$(DM_TRANS) \
     --app=$(TEST_PACKAGE_NAME) --deviceIP=${TEST_DEVICE} --xre-path=${MOZ_HOST_BIN} \
     --testing-modules-dir=$(abspath _tests/modules) \
-    $(SYMBOLS_PATH) $(TEST_PATH_ARG) $(EXTRA_TEST_ARGS)
+    $(SYMBOLS_PATH) $(EXTRA_TEST_ARGS) $(TEST_PATH_ARG)
 
 RUN_MOCHITEST_ROBOCOP = \
   rm -f ./$@.log && \
   $(PYTHON) _tests/testing/mochitest/runtestsremote.py \
     --robocop-apk=$(DEPTH)/build/mobile/robocop/robocop-debug.apk \
     --robocop-ids=$(DEPTH)/mobile/android/base/fennec_ids.txt \
     --robocop-ini=_tests/testing/mochitest/robocop.ini \
     --log-tbpl=./$@.log $(DM_FLAGS) --dm_trans=$(DM_TRANS) \
     --app=$(TEST_PACKAGE_NAME) --deviceIP=${TEST_DEVICE} --xre-path=${MOZ_HOST_BIN} \
-    $(SYMBOLS_PATH) $(TEST_PATH_ARG) $(EXTRA_TEST_ARGS)
+    $(SYMBOLS_PATH) $(EXTRA_TEST_ARGS) $(TEST_PATH_ARG)
 
 ifndef NO_FAIL_ON_TEST_ERRORS
 define check_test_error_internal
   @errors=`grep 'TEST-UNEXPECTED-' $@.log` ;\
   if test "$$errors" ; then \
 	  echo '$@ failed:'; \
 	  echo "$$errors"; \
 	  $(if $(1),echo $(1);) \
@@ -157,17 +156,17 @@ ifeq (i386,$(TARGET_CPU))
 endif
 ifeq (x86_64,$(TARGET_CPU))
 	$(RUN_MOCHITEST) --setpref=dom.ipc.plugins.enabled.x86_64.test.plugin=false $(IPCPLUGINS_PATH_ARG)
 endif
 ifeq (powerpc,$(TARGET_CPU))
 	$(RUN_MOCHITEST) --setpref=dom.ipc.plugins.enabled.ppc.test.plugin=false $(IPCPLUGINS_PATH_ARG)
 endif
 else
-	$(RUN_MOCHITEST) --setpref=dom.ipc.plugins.enabled=false --test-path=dom/plugins/test
+	$(RUN_MOCHITEST) --setpref=dom.ipc.plugins.enabled=false dom/plugins/test
 endif
 	$(CHECK_TEST_ERROR)
 
 ifeq ($(OS_ARCH),Darwin)
 webapprt_stub_path = $(TARGET_DIST)/$(MOZ_MACBUNDLE_NAME)/Contents/Resources/webapprt-stub$(BIN_SUFFIX)
 endif
 ifeq ($(OS_ARCH),WINNT)
 webapprt_stub_path = $(TARGET_DIST)/bin/webapprt-stub$(BIN_SUFFIX)
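From a developer's point of view the make-level interface should be unchanged; for example (illustrative invocation, run from the object directory):

    make mochitest-plain TEST_PATH='dom/base/test'

the only difference is that the path is now appended after EXTRA_TEST_ARGS as a bare positional argument instead of being turned into --test-path.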