Bug 1400895 - Better try support for test-verify. r=ahal
authorJoel Maher <jmaher@mozilla.com>
Wed, 23 May 2018 10:00:03 -0400
changeset 419631 8f56e6dd916450dd9c3de12894cf531497c0a71e
parent 419630 dcd341823c776b650d3c89c07ad781675095dd83
child 419632 47e81ea1ef10189ef210867934bf36e14cf223dc
child 419656 98a9787f56df2fe6457b2ecca43c751a790914e7
push id64364
push userapavel@mozilla.com
push dateWed, 23 May 2018 22:04:11 +0000
treeherderautoland@538e4b3728c4 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersahal
bugs1400895
milestone62.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1400895 - Better try support for test-verify. r=ahal
taskcluster/ci/test/web-platform.yml
taskcluster/taskgraph/optimize.py
taskcluster/taskgraph/transforms/tests.py
taskcluster/taskgraph/util/perfile.py
testing/mozbase/moztest/moztest/resolve.py
tools/tryselect/selectors/fuzzy.py
--- a/taskcluster/ci/test/web-platform.yml
+++ b/taskcluster/ci/test/web-platform.yml
@@ -131,17 +131,17 @@ web-platform-tests-wdspec-headless:
                     - --headless-width=1024
                     - --headless-height=768
                 default:
                     - --test-type=wdspec
                     - --headless
 
 test-verify-wpt:
     description: "Extra verification of web-platform tests modified on this push"
-    suite: test-verify-wpt
+    suite: test-verify
     treeherder-symbol: TVw
     max-run-time: 10800
     run-on-projects:
         by-test-platform:
             # do not run on ccov; see also the enable_code_coverage transform
             linux64-ccov/.*: []
             windows10-64-ccov/debug: []
             # do not run on beta or release: usually just confirms earlier results
--- a/taskcluster/taskgraph/optimize.py
+++ b/taskcluster/taskgraph/optimize.py
@@ -16,16 +16,17 @@ from __future__ import absolute_import, 
 import logging
 import os
 from collections import defaultdict
 
 from .graph import Graph
 from . import files_changed
 from .taskgraph import TaskGraph
 from .util.seta import is_low_value_task
+from .util.perfile import perfile_number_of_chunks
 from .util.taskcluster import find_task_id
 from .util.parameterization import resolve_task_references
 from mozbuild.util import memoize
 from slugid import nice as slugid
 from mozbuild.base import MozbuildObject
 
 logger = logging.getLogger(__name__)
 
@@ -374,8 +375,22 @@ class SkipUnlessSchedules(OptimizationSt
 
         scheduled = self.scheduled_by_push(params['head_repository'], params['head_rev'])
         conditions = set(conditions)
         # if *any* of the condition components are scheduled, do not optimize
         if conditions & scheduled:
             return False
 
         return True
+
+
+class TestVerify(OptimizationStrategy):
+    def should_remove_task(self, task, params, _):
+        # Return False (keep the task, do not optimize it away) when the push
+        # touches files covered by this test-verify task; otherwise return True.
+        env = params.get('try_task_config', {}) or {}
+        env = env.get('templates', {}).get('env', {})
+        if perfile_number_of_chunks(env.get('MOZHARNESS_TEST_PATHS', ''),
+                                    params.get('head_repository', ''),
+                                    params.get('head_rev', ''),
+                                    task):
+            return False
+        return True
--- a/taskcluster/taskgraph/transforms/tests.py
+++ b/taskcluster/taskgraph/transforms/tests.py
@@ -18,36 +18,35 @@ for example - use `all_tests.py` instead
 """
 
 from __future__ import absolute_import, print_function, unicode_literals
 
 from taskgraph.transforms.base import TransformSequence
 from taskgraph.util.schema import resolve_keyed_by, OptimizationSchema
 from taskgraph.util.treeherder import split_symbol, join_symbol, add_suffix
 from taskgraph.util.platforms import platform_family
-from taskgraph import files_changed
-from mozpack.path import match as mozpackmatch
 from taskgraph.util.schema import (
     validate_schema,
     optionally_keyed_by,
     Schema,
 )
 from taskgraph.util.taskcluster import get_artifact_path
 from mozbuild.schedules import INCLUSIVE_COMPONENTS
 
+from taskgraph.util.perfile import perfile_number_of_chunks
+
 from voluptuous import (
     Any,
     Optional,
     Required,
     Exclusive,
 )
 
 import copy
 import logging
-import math
 
 # default worker types keyed by instance-size
 LINUX_WORKER_TYPES = {
     'large': 'aws-provisioner-v1/gecko-t-linux-large',
     'xlarge': 'aws-provisioner-v1/gecko-t-linux-xlarge',
     'default': 'aws-provisioner-v1/gecko-t-linux-large',
 }
 
@@ -374,17 +373,16 @@ test_description_schema = Schema({
     # The target name, specifying the build artifact to be tested.
     # If None or not specified, a transform sets the target based on OS:
     # target.dmg (Mac), target.apk (Android), target.tar.bz2 (Linux),
     # or target.zip (Windows).
     Optional('target'): optionally_keyed_by(
         'test-platform',
         Any(basestring, None),
     ),
-
 }, required=True)
 
 
 @transforms.add
 def handle_keyed_by_mozharness(config, tests):
     """Resolve a mozharness field if it is keyed by something"""
     for test in tests:
         resolve_keyed_by(test, 'mozharness', item_name=test['test-name'])
@@ -786,89 +784,48 @@ def split_e10s(config, tests):
 
 @transforms.add
 def split_chunks(config, tests):
     """Based on the 'chunks' key, split tests up into chunks by duplicating
     them and assigning 'this-chunk' appropriately and updating the treeherder
     symbol."""
     for test in tests:
         if test['suite'].startswith('test-verify'):
-            test['chunks'] = perfile_number_of_chunks(config, test['test-name'])
-            if test['chunks'] == 0:
-                continue
+            env = config.params.get('try_task_config', {}) or {}
+            env = env.get('templates', {}).get('env', {})
+            test['chunks'] = perfile_number_of_chunks(env.get('MOZHARNESS_TEST_PATHS', ''),
+                                                      config.params.get('head_repository', ''),
+                                                      config.params.get('head_rev', ''),
+                                                      test['test-name'])
+
             # limit the number of chunks we run for test-verify mode because
             # test-verify is comprehensive and takes a lot of time, if we have
             # >30 tests changed, this is probably an import of external tests,
             # or a patch renaming/moving files in bulk
             maximum_number_verify_chunks = 3
             if test['chunks'] > maximum_number_verify_chunks:
                 test['chunks'] = maximum_number_verify_chunks
 
-        if test['chunks'] == 1:
+        if test['chunks'] <= 1:
             test['this-chunk'] = 1
             yield test
             continue
 
         for this_chunk in range(1, test['chunks'] + 1):
             # copy the test and update with the chunk number
             chunked = copy.deepcopy(test)
             chunked['this-chunk'] = this_chunk
 
             # add the chunk number to the TH symbol
             chunked['treeherder-symbol'] = add_suffix(
                 chunked['treeherder-symbol'], this_chunk)
 
             yield chunked
 
 
-def perfile_number_of_chunks(config, type):
-    # A rough estimate of how many chunks we need based on simple rules
-    # for determining what a test file is.
-
-    # TODO: Make this flexible based on coverage vs verify || test type
-    tests_per_chunk = 10.0
-
-    if type.startswith('test-verify-wpt'):
-        file_patterns = ['testing/web-platform/tests/**']
-    elif type.startswith('test-verify-gpu'):
-        file_patterns = ['**/*webgl*/**/test_*',
-                         '**/dom/canvas/**/test_*',
-                         '**/gfx/tests/**/test_*',
-                         '**/devtools/canvasdebugger/**/browser_*',
-                         '**/reftest*/**']
-    elif type.startswith('test-verify'):
-        file_patterns = ['**/test_*',
-                         '**/browser_*',
-                         '**/crashtest*/**',
-                         'js/src/test/test/',
-                         'js/src/test/non262/',
-                         'js/src/test/test262/']
-
-    changed_files = files_changed.get_changed_files(config.params.get('head_repository'),
-                                                    config.params.get('head_rev'))
-    test_count = 0
-    for pattern in file_patterns:
-        for path in changed_files:
-            if mozpackmatch(path, pattern):
-                gpu = False
-                if type == 'test-verify-e10s':
-                    # file_patterns for test-verify will pick up some gpu tests, lets ignore
-                    # in the case of reftest, we will not have any in the regular case
-                    gpu_dirs = ['dom/canvas', 'gfx/tests', 'devtools/canvasdebugger', 'webgl']
-                    for gdir in gpu_dirs:
-                        if len(path.split(gdir)) > 1:
-                            gpu = True
-
-                if not gpu:
-                    test_count += 1
-
-    chunks = test_count/tests_per_chunk
-    return int(math.ceil(chunks))
-
-
 @transforms.add
 def allow_software_gl_layers(config, tests):
     """
     Handle the "allow-software-gl-layers" property for platforms where it
     applies.
     """
     for test in tests:
         if test.get('allow-software-gl-layers'):
new file mode 100644
--- /dev/null
+++ b/taskcluster/taskgraph/util/perfile.py
@@ -0,0 +1,81 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import logging
+import math
+
+from mozbuild.util import memoize
+from mozpack.path import match as mozpackmatch
+from mozversioncontrol import get_repository_object, InvalidRepoPath
+from taskgraph import files_changed
+from .. import GECKO
+
+logger = logging.getLogger(__name__)
+
+
+@memoize
+def perfile_number_of_chunks(try_task_config, head_repository, head_rev, type):
+    # TODO: Make this flexible based on coverage vs verify || test type
+    tests_per_chunk = 10.0
+
+    if type.startswith('test-verify-wpt'):
+        file_patterns = ['testing/web-platform/tests/**',
+                         'testing/web-platform/mozilla/tests/**']
+    elif type.startswith('test-verify-gpu'):
+        file_patterns = ['**/*webgl*/**/test_*',
+                         '**/dom/canvas/**/test_*',
+                         '**/gfx/tests/**/test_*',
+                         '**/devtools/canvasdebugger/**/browser_*',
+                         '**/reftest*/**']
+    elif type.startswith('test-verify'):
+        file_patterns = ['**/test_*',
+                         '**/browser_*',
+                         '**/crashtest*/**',
+                         'js/src/test/test/**',
+                         'js/src/test/non262/**',
+                         'js/src/test/test262/**']
+    else:
+        # Not a test-verify task: default to a single chunk.
+        return 1
+
+    changed_files = set()
+    specified_files = []
+    if try_task_config:
+        specified_files = try_task_config.split(":")
+
+    try:
+        vcs = get_repository_object(GECKO)
+        changed_files.update(vcs.get_outgoing_files('AM'))
+    except InvalidRepoPath:
+        vcs = None
+
+    if not changed_files:
+        changed_files.update(files_changed.get_changed_files(head_repository,
+                                                             head_rev))
+
+    changed_files.update(specified_files)
+    test_count = 0
+    for pattern in file_patterns:
+        for path in changed_files:
+            # TODO: consider running tests if a manifest changes
+            if path.endswith('.list') or path.endswith('.ini'):
+                continue
+
+            if mozpackmatch(path, pattern):
+                gpu = False
+                if type == 'test-verify-e10s':
+                    # file_patterns for test-verify will pick up some gpu tests; let's
+                    # ignore those here, as they are counted by test-verify-gpu instead
+                    gpu_dirs = ['dom/canvas', 'gfx/tests', 'devtools/canvasdebugger', 'webgl']
+                    for gdir in gpu_dirs:
+                        if len(path.split(gdir)) > 1:
+                            gpu = True
+
+                if not gpu:
+                    test_count += 1
+
+    chunks = test_count/tests_per_chunk
+    return int(math.ceil(chunks))
--- a/testing/mozbase/moztest/moztest/resolve.py
+++ b/testing/mozbase/moztest/moztest/resolve.py
@@ -58,103 +58,114 @@ TEST_SUITES = {
         'aliases': ('mn',),
         'mach_command': 'marionette-test',
         'kwargs': {'tests': None},
     },
     'mochitest-a11y': {
         'aliases': ('a11y', 'ally'),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'a11y', 'test_paths': None},
-        'task_regex': 'mochitest-a11y(?:-1)?$',
+        'task_regex': ['mochitest-a11y(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-browser': {
         'aliases': ('bc', 'browser-chrome'),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'browser-chrome', 'test_paths': None},
-        'task_regex': 'mochitest-browser-chrome(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-browser-chrome(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-chrome': {
         'aliases': ('mc',),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'chrome', 'test_paths': None},
-        'task_regex': 'mochitest-chrome(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-chrome(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-clipboard': {
         'aliases': ('cl', 'clipboard',),
         'mach_command': 'mochitest',
         'kwargs': {'subsuite': 'clipboard', 'test_paths': None},
-        'task_regex': 'mochitest-clipboard(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-clipboard(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-devtools': {
         'aliases': ('dt', 'devtools-chrome'),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'browser-chrome', 'subsuite': 'devtools', 'test_paths': None},
-        'task_regex': 'mochitest-devtools-chrome(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-devtools-chrome(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-gpu': {
         'aliases': ('gpu',),
         'mach_command': 'mochitest',
         'kwargs': {'subsuite': 'gpu', 'test_paths': None},
-        'task_regex': 'mochitest-gpu(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-gpu(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-gpu)?(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-media': {
         'aliases': ('mpm', 'plain-media'),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'plain', 'subsuite': 'media', 'test_paths': None},
-        'task_regex': 'mochitest-media(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-media(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-plain': {
         'aliases': ('mp', 'plain',),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'plain', 'test_paths': None},
-        'task_regex': 'mochitest(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-screenshots': {
         'aliases': ('ss', 'screenshots-chrome'),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'browser-chrome', 'subsuite': 'screenshots', 'test_paths': None},
-        'task_regex': 'browser-screenshots(?:-e10s)?(?:-1)?$',
+        'task_regex': ['browser-screenshots(?:-e10s)?(?:-1)?$'],
     },
     'mochitest-webgl': {
         'aliases': ('webgl',),
         'mach_command': 'mochitest',
         'kwargs': {'flavor': 'plain', 'subsuite': 'webgl', 'test_paths': None},
-        'task_regex': 'mochitest-webgl(?:-e10s)?(?:-1)?$',
+        'task_regex': ['mochitest-webgl(?:-e10s)?(?:-1)?$',
+                       'test-verify(?:-gpu)?(?:-e10s)?(?:-1)?$'],
     },
     'python': {
         'mach_command': 'python-test',
         'kwargs': {'tests': None},
     },
     'reftest': {
         'aliases': ('rr',),
         'mach_command': 'reftest',
         'kwargs': {'tests': None},
-        'task_regex': '(opt|debug)-reftest(?:-no-accel|-gpu|-stylo)?(?:-e10s)?(?:-1)?$',
+        'task_regex': ['(opt|debug)-reftest(?:-no-accel|-gpu|-stylo)?(?:-e10s)?(?:-1)?$',
+                       'test-verify-gpu(?:-e10s)?(?:-1)?$'],
     },
     'robocop': {
         'mach_command': 'robocop',
         'kwargs': {'test_paths': None},
-        'task_regex': 'robocop(?:-e10s)?(?:-1)?$',
+        'task_regex': ['robocop(?:-e10s)?(?:-1)?$'],
     },
     'web-platform-tests': {
         'aliases': ('wpt',),
         'mach_command': 'web-platform-tests',
         'kwargs': {'include': []},
-        'task_regex': 'web-platform-tests(?:-reftests|-wdspec)?(?:-e10s)?(?:-1)?$',
+        'task_regex': ['web-platform-tests(?:-reftests|-wdspec)?(?:-e10s)?(?:-1)?$',
+                       'test-verify-wpt-e10s'],
     },
     'valgrind': {
         'aliases': ('v',),
         'mach_command': 'valgrind-test',
         'kwargs': {},
     },
     'xpcshell': {
         'aliases': ('x',),
         'mach_command': 'xpcshell-test',
         'kwargs': {'test_file': 'all'},
-        'task_regex': 'xpcshell(?:-1)?$',
+        'task_regex': ['xpcshell(?:-1)?$', 'test-verify(?:-1)?$'],
     },
 }
 
 for i in range(1, MOCHITEST_TOTAL_CHUNKS + 1):
     TEST_SUITES['mochitest-%d' % i] = {
         'aliases': ('m%d' % i,),
         'mach_command': 'mochitest',
         'kwargs': {
--- a/tools/tryselect/selectors/fuzzy.py
+++ b/tools/tryselect/selectors/fuzzy.py
@@ -181,17 +181,17 @@ def filter_by_paths(tasks, paths):
     task_regexes = set()
     for flavor, subsuite in flavors:
         suite = get_suite_definition(flavor, subsuite, strict=True)
         if 'task_regex' not in suite:
             print("warning: no tasks could be resolved from flavor '{}'{}".format(
                     flavor, " and subsuite '{}'".format(subsuite) if subsuite else ""))
             continue
 
-        task_regexes.add(suite['task_regex'])
+        task_regexes.update(suite['task_regex'])
 
     def match_task(task):
         return any(re.search(pattern, task) for pattern in task_regexes)
 
     return filter(match_task, tasks)
 
 
 def run_fuzzy_try(update=False, query=None, templates=None, full=False, parameters=None,