Bug 1455872 - Add taskcluster configs for raptor on OSX; r=jmaher
author Rob Wood <rwood@mozilla.com>
date Tue, 24 Apr 2018 10:30:42 -0400
changeset 417613 32355d123d16b792bb3fafbf81a3541e0a54b2ef
parent 417612 28452fd08a415f3dbb59999b4e286e028759eaf5
child 417614 b1ad2c2fdb80eba85fdfb57cb01cd158b45e06c8
push id 33974
push user ncsoregi@mozilla.com
push date Thu, 10 May 2018 09:47:43 +0000
treeherder mozilla-central@b52b2eb81d1e
reviewers jmaher
bugs 1455872
milestone 62.0a1
Bug 1455872 - Add taskcluster configs for raptor on OSX; r=jmaher MozReview-Commit-ID: I67InZTEziy
python/mozbuild/mozbuild/schedules.py
taskcluster/ci/config.yml
taskcluster/ci/test/kind.yml
taskcluster/ci/test/raptor.yml
taskcluster/ci/test/test-platforms.yml
taskcluster/ci/test/test-sets.yml
taskcluster/docs/attributes.rst
taskcluster/taskgraph/target_tasks.py
taskcluster/taskgraph/transforms/tests.py
taskcluster/taskgraph/try_option_syntax.py
--- a/python/mozbuild/mozbuild/schedules.py
+++ b/python/mozbuild/mozbuild/schedules.py
@@ -45,16 +45,17 @@ EXCLUSIVE_COMPONENTS = [
     'awsy',
     'cppunittest',
     'firefox-ui',
     'geckoview',
     'geckoview-junit',
     'gtest',
     'marionette',
     'mochitest',
+    'raptor',
     'reftest',
     'robocop',
     'talos',
     'telemetry-tests-client',
     'xpcshell',
     'xpcshell-coverage',
     'web-platform-tests',
     'web-platform-tests-reftests',
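
Adding 'raptor' to EXCLUSIVE_COMPONENTS makes it a component that moz.build files can schedule exclusively via SCHEDULES. A minimal sketch of such a rule, with a hypothetical path that is not part of this patch:

    # moz.build sketch (hypothetical path): changes under this directory
    # would schedule only the 'raptor' component rather than the full test set.
    with Files('testing/raptor/**'):
        SCHEDULES.exclusive = ['raptor']
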
--- a/taskcluster/ci/config.yml
+++ b/taskcluster/ci/config.yml
@@ -10,16 +10,18 @@ treeherder:
         'Fxfn-l-e10s': 'Firefox functional tests (local) with e10s'
         'Fxfn-r': 'Firefox functional tests (remote)'
         'Fxfn-r-e10s': 'Firefox functional tests (remote) with e10s'
         'M': 'Mochitests'
         'M-e10s': 'Mochitests with e10s'
         'M-V': 'Mochitests on Valgrind'
         'R': 'Reftests'
         'R-e10s': 'Reftests with e10s'
+        'Rap': 'Raptor performance tests on Firefox'
+        'Rap-e10s': 'Raptor performance tests on Firefox with e10s'
         'T': 'Talos performance tests'
         'Tsd': 'Talos performance tests with Stylo disabled'
         'Tss': 'Talos performance tests with Stylo sequential'
         'T-e10s': 'Talos performance tests with e10s'
         'Tsd-e10s': 'Talos performance tests with e10s, Stylo disabled'
         'Tss-e10s': 'Talos performance tests with e10s, Stylo sequential'
         'T-P-e10s': 'Talos performance tests with e10s and gecko profiling'
         'tt-c': 'Telemetry client marionette tests'
--- a/taskcluster/ci/test/kind.yml
+++ b/taskcluster/ci/test/kind.yml
@@ -21,12 +21,13 @@ transforms:
 
 jobs-from:
     - awsy.yml
     - compiled.yml
     - firefox-ui.yml
     - marionette.yml
     - misc.yml
     - mochitest.yml
+    - raptor.yml
     - reftest.yml
     - talos.yml
     - web-platform.yml
     - xpcshell.yml
new file mode 100644
--- /dev/null
+++ b/taskcluster/ci/test/raptor.yml
@@ -0,0 +1,40 @@
+job-defaults:
+    max-run-time: 1800
+    suite: raptor
+    tier:
+        by-test-platform:
+            windows10-64-ccov/.*: 3
+            linux64-ccov/.*: 3
+            default: 3
+    virtualization:
+        by-test-platform:
+            windows10-64-ccov/.*: virtual
+            default: hardware
+    mozharness:
+        script: raptor_script.py
+        no-read-buildbot-config: true
+        config:
+            by-test-platform:
+                macosx.*:
+                    - raptor/mac_config.py
+                windows.*:
+                    - raptor/windows_config.py
+                windows10-64-ccov/debug:
+                    - raptor/windows_vm_config.py
+                linux64-ccov/opt:
+                    - raptor/linux64_config_taskcluster.py
+                default:
+                    - raptor/linux_config.py
+
+raptor-firefox-tp6:
+    description: "Raptor Firefox tp6"
+    try-name: raptor-firefox-tp6
+    treeherder-symbol: Rap(tp6)
+    run-on-projects:
+        by-test-platform:
+            .*-qr/.*: ['try']
+            default: ['try']
+    max-run-time: 1200
+    mozharness:
+        extra-options:
+            - --test=raptor-firefox-tp6
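
The by-test-platform keys in this file are matched against the test platform as regular expressions before falling back to default. A simplified, standalone sketch of that resolution (not the in-tree keyed-by implementation):

    import re

    # Simplified stand-in for taskgraph's keyed-by resolution: return the value
    # of the first non-default key that matches the platform, else 'default'.
    def resolve_by_test_platform(keyed, test_platform):
        for key, value in keyed.items():
            if key != 'default' and re.match(key + '$', test_platform):
                return value
        return keyed['default']

    mozharness_config = {
        'macosx.*': ['raptor/mac_config.py'],
        'windows.*': ['raptor/windows_config.py'],
        'default': ['raptor/linux_config.py'],
    }
    print(resolve_by_test_platform(mozharness_config, 'macosx64/opt'))
    # ['raptor/mac_config.py']
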
--- a/taskcluster/ci/test/test-platforms.yml
+++ b/taskcluster/ci/test/test-platforms.yml
@@ -237,16 +237,17 @@ macosx64/debug:
 
 macosx64/opt:
     build-platform: macosx64/opt
     test-sets:
         - macosx64-talos
         - macosx64-tests
         - desktop-screenshot-capture
         - awsy
+        - macosx64-raptor
 
 macosx64-nightly/opt:
     build-platform: macosx64-nightly/opt
     test-sets:
         - macosx64-tests
         - macosx64-talos-profiling
         - awsy
 
--- a/taskcluster/ci/test/test-sets.yml
+++ b/taskcluster/ci/test/test-sets.yml
@@ -305,16 +305,19 @@ macosx64-talos-profiling:
     - talos-tp5o-profiling
     - talos-tp6-profiling
     # - talos-tps-profiling # Bug 1453007 times out
 
 macosx64-qr-tests:
     - reftest
     - reftest-fonts
 
+macosx64-raptor:
+    - raptor-firefox-tp6
+
 linux32-tests:
     - cppunit
     - crashtest
     - firefox-ui-functional-local
     - firefox-ui-functional-remote
     - gtest
     - jittest
     - jsreftest
--- a/taskcluster/docs/attributes.rst
+++ b/taskcluster/docs/attributes.rst
@@ -96,16 +96,21 @@ unittest_try_name
 This is the name used to refer to a unit test via try syntax.  It
 may not match either of ``unittest_suite`` or ``unittest_flavor``.
 
 talos_try_name
 ==============
 
 This is the name used to refer to a talos job via try syntax.
 
+raptor_try_name
+===============
+
+This is the name used to refer to a raptor job via try syntax.
+
 job_try_name
 ============
 
 This is the name used to refer to a "job" via try syntax (``-j``).  Note that for
 some kinds, ``-j`` also matches against ``build_platform``.
 
 test_chunk
 ==========
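
As with talos_try_name, this attribute is what try option syntax matches against when selecting jobs. An illustrative try push using the flags added later in this patch might look like:

    try: -b o -p macosx64 -u none -t none -r raptor-firefox-tp6 --rebuild-raptor 3
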
--- a/taskcluster/taskgraph/target_tasks.py
+++ b/taskcluster/taskgraph/target_tasks.py
@@ -65,17 +65,18 @@ def filter_beta_release_tasks(task, para
         return False
 
     if platform in (
             'linux', 'linux64',
             'macosx64',
             'win32', 'win64',
             ):
         if task.attributes['build_type'] == 'opt' and \
-           task.attributes.get('unittest_suite') != 'talos':
+           task.attributes.get('unittest_suite') != 'talos' and \
+           task.attributes.get('unittest_suite') != 'raptor':
             return False
 
     # skip l10n, beetmover, balrog
     if task.kind in ignore_kinds:
         return False
 
     # No l10n repacks per push. They may be triggered by kinds which depend
     # on l10n builds/repacks. For instance: "repackage-signing"
@@ -124,16 +125,21 @@ def _try_option_syntax(full_task_graph, 
             task.attributes['task_duplicates'] = options.trigger_tests
             task.attributes['profile'] = False
 
         # If the developer wants test talos jobs to be rebuilt N times we add that value here
         if options.talos_trigger_tests > 1 and task.attributes.get('unittest_suite') == 'talos':
             task.attributes['task_duplicates'] = options.talos_trigger_tests
             task.attributes['profile'] = options.profile
 
+        # If the developer wants test raptor jobs to be rebuilt N times we add that value here
+        if options.raptor_trigger_tests > 1 and task.attributes.get('unittest_suite') == 'raptor':
+            task.attributes['task_duplicates'] = options.raptor_trigger_tests
+            task.attributes['profile'] = options.profile
+
         task.attributes.update(attributes)
 
     # Add notifications here as well
     if options.notifications:
         for task in full_task_graph:
             owner = parameters.get('owner')
             routes = task.task.setdefault('routes', [])
             if options.notifications == 'all':
@@ -190,16 +196,19 @@ def target_tasks_ash(full_task_graph, pa
             return False
         # no non-e10s tests
         if task.attributes.get('unittest_suite'):
             if not task.attributes.get('e10s'):
                 return False
             # don't run talos on ash
             if task.attributes.get('unittest_suite') == 'talos':
                 return False
+            # don't run raptor on ash
+            if task.attributes.get('unittest_suite') == 'raptor':
+                return False
         # don't upload symbols
         if task.attributes['kind'] == 'upload-symbols':
             return False
         return True
     return [l for l, t in full_task_graph.tasks.iteritems() if filter(t)]
 
 
 @_target_task('cedar_tasks')
--- a/taskcluster/taskgraph/transforms/tests.py
+++ b/taskcluster/taskgraph/transforms/tests.py
@@ -484,16 +484,35 @@ def setup_talos(config, tests):
         if config.params.is_try():
             extra_options.append('--branch-name')
             extra_options.append('try')
 
         yield test
 
 
 @transforms.add
+def setup_raptor(config, tests):
+    """Add options that are specific to raptor jobs (identified by suite=raptor)"""
+    for test in tests:
+        if test['suite'] != 'raptor':
+            yield test
+            continue
+
+        extra_options = test.setdefault('mozharness', {}).setdefault('extra-options', [])
+
+        # Per https://bugzilla.mozilla.org/show_bug.cgi?id=1357753#c3, branch
+        # name is only required for try
+        if config.params.is_try():
+            extra_options.append('--branch-name')
+            extra_options.append('try')
+
+        yield test
+
+
+@transforms.add
 def handle_artifact_prefix(config, tests):
     """Handle translating `artifact_prefix` appropriately"""
     for test in tests:
         if test['build-attributes'].get('artifact_prefix'):
             test.setdefault("attributes", {}).setdefault(
                 'artifact_prefix', test['build-attributes']['artifact_prefix']
             )
         yield test
@@ -714,16 +733,20 @@ def enable_code_coverage(config, tests):
                 test['mozharness']['extra-options'].append('--add-option')
                 test['mozharness']['extra-options'].append('--cycles,1')
                 test['mozharness']['extra-options'].append('--add-option')
                 test['mozharness']['extra-options'].append('--tppagecycles,1')
                 test['mozharness']['extra-options'].append('--add-option')
                 test['mozharness']['extra-options'].append('--no-upload-results')
                 test['mozharness']['extra-options'].append('--add-option')
                 test['mozharness']['extra-options'].append('--tptimeout,15000')
+            if 'raptor' in test['test-name']:
+                test['max-run-time'] = 1800
+                if 'linux' in test['build-platform']:
+                    test['docker-image'] = {"in-tree": "desktop1604-test"}
         elif test['build-platform'] == 'linux64-jsdcov/opt':
             # Ensure we don't run on inbound/autoland/beta, but if the test is try only, ignore it
             if 'mozilla-central' in test['run-on-projects'] or \
                     test['run-on-projects'] == 'built-projects':
                 test['run-on-projects'] = ['mozilla-central', 'try']
             test['mozharness'].setdefault('extra-options', []).append('--jsd-code-coverage')
         yield test
 
@@ -752,17 +775,17 @@ def split_e10s(config, tests):
             test['test-name'] += '-e10s'
             test['try-name'] += '-e10s'
             test['e10s'] = True
             test['attributes']['e10s'] = True
             group, symbol = split_symbol(test['treeherder-symbol'])
             if group != '?':
                 group += '-e10s'
             test['treeherder-symbol'] = join_symbol(group, symbol)
-            if test['suite'] == 'talos':
+            if test['suite'] == 'talos' or test['suite'] == 'raptor':
                 for i, option in enumerate(test['mozharness']['extra-options']):
                     if option.startswith('--suite='):
                         test['mozharness']['extra-options'][i] += '-e10s'
             else:
                 test['mozharness']['extra-options'].append('--e10s')
         yield test
 
 
@@ -804,48 +827,48 @@ def split_chunks(config, tests):
 
 def perfile_number_of_chunks(config, type):
     # A rough estimate of how many chunks we need based on simple rules
     # for determining what a test file is.
 
     # TODO: Make this flexible based on coverage vs verify || test type
     tests_per_chunk = 10.0
 
     if type.startswith('test-verify-wpt'):
         file_patterns = ['testing/web-platform/tests/**']
     elif type.startswith('test-verify-gpu'):
         file_patterns = ['**/*webgl*/**/test_*',
                          '**/dom/canvas/**/test_*',
                          '**/gfx/tests/**/test_*',
                          '**/devtools/canvasdebugger/**/browser_*',
                          '**/reftest*/**']
     elif type.startswith('test-verify'):
         file_patterns = ['**/test_*',
                          '**/browser_*',
                          '**/crashtest*/**',
                          'js/src/test/test/',
                          'js/src/test/non262/',
                          'js/src/test/test262/']
 
     changed_files = files_changed.get_changed_files(config.params.get('head_repository'),
                                                     config.params.get('head_rev'))
     test_count = 0
     for pattern in file_patterns:
         for path in changed_files:
             if mozpackmatch(path, pattern):
                 gpu = False
                 if type == 'test-verify-e10s':
                     # file_patterns for test-verify will pick up some gpu tests, lets ignore
                     # in the case of reftest, we will not have any in the regular case
                     gpu_dirs = ['dom/canvas', 'gfx/tests', 'devtools/canvasdebugger', 'webgl']
                     for gdir in gpu_dirs:
                         if len(path.split(gdir)) > 1:
                             gpu = True
 
                 if not gpu:
                     test_count += 1
 
     chunks = test_count/tests_per_chunk
     return int(math.ceil(chunks))
 
 
 @transforms.add
 def allow_software_gl_layers(config, tests):
@@ -992,16 +1015,18 @@ def make_job_description(config, tests):
         if test['chunks'] > 1:
             label += '-{}'.format(test['this-chunk'])
 
         build_label = test['build-label']
 
         try_name = test['try-name']
         if test['suite'] == 'talos':
             attr_try_name = 'talos_try_name'
+        elif test['suite'] == 'raptor':
+            attr_try_name = 'raptor_try_name'
         else:
             attr_try_name = 'unittest_try_name'
 
         attr_build_platform, attr_build_type = test['build-platform'].split('/', 1)
 
         attributes = test.get('attributes', {})
         attributes.update({
             'build_platform': attr_build_platform,
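
For talos and raptor, split_e10s suffixes any --suite= extra-option with -e10s instead of appending --e10s. A standalone sketch of that rewrite on a sample option list (the '--suite=raptor' value is illustrative, not taken from raptor.yml above):

    # Standalone sketch of the suffixing done by split_e10s for perf suites.
    def suffix_suite_for_e10s(extra_options):
        return [
            opt + '-e10s' if opt.startswith('--suite=') else opt
            for opt in extra_options
        ]

    # Hypothetical raptor option list; only the --suite= entry is rewritten.
    print(suffix_suite_for_e10s(['--suite=raptor', '--test=raptor-firefox-tp6']))
    # ['--suite=raptor-e10s', '--test=raptor-firefox-tp6']
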
--- a/taskcluster/taskgraph/try_option_syntax.py
+++ b/taskcluster/taskgraph/try_option_syntax.py
@@ -196,25 +196,28 @@ def parse_message(message):
     # Argument parser based on try flag flags
     parser = argparse.ArgumentParser()
     parser.add_argument('-b', '--build', dest='build_types')
     parser.add_argument('-p', '--platform', nargs='?',
                         dest='platforms', const='all', default='all')
     parser.add_argument('-u', '--unittests', nargs='?',
                         dest='unittests', const='all', default='all')
     parser.add_argument('-t', '--talos', nargs='?', dest='talos', const='all', default='none')
+    parser.add_argument('-r', '--raptor', nargs='?', dest='raptor', const='all', default='none')
     parser.add_argument('-i', '--interactive',
                         dest='interactive', action='store_true', default=False)
     parser.add_argument('-e', '--all-emails',
                         dest='notifications', action='store_const', const='all')
     parser.add_argument('-f', '--failure-emails',
                         dest='notifications', action='store_const', const='failure')
     parser.add_argument('-j', '--job', dest='jobs', action='append')
     parser.add_argument('--rebuild-talos', dest='talos_trigger_tests', action='store',
                         type=int, default=1)
+    parser.add_argument('--rebuild-raptor', dest='raptor_trigger_tests', action='store',
+                        type=int, default=1)
     parser.add_argument('--setenv', dest='env', action='append')
     parser.add_argument('--geckoProfile', dest='profile', action='store_true')
     parser.add_argument('--tag', dest='tag', action='store', default=None)
     parser.add_argument('--no-retry', dest='no_retry', action='store_true')
     parser.add_argument('--include-nightly', dest='include_nightly', action='store_true')
 
     # While we are transitioning from BB to TC, we want to push jobs to tc-worker
     # machines but not overload machines with every try push. Therefore, we add
@@ -258,38 +261,42 @@ class TryOptionSyntax(object):
         }
         """
         self.graph_config = graph_config
         self.jobs = []
         self.build_types = []
         self.platforms = []
         self.unittests = []
         self.talos = []
+        self.raptor = []
         self.trigger_tests = 0
         self.interactive = False
         self.notifications = None
         self.talos_trigger_tests = 0
+        self.raptor_trigger_tests = 0
         self.env = []
         self.profile = False
         self.tag = None
         self.no_retry = False
 
         options = parameters['try_options']
         if not options:
             return None
         self.jobs = self.parse_jobs(options['jobs'])
         self.build_types = self.parse_build_types(options['build_types'], full_task_graph)
         self.platforms = self.parse_platforms(options['platforms'], full_task_graph)
         self.unittests = self.parse_test_option(
             "unittest_try_name", options['unittests'], full_task_graph)
         self.talos = self.parse_test_option("talos_try_name", options['talos'], full_task_graph)
+        self.raptor = self.parse_test_option("raptor_try_name", options['raptor'], full_task_graph)
         self.trigger_tests = options['trigger_tests']
         self.interactive = options['interactive']
         self.notifications = options['notifications']
         self.talos_trigger_tests = options['talos_trigger_tests']
+        self.raptor_trigger_tests = options['raptor_trigger_tests']
         self.env = options['env']
         self.profile = options['profile']
         self.tag = options['tag']
         self.no_retry = options['no_retry']
         self.include_nightly = options['include_nightly']
 
     def parse_jobs(self, jobs_arg):
         if not jobs_arg or jobs_arg == ['none']:
@@ -578,17 +585,18 @@ class TryOptionSyntax(object):
 
             # User specified `-j all`
             if self.platforms is not None and attr('build_platform') not in self.platforms:
                 return False  # honor -p for jobs governed by a platform
             # "all" means "everything with `try` in run_on_projects"
             return check_run_on_projects()
         elif attr('kind') == 'test':
             return match_test(self.unittests, 'unittest_try_name') \
-                 or match_test(self.talos, 'talos_try_name')
+                 or match_test(self.talos, 'talos_try_name') \
+                 or match_test(self.raptor, 'raptor_try_name')
         elif attr('kind') in BUILD_KINDS:
             if attr('build_type') not in self.build_types:
                 return False
             elif self.platforms is None:
                 # for "-p all", look for try in the 'run_on_projects' attribute
                 return check_run_on_projects()
             else:
                 if attr('build_platform') not in self.platforms:
@@ -603,18 +611,20 @@ class TryOptionSyntax(object):
                 return '<all>'
             return ', '.join(str(e) for e in list)
 
         return "\n".join([
             "build_types: " + ", ".join(self.build_types),
             "platforms: " + none_for_all(self.platforms),
             "unittests: " + none_for_all(self.unittests),
             "talos: " + none_for_all(self.talos),
+            "raptor" + none_for_all(self.raptor),
             "jobs: " + none_for_all(self.jobs),
             "trigger_tests: " + str(self.trigger_tests),
             "interactive: " + str(self.interactive),
             "notifications: " + str(self.notifications),
             "talos_trigger_tests: " + str(self.talos_trigger_tests),
+            "raptor_trigger_tests: " + str(self.raptor_trigger_tests),
             "env: " + str(self.env),
             "profile: " + str(self.profile),
             "tag: " + str(self.tag),
             "no_retry: " + str(self.no_retry),
         ])
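
The two new arguments behave like their talos counterparts; a minimal, isolated sketch of how they parse:

    import argparse

    # Isolated reproduction of the two raptor-related arguments added above.
    parser = argparse.ArgumentParser()
    parser.add_argument('-r', '--raptor', nargs='?', dest='raptor',
                        const='all', default='none')
    parser.add_argument('--rebuild-raptor', dest='raptor_trigger_tests',
                        action='store', type=int, default=1)

    args = parser.parse_args(['-r', 'raptor-firefox-tp6', '--rebuild-raptor', '3'])
    print(args.raptor)                 # 'raptor-firefox-tp6'
    print(args.raptor_trigger_tests)   # 3

    # A bare -r selects 'all'; omitting it entirely leaves the default 'none'.
    print(parser.parse_args(['-r']).raptor)   # 'all'
    print(parser.parse_args([]).raptor)       # 'none'
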