Bug 1472992 - [jsshell] Run javascript shell benchmarks against Google V8, r=jmaher
author: Andrew Halberstadt <ahalberstadt@mozilla.com>
date: Wed, 15 Aug 2018 13:52:47 +0000
changeset 829439 2cc6ec2f7f0e1f945858b493133c6a26c0b4c09f
parent 829438 c340d01c2552260a7d96ec5699594e6167f7bb43
child 829440 41443a067773651862461210bd73a521fb396aaf
child 829535 0fd6d03ce4a1dff0d9d3fe9904cd940532a4a1a8
push id: 118781
push user: ktomlinson@mozilla.com
push date: Thu, 16 Aug 2018 02:46:06 +0000
reviewers: jmaher
bugs: 1472992
milestone: 63.0a1
Bug 1472992 - [jsshell] Run javascript shell benchmarks against Google V8, r=jmaher

This runs the jsshell benchmarks against Google's V8 engine in addition to SpiderMonkey. Both shells run in the same task to keep things simple and decluttered, though we could split them into separate tasks later if needed.

Differential Revision: https://phabricator.services.mozilla.com/D3356
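For illustration (an inference from the patch below, not part of the commit message): each benchmark definition that lists both shells is split into two tasks, e.g. bench-ares6-sm and bench-ares6-v8, reported under the Treeherder symbols js-bench-sm(ares6) and js-bench-v8(ares6), and each task ends up running the templated command with its shell's values filled in:

    cd $GECKO_PATH && ./mach jsshell-bench --perfherder=v8 --binary=$JSSHELL ares6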
taskcluster/ci/config.yml
taskcluster/ci/fetch/benchmarks.yml
taskcluster/ci/source-test/jsshell.yml
taskcluster/taskgraph/transforms/source_test.py
testing/jsshell/benchmark.py
--- a/taskcluster/ci/config.yml
+++ b/taskcluster/ci/config.yml
@@ -1,14 +1,15 @@
 trust-domain: gecko
 project-repo-param-prefix: ''
 treeherder:
     group-names:
         'cram': 'Cram tests'
-        'js-bench': 'JavaScript shell benchmarks'
+        'js-bench-sm': 'JavaScript shell benchmarks with Spidermonkey'
+        'js-bench-v8': 'JavaScript shell benchmarks with Google V8'
         'mocha': 'Mocha unit tests'
         'py2': 'Python 2 unit tests'
         'py3': 'Python 3 unit tests'
         'A': 'Android Gradle tests'
         'Fetch-URL': 'Fetch and store content'
         'Fxfn-l': 'Firefox functional tests (local)'
         'Fxfn-l-e10s': 'Firefox functional tests (local) with e10s'
         'Fxfn-r': 'Firefox functional tests (remote)'
--- a/taskcluster/ci/fetch/benchmarks.yml
+++ b/taskcluster/ci/fetch/benchmarks.yml
@@ -1,26 +1,31 @@
+d8:
+  description: V8 debug shell
+  fetch:
+    type: static-url
+    url: https://github.com/mozilla/perf-automation/releases/download/d8-6.7.17/d8-6.7.17.zip
+    sha256: 0aa1c4e630de78373185fc1c0fa34bc87826f63fd4cbb664668891d6f6a6b24e
+    size: 20578358
+
 unity-webgl:
   description: unity-webgl benchmark
   fetch:
     type: static-url
-    artifact-name: unity-webgl.zip
     url: https://github.com/mozilla/perf-automation/releases/download/unity-webgl-v1/unity-webgl-6beb3d3e22ab.zip
     sha256: f71ee3a3f5b9513f041e1dd01c032d51f2071e1ad130e8ac2cf0c553c468b9ea
     size: 27062962
 
 assorted-dom:
   description: assorted-dom benchmark
   fetch:
     type: static-url
-    artifact-name: assorted-dom.zip
     url: https://github.com/mozilla/perf-automation/releases/download/assorted-dom-v1/assorted-dom-4befd28725c6.zip
     sha256: e4eafe4a8e70c7ae6d42d668d3b1640b9fd9b696c486ff35aab754c368f78c2c
     size: 402665
 
 web-tooling-benchmark:
   description: Web Tooling Benchmark
   fetch:
     type: static-url
-    artifact-name: web-tooling-benchmark.zip
     url: https://github.com/mozilla/perf-automation/releases/download/V1/web-tooling-benchmark-b2ac25c897c9.zip
     sha256: 93b0b51df0cec3ca9bfa0bdf81d782306dcf18532e39b3ff3180409125daaff1
     size: 5444135
--- a/taskcluster/ci/source-test/jsshell.yml
+++ b/taskcluster/ci/source-test/jsshell.yml
@@ -3,52 +3,62 @@ job-defaults:
     require-build: true
     worker-type:
         by-platform:
             linux64.*: releng-hardware/gecko-t-linux-talos
     worker:
         by-platform:
             linux64.*:
                 env:
-                    SHELL: /bin/bash
-                    JSSHELL: /home/cltbld/fetches/js
+                    by-shell:
+                        sm:
+                            SHELL: /bin/bash
+                            JSSHELL: /home/cltbld/fetches/js
+                        v8:
+                            SHELL: /bin/bash
+                            JSSHELL: /home/cltbld/fetches/d8/d8
                 max-run-time: 1800
     treeherder:
         kind: test
         tier: 2
     run:
-        using: mach
+        using: run-task
         workdir: /home/cltbld
+        command: >
+            cd $GECKO_PATH &&
+            ./mach jsshell-bench --perfherder={shell} --binary=$JSSHELL {test}
     run-on-projects: ['mozilla-central', 'try']
     fetches:
         build:
             - target.jsshell.zip
+        fetch:
+            - d8
 
 bench-ares6:
     description: Ares6 JavaScript shell benchmark suite
+    shell: ['sm', 'v8']
+    test: ares6
     treeherder:
-        symbol: js-bench(ares6)
-    run:
-        mach: jsshell-bench --binary $JSSHELL --perfherder ares6
+        symbol: ares6
 
 bench-sixspeed:
     description: Six-Speed JavaScript shell benchmark suite
+    shell: ['sm', 'v8']
+    test: six-speed
     treeherder:
-        symbol: js-bench(6speed)
-    run:
-        mach: jsshell-bench --binary $JSSHELL --perfherder six-speed
+        symbol: 6speed
 
 bench-sunspider:
     description: SunSpider JavaScript shell benchmark suite
+    shell: ['sm']
+    test: sunspider
     treeherder:
-        symbol: js-bench(sunspider)
-    run:
-        mach: jsshell-bench --binary $JSSHELL --perfherder sunspider
+        symbol: sunspider
 
 bench-web-tooling:
     description: Web Tooling shell benchmark suite
+    shell: ['sm', 'v8']
+    test: web-tooling-benchmark
     treeherder:
-        symbol: js-bench(webtool)
-    run:
-        mach: jsshell-bench --binary $JSSHELL --perfherder web-tooling-benchmark
+        symbol: webtool
     fetches:
         fetch:
             - web-tooling-benchmark
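As a usage note on the by-shell blocks above (an assumption about how the handle_shell transform resolves them, not part of the patch): for the v8 variant of a job, resolve_keyed_by collapses the keyed environment down to plain values before the task is scheduled:

    worker:
        env:
            SHELL: /bin/bash
            JSSHELL: /home/cltbld/fetches/d8/d8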
--- a/taskcluster/taskgraph/transforms/source_test.py
+++ b/taskcluster/taskgraph/transforms/source_test.py
@@ -123,16 +123,46 @@ def split_python(config, jobs):
             else:
                 pyjob['label'] += '-{0}'.format(group)
             symbol = split_symbol(pyjob['treeherder']['symbol'])[1]
             pyjob['treeherder']['symbol'] = join_symbol(group, symbol)
             pyjob['run'][key] = version
             yield pyjob
 
 
+@transforms.add
+def split_jsshell(config, jobs):
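+    """Split each jsshell benchmark job into one task per requested shell."""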
+    all_shells = {
+        'sm': "Spidermonkey",
+        'v8': "Google V8"
+    }
+
+    for job in jobs:
+        if not job['name'].startswith('jsshell'):
+            yield job
+            continue
+
+        test = job.pop('test')
+        for shell in job.get('shell', all_shells.keys()):
+            assert shell in all_shells
+
+            new_job = copy.deepcopy(job)
+            new_job['name'] = '{}-{}'.format(new_job['name'], shell)
+            new_job['description'] = '{} on {}'.format(new_job['description'], all_shells[shell])
+            new_job['shell'] = shell
+
+            group = 'js-bench-{}'.format(shell)
+            symbol = split_symbol(new_job['treeherder']['symbol'])[1]
+            new_job['treeherder']['symbol'] = join_symbol(group, symbol)
+
+            run = new_job['run']
+            run['command'] = run['command'].format(shell=shell, SHELL=shell.upper(), test=test)
+            yield new_job
+
+
 def add_build_dependency(config, job):
     """
     Add build dependency to the job and installer_url to env.
     """
     key = job['platform']
     build_labels = config.config.get('dependent-build-platforms', {})
     matches = keymatch(build_labels, key)
     if not matches:
@@ -167,8 +197,30 @@ def handle_platform(config, jobs):
         if 'treeherder' in job:
             job['treeherder']['platform'] = platform
 
         if job.pop('require-build'):
             add_build_dependency(config, job)
 
         del job['platform']
         yield job
+
+
+@transforms.add
+def handle_shell(config, jobs):
+    """
+    Handle the 'shell' property.
+    """
+    fields = [
+        'run-on-projects',
+        'worker.env',
+    ]
+
+    for job in jobs:
+        if not job.get('shell'):
+            yield job
+            continue
+
+        for field in fields:
+            resolve_keyed_by(job, field, item_name=job['name'])
+
+        del job['shell']
+        yield job
--- a/testing/jsshell/benchmark.py
+++ b/testing/jsshell/benchmark.py
@@ -26,19 +26,20 @@ with `ac_add_options --enable-js-shell` 
 
 
 class Benchmark(object):
     __metaclass__ = ABCMeta
     lower_is_better = True
     should_alert = False
     units = 'score'
 
-    def __init__(self, shell, args=None):
+    def __init__(self, shell, args=None, shell_name=None):
         self.shell = shell
         self.args = args
+        self.shell_name = shell_name
 
     @abstractproperty
     def name(self):
         """Returns the string name of the benchmark."""
 
     @abstractproperty
     def path(self):
         """Return the path to the benchmark relative to topsrcdir."""
@@ -65,24 +66,28 @@ class Benchmark(object):
             return self._version
 
         with open(os.path.join(self.path, 'VERSION'), 'r') as fh:
             self._version = fh.read().strip("\r\n\r\n \t")
         return self._version
 
     def reset(self):
         """Resets state between runs."""
+        name = self.name
+        if self.shell_name:
+            name = '{}-{}'.format(name, self.shell_name)
+
         self.perfherder_data = {
             'framework': {
                 'name': 'js-bench',
             },
             'suites': [
                 {
                     'lowerIsBetter': self.lower_is_better,
-                    'name': self.name,
+                    'name': name,
                     'shouldAlert': self.should_alert,
                     'subtests': [],
                     'units': self.units,
                     'value': None
                 },
             ],
         }
         self.suite = self.perfherder_data['suites'][0]
@@ -261,68 +266,71 @@ class WebToolingBenchmark(Benchmark):
         for bench, scores in self.scores.items():
             for score_name, values in scores.items():
                 test_name = "{}-{}".format(self.name, score_name)
                 mean = sum(values) / len(values)
                 self.suite['subtests'].append({'name': test_name, 'value': mean})
                 if score_name == 'mean':
                     bench_mean = mean
         self.suite['value'] = bench_mean
-    
+
     def _provision_benchmark_script(self):
+        if os.path.isdir(self.path):
+            return
+
         # Some benchmarks may have been downloaded from a fetch task, make
         # sure they get copied over.
         fetches_dir = os.environ.get('MOZ_FETCHES_DIR')
         if fetches_dir and os.path.isdir(fetches_dir):
             webtool_fetchdir = os.path.join(fetches_dir, 'web-tooling-benchmark')
             if os.path.isdir(webtool_fetchdir):
                 shutil.copytree(webtool_fetchdir, self.path)
-    
+
     def run(self):
         self._provision_benchmark_script()
         return super(WebToolingBenchmark, self).run()
 
 
 all_benchmarks = {
     'ares6': Ares6,
     'six-speed': SixSpeed,
     'sunspider': SunSpider,
     'web-tooling-benchmark': WebToolingBenchmark
 }
 
 
-def run(benchmark, binary=None, extra_args=None, perfherder=False):
+def run(benchmark, binary=None, extra_args=None, perfherder=None):
     if not binary:
         try:
             binary = os.path.join(build.bindir, 'js' + build.substs['BIN_SUFFIX'])
         except BuildEnvironmentNotFoundException:
             binary = None
 
         if not binary or not os.path.isfile(binary):
             print(JSSHELL_NOT_FOUND)
             return 1
 
-    bench = all_benchmarks.get(benchmark)(binary, args=extra_args)
+    bench = all_benchmarks.get(benchmark)(binary, args=extra_args, shell_name=perfherder)
     res = bench.run()
 
     if perfherder:
         print("PERFHERDER_DATA: {}".format(json.dumps(bench.perfherder_data)))
     return res
 
 
 def get_parser():
     parser = ArgumentParser()
     parser.add_argument('benchmark', choices=all_benchmarks.keys(),
                         help="The name of the benchmark to run.")
     parser.add_argument('-b', '--binary', default=None,
                         help="Path to the JS shell binary to use.")
     parser.add_argument('--arg', dest='extra_args', action='append', default=None,
                         help="Extra arguments to pass to the JS shell.")
-    parser.add_argument('--perfherder', action='store_true', default=False,
-                        help="Log PERFHERDER_DATA to stdout.")
+    parser.add_argument('--perfherder', default=None,
+                        help="Log PERFHERDER_DATA to stdout using the given suite name.")
     return parser
 
 
 def cli(args=sys.argv[1:]):
     parser = get_parser()
     args = parser.parse_args(args)
     return run(**vars(args))
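
A minimal usage sketch (not part of the patch; the binary path is a placeholder): with the new --perfherder flag taking a suite name rather than acting as a boolean, passing "v8" renames the reported suite from e.g. "ares6" to "ares6-v8" (see Benchmark.reset above):

    # Hypothetical driver for testing/jsshell/benchmark.py; /path/to/d8 is a placeholder.
    # Equivalent to: ./mach jsshell-bench --perfherder=v8 --binary=/path/to/d8 ares6
    import benchmark

    benchmark.cli(['ares6', '--binary', '/path/to/d8', '--perfherder', 'v8'])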