Bug 1472979 - Add Web Tooling Benchmark as a jsshell-bench test r=jmaher
author Ionut Goldan <igoldan@mozilla.com>
Mon, 13 Aug 2018 07:29:07 +0000
changeset 486273 957f7aa848b1a46d824d5e11de2e62c63ac4a526
parent 486272 d05ea252346c6cca1b9148b315b9bc5678b6fd19
child 486285 bf79440c1376b1e1114ba653917e1577d7b1007b
child 486286 aeb2e2eaf0c48397b5ca52347a21c74f45366aec
push id 9719
push user ffxbld-merge
push date Fri, 24 Aug 2018 17:49:46 +0000
treeherder mozilla-beta@719ec98fba77
reviewers jmaher
bugs 1472979
milestone 63.0a1
Bug 1472979 - Add Web Tooling Benchmark as a jsshell-bench test r=jmaher

Add Web Tooling Benchmark & (WIP) setup fetch task

Differential Revision: https://phabricator.services.mozilla.com/D2646
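For reference, the new suite is driven through the same mach entry point as the existing jsshell benchmarks. A local run mirrors the bench-web-tooling task's command; the shell path below is a placeholder, not part of this change:

    # Sketch of a local invocation; <path-to-js-shell> stands in for a built JS shell binary.
    ./mach jsshell-bench --binary <path-to-js-shell> --perfherder web-tooling-benchmark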
taskcluster/ci/fetch/benchmarks.yml
taskcluster/ci/source-test/jsshell.yml
taskcluster/ci/source-test/kind.yml
taskcluster/taskgraph/transforms/fetch.py
testing/jsshell/benchmark.py
--- a/taskcluster/ci/fetch/benchmarks.yml
+++ b/taskcluster/ci/fetch/benchmarks.yml
@@ -10,8 +10,17 @@ unity-webgl:
 assorted-dom:
   description: assorted-dom benchmark
   fetch:
     type: static-url
     artifact-name: assorted-dom.zip
     url: https://github.com/mozilla/perf-automation/releases/download/assorted-dom-v1/assorted-dom-4befd28725c6.zip
     sha256: e4eafe4a8e70c7ae6d42d668d3b1640b9fd9b696c486ff35aab754c368f78c2c
     size: 402665
+
+web-tooling-benchmark:
+  description: Web Tooling Benchmark
+  fetch:
+    type: static-url
+    artifact-name: web-tooling-benchmark.zip
+    url: https://github.com/mozilla/perf-automation/releases/download/V1/web-tooling-benchmark-b2ac25c897c9.zip
+    sha256: 93b0b51df0cec3ca9bfa0bdf81d782306dcf18532e39b3ff3180409125daaff1
+    size: 5444135
--- a/taskcluster/ci/source-test/jsshell.yml
+++ b/taskcluster/ci/source-test/jsshell.yml
@@ -37,8 +37,18 @@ bench-sixspeed:
         mach: jsshell-bench --binary $JSSHELL --perfherder six-speed
 
 bench-sunspider:
     description: SunSpider JavaScript shell benchmark suite
     treeherder:
         symbol: js-bench(sunspider)
     run:
         mach: jsshell-bench --binary $JSSHELL --perfherder sunspider
+
+bench-web-tooling:
+    description: Web Tooling shell benchmark suite
+    treeherder:
+        symbol: js-bench(webtool)
+    run:
+        mach: jsshell-bench --binary $JSSHELL --perfherder web-tooling-benchmark
+    fetches:
+        fetch:
+            - web-tooling-benchmark
--- a/taskcluster/ci/source-test/kind.yml
+++ b/taskcluster/ci/source-test/kind.yml
@@ -5,16 +5,19 @@
 loader: taskgraph.loader.transform:loader
 
 transforms:
    - taskgraph.transforms.try_job:transforms
    - taskgraph.transforms.source_test:transforms
    - taskgraph.transforms.job:transforms
    - taskgraph.transforms.task:transforms
 
+kind-dependencies:
+   - fetch
+
 jobs-from:
    - cram.yml
    - doc.yml
    - file-metadata.yml
    - jsshell.yml
    - mocha.yml
    - mozlint.yml
    - python.yml
--- a/taskcluster/taskgraph/transforms/fetch.py
+++ b/taskcluster/taskgraph/transforms/fetch.py
@@ -88,16 +88,19 @@ def validate(config, jobs):
 
         yield job
 
 
 @transforms.add
 def process_fetch_job(config, jobs):
     # Converts fetch-url entries to the job schema.
     for job in jobs:
+        if 'fetch' not in job:
+            continue
+
         typ = job['fetch']['type']
 
         if typ == 'static-url':
             yield create_fetch_url_task(config, job)
         else:
             # validate() should have caught this.
             assert False
 
--- a/testing/jsshell/benchmark.py
+++ b/testing/jsshell/benchmark.py
@@ -2,16 +2,17 @@
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
 from __future__ import absolute_import, print_function
 
 import json
 import os
 import re
+import shutil
 import sys
 from abc import ABCMeta, abstractmethod, abstractproperty
 from argparse import ArgumentParser
 from collections import defaultdict
 
 from mozbuild.base import MozbuildObject, BuildEnvironmentNotFoundException
 from mozprocess import ProcessHandler
 
@@ -103,18 +104,18 @@ class Benchmark(object):
 
 class RunOnceBenchmark(Benchmark):
     def collect_results(self):
         bench_total = 0
         # NOTE: for this benchmark we run the test once, so we have a single value array
         for bench, scores in self.scores.items():
             for score, values in scores.items():
                 test_name = "{}-{}".format(self.name, score)
-                total = sum(values) / len(values)
-                self.suite['subtests'].append({'name': test_name, 'value': total})
+                mean = sum(values) / len(values)
+                self.suite['subtests'].append({'name': test_name, 'value': mean})
                 bench_total += int(sum(values))
         self.suite['value'] = bench_total
 
 
 class Ares6(Benchmark):
     name = 'ares6'
     path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'ARES-6')
     units = 'ms'
@@ -159,19 +160,19 @@ class Ares6(Benchmark):
 
         m = re.search('summary:\s*(\d+\.?\d*?) (\+-)?.+', line)
         if m:
             self.last_summary = float(m.group(1))
 
     def collect_results(self):
         for bench, scores in self.scores.items():
             for score, values in scores.items():
-                total = sum(values) / len(values)
+                mean = sum(values) / len(values)
                 test_name = "{}-{}".format(bench, score)
-                self.suite['subtests'].append({'name': test_name, 'value': total})
+                self.suite['subtests'].append({'name': test_name, 'value': mean})
 
         if self.last_summary:
             self.suite['value'] = self.last_summary
 
 
 class SixSpeed(RunOnceBenchmark):
     name = 'six-speed'
     path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'six-speed')
@@ -223,20 +224,73 @@ class SunSpider(RunOnceBenchmark):
             return
         subtest = m.group(1)
         score = m.group(2)
         if subtest not in self.scores[self.name]:
             self.scores[self.name][subtest] = []
         self.scores[self.name][subtest].append(int(score))
 
 
+class WebToolingBenchmark(Benchmark):
+    name = 'web-tooling-benchmark'
+    path = os.path.join('third_party', 'webkit', 'PerformanceTests', 'web-tooling-benchmark')
+    main_js = 'cli.js'
+
+    @property
+    def command(self):
+        cmd = super(WebToolingBenchmark, self).command
+        return cmd + [self.main_js]
+
+    def reset(self):
+        super(WebToolingBenchmark, self).reset()
+
+        # Scores are of the form:
+        # {<bench_name>: {<score_name>: [<values>]}}
+        self.scores = defaultdict(lambda: defaultdict(list))
+
+    def process_line(self, output):
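+        # cli.js reports one result line per subtest, of the form
+        # "  <name>: <value> runs/sec" (e.g. "  acorn: 5.50 runs/sec";
+        # the subtest name and value shown here are illustrative).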
+        m = re.search(" +([a-zA-Z].+): +([.0-9]+) +runs/sec", output)
+        if not m:
+            return
+        subtest = m.group(1)
+        score = m.group(2)
+        if subtest not in self.scores[self.name]:
+            self.scores[self.name][subtest] = []
+        self.scores[self.name][subtest].append(float(score))
+
+    def collect_results(self):
+        # NOTE: for this benchmark we run the test once, so we have a single value array
+        # Default the overall value in case no 'mean' subtest is reported.
+        bench_mean = 0
+        for bench, scores in self.scores.items():
+            for score_name, values in scores.items():
+                test_name = "{}-{}".format(self.name, score_name)
+                mean = sum(values) / len(values)
+                self.suite['subtests'].append({'name': test_name, 'value': mean})
+                if score_name == 'mean':
+                    bench_mean = mean
+        self.suite['value'] = bench_mean
+
+    def _provision_benchmark_script(self):
+        # Some benchmarks may have been downloaded from a fetch task, make
+        # sure they get copied over.
+        fetches_dir = os.environ.get('MOZ_FETCHES_DIR')
+        if fetches_dir and os.path.isdir(fetches_dir):
+            webtool_fetchdir = os.path.join(fetches_dir, 'web-tooling-benchmark')
+            if os.path.isdir(webtool_fetchdir):
+                shutil.copytree(webtool_fetchdir, self.path)
+
+    def run(self):
+        self._provision_benchmark_script()
+        return super(WebToolingBenchmark, self).run()
+
+
 all_benchmarks = {
     'ares6': Ares6,
     'six-speed': SixSpeed,
-    'sunspider': SunSpider
+    'sunspider': SunSpider,
+    'web-tooling-benchmark': WebToolingBenchmark
 }
 
 
 def run(benchmark, binary=None, extra_args=None, perfherder=False):
     if not binary:
         try:
             binary = os.path.join(build.bindir, 'js' + build.substs['BIN_SUFFIX'])
         except BuildEnvironmentNotFoundException: