Bug 1438292 - add JetStream benchmark to talos. r=rwood
authorJoel Maher <jmaher@mozilla.com>
Thu, 22 Feb 2018 11:43:02 -0500
changeset 404891 6f4aed8caae8d18f2808ea868fb0e8e3802b9bc3
parent 404890 8fff7b57ab62a240f30648f2b942b5e830a6305e
child 404892 5c10e3f4e008003c4d5767c1084642be92c45ff2
push id33494
push useraciure@mozilla.com
push dateThu, 22 Feb 2018 21:55:59 +0000
treeherdermozilla-central@169b1ba48437 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersrwood
bugs1438292
milestone60.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1438292 - add JetStream benchmark to talos. r=rwood
testing/mozharness/mozharness/mozilla/testing/talos.py
testing/talos/jetstream-benchmark.manifest
testing/talos/talos.json
testing/talos/talos/output.py
testing/talos/talos/test.py
testing/talos/talos/tests/jetstream/jetstream.manifest
--- a/testing/mozharness/mozharness/mozilla/testing/talos.py
+++ b/testing/mozharness/mozharness/mozilla/testing/talos.py
@@ -188,16 +188,17 @@ class Talos(TestingMixin, MercurialScrip
         self.talos_json = self.config.get("talos_json")
         self.talos_json_config = self.config.get("talos_json_config")
         self.repo_path = self.config.get("repo_path")
         self.obj_path = self.config.get("obj_path")
         self.tests = None
         self.gecko_profile = self.config.get('gecko_profile')
         self.gecko_profile_interval = self.config.get('gecko_profile_interval')
         self.pagesets_name = None
+        self.benchmark_zip = None
         self.mitmproxy_rel_bin = None # some platforms download a mitmproxy release binary
         self.mitmproxy_recording_set = None # zip file found on tooltool that contains all of the mitmproxy recordings
         self.mitmproxy_recordings_file_list = self.config.get('mitmproxy', None) # files inside the recording set
         self.mitmdump = None # path to mitdump tool itself, in py3 venv
 
     # We accept some configuration options from the try commit message in the format mozharness: <options>
     # Example try commit message:
     #   mozharness: --geckoProfile try: <stuff>
@@ -262,18 +263,30 @@ class Talos(TestingMixin, MercurialScrip
     def query_pagesets_name(self):
         """Certain suites require external pagesets to be downloaded and
         extracted.
         """
         if self.pagesets_name:
             return self.pagesets_name
         if self.query_talos_json_config() and self.suite is not None:
             self.pagesets_name = self.talos_json_config['suites'][self.suite].get('pagesets_name')
+            self.pagesets_name_manifest = 'tp5n-pageset.manifest'
             return self.pagesets_name
 
+    def query_benchmark_zip(self):
+        """Certain suites require external benchmarks to be downloaded and
+        extracted.
+        """
+        if self.benchmark_zip:
+            return self.benchmark_zip
+        if self.query_talos_json_config() and self.suite is not None:
+            self.benchmark_zip = self.talos_json_config['suites'][self.suite].get('benchmark_zip')
+            self.benchmark_zip_manifest = 'jetstream-benchmark.manifest'
+            return self.benchmark_zip
+
     def query_mitmproxy_recordings_file_list(self):
         """ When using mitmproxy we also need the name of the playback files that are included
         inside the playback archive.
         """
         if self.mitmproxy_recordings_file_list:
             return self.mitmproxy_recordings_file_list
         if self.query_talos_json_config() and self.suite is not None:
             talos_opts = self.talos_json_config['suites'][self.suite].get('talos_options', None)
@@ -384,38 +397,46 @@ class Talos(TestingMixin, MercurialScrip
                     self.suite = self.config['talos_extra_options'][suite_name_index]
                     self.validate_suite()
                 else:
                     self.fatal("Suite name not provided")
         else:
             # talos initiated in production via mozharness
             self.suite = self.config['suite']
 
-        # now that have the suite name, check if pageset is required, if so download it
-        # the --no-download option will override this
+
+        tooltool_artifacts = []
         if self.query_pagesets_name():
+            tooltool_artifacts.append({'name': self.pagesets_name, 'manifest': self.pagesets_name_manifest})
+
+        if self.query_benchmark_zip():
+            tooltool_artifacts.append({'name': self.benchmark_zip, 'manifest': self.benchmark_zip_manifest})
+
+        # now that we have the suite name, check whether an artifact is required; if so, download it
+        # the --no-download option will override this
+        for artifact in tooltool_artifacts:
             if '--no-download' not in self.config.get('talos_extra_options', []):
-                self.info("Downloading pageset with tooltool...")
+                self.info("Downloading %s with tooltool..." % artifact)
                 self.src_talos_webdir = os.path.join(self.talos_path, 'talos')
                 src_talos_pageset = os.path.join(self.src_talos_webdir, 'tests')
-                if not os.path.exists(os.path.join(src_talos_pageset, self.pagesets_name)):
-                    manifest_file = os.path.join(self.talos_path, 'tp5n-pageset.manifest')
+                if not os.path.exists(os.path.join(src_talos_pageset, artifact['name'])):
+                    manifest_file = os.path.join(self.talos_path, artifact['manifest'])
                     self.tooltool_fetch(
                         manifest_file,
                         output_dir=src_talos_pageset,
                         cache=self.config.get('tooltool_cache')
                     )
-                    archive = os.path.join(src_talos_pageset, self.pagesets_name)
+                    archive = os.path.join(src_talos_pageset, artifact['name'])
                     unzip = self.query_exe('unzip')
                     unzip_cmd = [unzip, '-q', '-o', archive, '-d', src_talos_pageset]
                     self.run_command(unzip_cmd, halt_on_failure=True)
                 else:
-                    self.info("pageset already available")
+                    self.info("%s already available" % artifact)
             else:
-                self.info("Not downloading pageset because the no-download option was specified")
+                self.info("Not downloading %s because the no-download option was specified" % artifact)
 
         # if running webkit tests locally, need to copy webkit source into talos/tests
         if self.config.get('run_local') and ('speedometer' in self.suite or
            'stylebench' in self.suite or
            'motionmark' in self.suite):
             self.get_webkit_source()
 
     def get_webkit_source(self):
new file mode 100644
--- /dev/null
+++ b/testing/talos/jetstream-benchmark.manifest
@@ -0,0 +1,9 @@
+[
+  {
+    "size": 10025531,
+    "visibility": "public",
+    "digest": "4ff745d55505720b4b5929476527ac0f9fd75a98151030699d3dec84cb11f24d2ab18f24c98ed063912e709c5126424fa7921080da3daaee31cf50eae5c9591a",
+    "algorithm": "sha512",
+    "filename": "jetstream.zip"
+  }
+]
\ No newline at end of file
--- a/testing/talos/talos.json
+++ b/testing/talos/talos.json
@@ -61,17 +61,18 @@
             "pagesets_name": "tp5n.zip"
         },
         "g5-profiling-e10s": {
             "tests": ["ts_paint_webext", "tp5o_webext"],
             "talos_options": ["--geckoProfile"],
             "pagesets_name": "tp5n.zip"
         },
         "motionmark-e10s": {
-            "tests": ["motionmark_animometer", "motionmark_htmlsuite", "ARES6"]
+            "tests": ["motionmark_animometer", "motionmark_htmlsuite", "JetStream", "ARES6"],
+            "benchmark_zip": "jetstream.zip"
         },
         "svgr-e10s": {
             "tests": ["tsvgx", "tsvgr_opacity", "tart", "tscrollx", "tsvg_static"]
         },
         "svgr-profiling-e10s": {
             "tests": ["tsvgx", "tsvgr_opacity", "tart", "tscrollx", "tsvg_static"],
             "talos_options": ["--geckoProfile"]
         },
--- a/testing/talos/talos/output.py
+++ b/testing/talos/talos/output.py
@@ -251,19 +251,19 @@ class Output(object):
         if len(results) != 160:
             raise Exception("Speedometer has 160 subtests, found: %s instead" % len(results))
 
         results = results[9::10]
         score = 60 * 1000 / filter.geometric_mean(results) / correctionFactor
         return score
 
     @classmethod
-    def ares6_score(cls, val_list):
+    def benchmark_score(cls, val_list):
         """
-        ares6_score: reported as 'geomean'
+        benchmark_score: ares6/jetstream self-reported as 'geomean'
         """
         results = [i for i, j in val_list if j == 'geomean']
         return filter.mean(results)
 
     @classmethod
     def stylebench_score(cls, val_list):
         """
         stylebench_score: https://bug-172968-attachments.webkit.org/attachment.cgi?id=319888
@@ -283,17 +283,19 @@ class Output(object):
     def construct_results(self, vals, testname):
         if 'responsiveness' in testname:
             return filter.responsiveness_Metric([val for (val, page) in vals])
         elif testname.startswith('v8_7'):
             return self.v8_Metric(vals)
         elif testname.startswith('kraken'):
             return self.JS_Metric(vals)
         elif testname.startswith('ares6'):
-            return self.ares6_score(vals)
+            return self.benchmark_score(vals)
+        elif testname.startswith('jetstream'):
+            return self.benchmark_score(vals)
         elif testname.startswith('speedometer'):
             return self.speedometer_score(vals)
         elif testname.startswith('stylebench'):
             return self.stylebench_score(vals)
         elif len(vals) > 1:
             return filter.geometric_mean([i for i, j in vals])
         else:
             return filter.mean([i for i, j in vals])
--- a/testing/talos/talos/test.py
+++ b/testing/talos/talos/test.py
@@ -824,16 +824,23 @@ class ARES6(WebkitBenchmark):
 
 @register_test()
 class motionmark_htmlsuite(WebkitBenchmark):
     # MotionMark benchmark used by many browser vendors (from webkit)
     tpmanifest = '${talos}/tests/motionmark/htmlsuite.manifest'
 
 
 @register_test()
+class JetStream(WebkitBenchmark):
+    # JetStream benchmark used by many browser vendors (from webkit)
+    tpmanifest = '${talos}/tests/jetstream/jetstream.manifest'
+    tppagecycles = 1
+
+
+@register_test()
 class perf_reftest(PageloaderTest):
     """
     Style perf-reftest a set of tests where the result is the difference of base vs ref pages
     """
     base_vs_ref = True  # compare the two test pages with eachother and report comparison
     tpmanifest = '${talos}/tests/perf-reftest/perf_reftest.manifest'
     tpcycles = 1
     tppagecycles = 10
new file mode 100644
--- /dev/null
+++ b/testing/talos/talos/tests/jetstream/jetstream.manifest
@@ -0,0 +1,1 @@
+% http://localhost/tests/JetStream/JetStream-1.1/index.html?gecko#long