Bug 1643103 - Fix how system layers work with the test layers. r=mozperftest-reviewers,tarek
author Gregory Mierzwinski <gmierz2@outlook.com>
date Fri, 05 Jun 2020 13:42:14 +0000
changeset 534135 be1f54c6e5ec28cca1eb7d1c1bd7802774618c75
parent 534134 2b6e363687b77f74c1935a6cd9078de6cb3bf38a
child 534136 c70d67a20c16f81a32122ab697d1d715c62d12b3
push id 37483
push user apavel@mozilla.com
push date Fri, 05 Jun 2020 21:40:11 +0000
treeherder mozilla-central@dadc7312128e
reviewers mozperftest-reviewers, tarek
bugs 1643103
milestone 79.0a1
This patch fixes how the system and browser/test layers are run. With this change, the system layer no longer runs its setup and teardown stages entirely before the test (browser) layer has started and finished: its setup now runs before the full test layer run, and its teardown after it.

Depends on D78016

Differential Revision: https://phabricator.services.mozilla.com/D78128
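To illustrate the new ordering, here is a minimal, hypothetical sketch (FakeLayer is a stand-in for illustration, not the real mozperftest Layer class) of how nesting both layers in a single "with" statement, as the patch does in MachEnvironment.run, keeps the system layer alive while the test layer runs:

    class FakeLayer:
        """Stand-in layer that logs its setup/run/teardown calls."""

        def __init__(self, name):
            self.name = name

        def __enter__(self):
            print(self.name, "setup")
            return self

        def __exit__(self, *exc):
            print(self.name, "teardown")

        def __call__(self, metadata):
            print(self.name, "run")
            return metadata

    metadata = {}
    # Both layers stay entered for the duration of the run, so the system
    # teardown only happens after the test layer has finished.
    with FakeLayer("system") as syslayer, FakeLayer("test") as testlayer:
        metadata = syslayer(metadata)
        metadata = testlayer(metadata)
    # Output order: system setup, test setup, system run, test run,
    #               test teardown, system teardown

Under the old loop, each layer was entered, run, and exited in turn, so the system teardown ran before the test layer even started.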
python/mozperftest/mozperftest/environment.py
python/mozperftest/mozperftest/metrics/common.py
python/mozperftest/mozperftest/metrics/perfherder.py
python/mozperftest/mozperftest/runner.py
python/mozperftest/mozperftest/tests/test_browsertime.py
python/mozperftest/mozperftest/tests/test_environment.py
python/mozperftest/mozperftest/tests/test_perftestnotebook.py
--- a/python/mozperftest/mozperftest/environment.py
+++ b/python/mozperftest/mozperftest/environment.py
@@ -10,17 +10,17 @@ import shutil
 
 from mozperftest.test import pick_test
 from mozperftest.system import pick_system
 from mozperftest.metrics import pick_metrics
 from mozperftest.layers import Layers
 from mozperftest.utils import download_file
 
 
-SYSTEM, BROWSER, METRICS = 0, 1, 2
+SYSTEM, TEST, METRICS = 0, 1, 2
 
 
 class MachEnvironment:
     def __init__(self, mach_cmd, flavor="script", **kwargs):
         self._mach_cmd = mach_cmd
         self._mach_args = dict(
             [(self._normalize(key), value) for key, value in kwargs.items()]
         )
@@ -85,50 +85,42 @@ class MachEnvironment:
     def unfreeze(self):
         self._mach_args = self._saved_mach_args
         self._saved_mach_args = None
 
     def run(self, metadata):
         has_exc_handler = self.has_hook("on_exception")
 
         # run the system and test layers
-        for layer in self.layers[:-1]:
-            with layer:
-                try:
-                    metadata = layer(metadata)
-                except Exception as e:
-                    if has_exc_handler:
-                        # if the hook returns True, we abort and return
-                        # without error. If it returns False, we continue
-                        # the loop. The hook can also raise an exception or
-                        # re-raise this exception.
-                        if not self.run_hook("on_exception", layer, e):
-                            return metadata
-                    else:
-                        raise
-        # then run the metrics layers
-        with self.layers[METRICS] as metrics:
+        with self.layers[SYSTEM] as syslayer, self.layers[TEST] as testlayer:
+            metadata = syslayer(metadata)
             try:
-                metadata = metrics(metadata)
+                metadata = testlayer(metadata)
             except Exception as e:
                 if has_exc_handler:
-                    if not self.run_hook("on_exception", layer, e):
+                    # if the hook returns True, we abort and return
+                    # without error. If it returns False, we continue
+                    # the loop. The hook can also raise an exception or
+                    # re-raise this exception.
+                    if not self.run_hook("on_exception", testlayer, e):
                         return metadata
                 else:
                     raise
+
+        # then run the metrics layers
+        with self.layers[METRICS] as metrics:
+            metadata = metrics(metadata)
+
         return metadata
 
     def __enter__(self):
-        for layer in self.layers:
-            layer.__enter__()
         return self
 
     def __exit__(self, type, value, traceback):
-        for layer in self.layers:
-            layer.__exit__(type, value, traceback)
+        return
 
     def cleanup(self):
         if self.tmp_dir is None:
             return
         shutil.rmtree(self.tmp_dir)
         self.tmp_dir = None
 
     def _load_hooks(self):
--- a/python/mozperftest/mozperftest/metrics/common.py
+++ b/python/mozperftest/mozperftest/metrics/common.py
@@ -130,19 +130,17 @@ class MetricsStorage(object):
                 prefix = "{}-{}".format(self.prefix, data_type)
             config = {
                 "output": self.output_path,
                 "prefix": prefix,
                 "customtransformer": data_info["transformer"],
                 "file_groups": {data_type: data_info["files"]},
             }
 
-            ptnb = PerftestETL(
-                config["file_groups"], config, data_info["transformer"]
-            )
+            ptnb = PerftestETL(config["file_groups"], config, data_info["transformer"])
             r = ptnb.process(**data_info["options"])
             self.stddata[data_type] = r["data"]
 
         return self.stddata
 
     def filtered_metrics(
         self,
         group_name="firefox",
--- a/python/mozperftest/mozperftest/metrics/perfherder.py
+++ b/python/mozperftest/mozperftest/metrics/perfherder.py
@@ -107,19 +107,17 @@ class Perfherder(Layer):
             settings = fullsettings[name]
 
             # XXX Instead of just passing replicates here, we should build
             # up a partial perfherder data blob (with options) and subtest
             # overall values.
             subtests = {}
             for r in res:
                 vals = [
-                    v["value"]
-                    for v in r["data"]
-                    if type(v["value"]) in (int, float)
+                    v["value"] for v in r["data"] if type(v["value"]) in (int, float)
                 ]
                 if vals:
                     subtests[r["subtest"]] = vals
 
             perfherder_data = self._build_blob(
                 subtests,
                 name=name,
                 extra_options=settings.get("extraOptions"),
--- a/python/mozperftest/mozperftest/runner.py
+++ b/python/mozperftest/mozperftest/runner.py
@@ -85,17 +85,17 @@ def run_tests(mach_cmd, **kwargs):
     `PERFTEST_OPTIONS` environment variable that contains all options passed by
     the user via a ./mach perftest --push-to-try call.
     """
     _setup_path()
     on_try = kwargs.pop("on_try", False)
 
     # trying to get the arguments from the task params
     if on_try:
-        try_options = json.loads(os.environ['PERFTEST_OPTIONS'])
+        try_options = json.loads(os.environ["PERFTEST_OPTIONS"])
         kwargs.update(try_options)
 
     from mozperftest.utils import build_test_list, install_package
     from mozperftest import MachEnvironment, Metadata
 
     flavor = kwargs["flavor"]
     kwargs["tests"], tmp_dir = build_test_list(
         kwargs["tests"], randomized=flavor != "doc"
--- a/python/mozperftest/mozperftest/tests/test_browsertime.py
+++ b/python/mozperftest/mozperftest/tests/test_browsertime.py
@@ -4,17 +4,17 @@ import mozunit
 from unittest import mock
 import shutil
 import string
 import random
 
 import pytest
 
 from mozperftest.tests.support import get_running_env, EXAMPLE_TEST
-from mozperftest.environment import BROWSER
+from mozperftest.environment import TEST
 from mozperftest.test.browsertime import add_options
 from mozperftest.test.browsertime.runner import (
     NodeException,
     matches,
     extract_browser_name,
 )
 from mozperftest.utils import silence, temporary_env
 
@@ -39,17 +39,17 @@ def test_browser(*mocked):
     mach_cmd, metadata, env = get_running_env(
         android=True,
         android_app_name="something",
         browsertime_geckodriver="GECKODRIVER",
         browsertime_iterations=1,
         browsertime_extra_options="one=1,two=2",
     )
 
-    browser = env.layers[BROWSER]
+    browser = env.layers[TEST]
     env.set_arg("tests", [EXAMPLE_TEST])
 
     try:
         with browser as b, silence():
             b(metadata)
     finally:
         shutil.rmtree(mach_cmd._mach_context.state_dir)
     assert mach_cmd.run_process.call_count == 1
@@ -84,17 +84,17 @@ def test_browser_failed(*mocked):
         android=True,
         android_app_name="something",
         browsertime_geckodriver="GECKODRIVER",
         browsertime_iterations=1,
         browsertime_extra_options="one=1,two=2",
     )
     # set the return value to 1 to simulate a node failure
     mach_cmd.run_process.return_value = 1
-    browser = env.layers[BROWSER]
+    browser = env.layers[TEST]
     env.set_arg("tests", [EXAMPLE_TEST])
 
     with browser as b, silence(), pytest.raises(NodeException):
         b(metadata)
 
 
 @mock.patch("mozperftest.test.browsertime.runner.install_package")
 @mock.patch(
@@ -104,17 +104,17 @@ def test_browser_failed(*mocked):
 @mock.patch(
     "mozperftest.test.browsertime.runner.BrowsertimeRunner._setup_node_packages",
     new=lambda x, y: None,
 )
 def test_browser_desktop(*mocked):
     mach_cmd, metadata, env = get_running_env(
         browsertime_iterations=1, browsertime_extra_options="one=1,two=2",
     )
-    browser = env.layers[BROWSER]
+    browser = env.layers[TEST]
     env.set_arg("tests", [EXAMPLE_TEST])
 
     try:
         with browser as b, silence():
             # just checking that the setup_helper property gets
             # correctly initialized
             browsertime = browser.layers[-1]
             assert browsertime.setup_helper is not None
@@ -146,17 +146,17 @@ def test_add_options():
 )
 @mock.patch("mozbuild.artifact_cache.ArtifactCache.fetch", new=fetch)
 @mock.patch("mozperftest.test.browsertime.runner.BrowsertimeRunner.setup_helper")
 def test_install_url(*mocked):
     url = "https://here/tarball/" + "".join(
         [random.choice(string.hexdigits[:-6]) for c in range(40)]
     )
     mach, metadata, env = get_running_env(browsertime_install_url=url)
-    browser = env.layers[BROWSER]
+    browser = env.layers[TEST]
     env.set_arg("tests", [EXAMPLE_TEST])
 
     try:
         with temporary_env(MOZ_AUTOMATION="1"), browser as b, silence():
             b(metadata)
     finally:
         shutil.rmtree(mach._mach_context.state_dir)
 
@@ -169,17 +169,17 @@ def test_install_url(*mocked):
 )
 @mock.patch("mozbuild.artifact_cache.ArtifactCache.fetch", new=fetch)
 @mock.patch(
     "mozperftest.test.browsertime.runner.BrowsertimeRunner._setup_node_packages",
     new=lambda x, y: None,
 )
 def test_install_url_bad(*mocked):
     mach, metadata, env = get_running_env(browsertime_install_url="meh")
-    browser = env.layers[BROWSER]
+    browser = env.layers[TEST]
     env.set_arg("tests", [EXAMPLE_TEST])
 
     with pytest.raises(ValueError):
         try:
             with browser as b, silence():
                 b(metadata)
         finally:
             shutil.rmtree(mach._mach_context.state_dir)
--- a/python/mozperftest/mozperftest/tests/test_environment.py
+++ b/python/mozperftest/mozperftest/tests/test_environment.py
@@ -114,28 +114,29 @@ def test_exception_raised():
 
 
 def test_metrics_last():
     mach, metadata, env = get_running_env()
 
     system = create_mock()
     browser = create_mock()
 
-    # check that the metrics layer is called after
+    # check that the metrics layer is entered after
     # other have finished
     class M:
         def __enter__(self):
+            system.teardown.assert_called()
+            browser.teardown.assert_called()
             return self
 
         def __exit__(self, *args, **kw):
             return
 
         def __call__(self, metadata):
-            system.teardown.assert_called()
-            browser.teardown.assert_called()
+            return
 
     env.layers = [system, browser, M()]
     with env:
         env.run(metadata)
 
 
 if __name__ == "__main__":
     mozunit.main()
--- a/python/mozperftest/mozperftest/tests/test_perftestnotebook.py
+++ b/python/mozperftest/mozperftest/tests/test_perftestnotebook.py
@@ -14,23 +14,21 @@ def test_init(ptnb, standarized_data):
 def test_get_notebook_section(ptnb):
     func = "scatterplot"
     with (ptnb.const.here / "notebook-sections" / func).open() as f:
         assert ptnb.get_notebook_section(func) == f.read()
 
 
 def test_get_notebook_section_unknown_analysis(ptnb):
     func = "unknown"
-    with mock.patch(
-        "mozperftest.metrics.notebook.perftestnotebook.logger"
-    ) as logger:
+    with mock.patch("mozperftest.metrics.notebook.perftestnotebook.logger") as logger:
         assert ptnb.get_notebook_section(func) == ""
-        logger.assert_has_calls(mock.call.warning(
-                'Could not find the notebook-section called unknown'
-            ))
+        logger.assert_has_calls(
+            mock.call.warning("Could not find the notebook-section called unknown")
+        )
 
 
 @pytest.mark.parametrize("analysis", [["scatterplot"], None])
 def test_post_to_iodide(ptnb, standarized_data, analysis):
 
     opener = mock.mock_open()
 
     def mocked_open(self, *args, **kwargs):