Bug 1512615 Support screenshots for tests r=rwood
authorFlorin Strugariu <bebe@mozilla.ro>
Fri, 04 Jan 2019 13:02:44 +0000
changeset 452547 1a25a39e7fb40462d2413acba6a689094d82c03b
parent 452546 e03824ca4e747d46c26cb704f369eef3422ec035
child 452548 58c32489a2fb49af3a36dfccdf390e4da969bc86
push id 110915
push user csabou@mozilla.com
push date Fri, 04 Jan 2019 16:47:57 +0000
treeherder mozilla-inbound@1d867d35ae2d
perfherder [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers rwood
bugs 1512615
milestone 66.0a1
Bug 1512615 Support screenshots for tests r=rwood Differential Revision: https://phabricator.services.mozilla.com/D13972
testing/mozharness/mozharness/mozilla/testing/raptor.py
testing/raptor/raptor/control_server.py
testing/raptor/raptor/manifest.py
testing/raptor/raptor/output.py
testing/raptor/raptor/results.py
testing/raptor/webext/raptor/runner.js
--- a/testing/mozharness/mozharness/mozilla/testing/raptor.py
+++ b/testing/mozharness/mozharness/mozilla/testing/raptor.py
@@ -598,24 +598,32 @@ class Raptor(TestingMixin, MercurialScri
                 self.run_command(["ls", "-l", item])
 
         elif '--no-upload-results' not in options:
             if not self.gecko_profile:
                 self._validate_treeherder_data(parser)
             if not self.run_local:
                 # copy results to upload dir so they are included as an artifact
                 self.info("copying raptor results to upload dir:")
+
+                src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor.json')
                 dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'perfherder-data.json')
                 self.info(str(dest))
-                src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor.json')
                 self._artifact_perf_data(src, dest)
+
                 if self.power_test:
                     src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'raptor-power.json')
                     self._artifact_perf_data(src, dest)
 
+                src = os.path.join(self.query_abs_dirs()['abs_work_dir'], 'screenshots.html')
+                if os.path.exists(src):
+                    dest = os.path.join(env['MOZ_UPLOAD_DIR'], 'screenshots.html')
+                    self.info(str(dest))
+                    self._artifact_perf_data(src, dest)
+
 
 class RaptorOutputParser(OutputParser):
     minidump_regex = re.compile(r'''raptorError: "error executing: '(\S+) (\S+) (\S+)'"''')
     RE_PERF_DATA = re.compile(r'.*PERFHERDER_DATA:\s+(\{.*\})')
 
     def __init__(self, **kwargs):
         super(RaptorOutputParser, self).__init__(**kwargs)
         self.minidump_output = None
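
Note on the mozharness change above: the screenshots page is uploaded only when a run actually produced one. A minimal standalone sketch of that guard, assuming a plain file copy (upload_screenshots, work_dir and upload_dir are illustrative stand-ins for the mozharness helper and env['MOZ_UPLOAD_DIR']):

import os
import shutil

def upload_screenshots(work_dir, upload_dir):
    # screenshots.html is only written when at least one capture was collected,
    # so skip the artifact copy entirely when the file is absent
    src = os.path.join(work_dir, 'screenshots.html')
    if not os.path.exists(src):
        return None
    dest = os.path.join(upload_dir, 'screenshots.html')
    shutil.copyfile(src, dest)
    return dest
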
--- a/testing/raptor/raptor/control_server.py
+++ b/testing/raptor/raptor/control_server.py
@@ -63,27 +63,35 @@ def MakeCustomHandlerClass(results_handl
 
             if data['type'] == "webext_gecko_profile":
                 # received gecko profiling results
                 _test = str(data['data'][0])
                 _pagecycle = str(data['data'][1])
                 _raw_profile = data['data'][2]
                 LOG.info("received gecko profile for test %s pagecycle %s" % (_test, _pagecycle))
                 self.write_raw_gecko_profile(_test, _pagecycle, _raw_profile)
+            elif data['type'] == 'webext_results':
+                LOG.info("received " + data['type'] + ": " + str(data['data']))
+                self.results_handler.add(data['data'])
+            elif data['type'] == "webext_raptor-page-timeout":
+                LOG.info("received " + data['type'] + ": " + str(data['data']))
+                # pageload test has timed out; record it as a failure
+                self.results_handler.add_page_timeout(str(data['data'][0]),
+                                                      str(data['data'][1]))
+            elif data['data'] == "__raptor_shutdownBrowser":
+                LOG.info("received " + data['type'] + ": " + str(data['data']))
+                # webext is telling us it's done, and time to shutdown the browser
+                self.shutdown_browser()
+            elif data['type'] == 'webext_screenshot':
+                LOG.info("received " + data['type'])
+                self.results_handler.add_image(str(data['data'][0]),
+                                               str(data['data'][1]),
+                                               str(data['data'][2]))
             else:
                 LOG.info("received " + data['type'] + ": " + str(data['data']))
-                if data['type'] == 'webext_results':
-                    self.results_handler.add(data['data'])
-                elif data['type'] == "webext_raptor-page-timeout":
-                    # pageload test has timed out; record it as a failure
-                    self.results_handler.add_page_timeout(str(data['data'][0]),
-                                                          str(data['data'][1]))
-                elif data['data'] == "__raptor_shutdownBrowser":
-                    # webext is telling us it's done, and time to shutdown the browser
-                    self.shutdown_browser()
 
         def do_OPTIONS(self):
             self.send_response(200, "ok")
             self.send_header('Access-Control-Allow-Origin', '*')
             self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
             self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
             self.send_header("Access-Control-Allow-Headers", "Content-Type")
             self.end_headers()
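
The dispatch above keys off the 'type' field of the JSON body posted by the webext; the screenshot branch deliberately logs only the type, because the payload carries a full base64 data URI. A minimal sketch of that dispatch shape, with results_handler and log standing in for the real handler and logger:

def handle_webext_message(data, results_handler, log):
    # data is the decoded JSON body: {"type": "webext_<msgType>", "data": ...}
    if data['type'] == 'webext_screenshot':
        # don't log data['data'] here; it contains a large base64 data URI
        log("received %s" % data['type'])
        screenshot, test_name, page_cycle = data['data']
        results_handler.add_image(str(screenshot), str(test_name), str(page_cycle))
    else:
        log("received %s: %s" % (data['type'], data['data']))
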
--- a/testing/raptor/raptor/manifest.py
+++ b/testing/raptor/raptor/manifest.py
@@ -106,16 +106,19 @@ def write_test_settings_json(args, test_
     if val == "false":
         test_settings['raptor-options']['subtest_lower_is_better'] = False
     else:
         test_settings['raptor-options']['subtest_lower_is_better'] = True
 
     if test_details.get("alert_threshold", None) is not None:
         test_settings['raptor-options']['alert_threshold'] = float(test_details['alert_threshold'])
 
+    if test_details.get("screen_capture", None) is not None:
+        test_settings['raptor-options']['screen_capture'] = test_details.get("screen_capture")
+
     # if gecko profiling is enabled, write profiling settings for webext
     if test_details.get("gecko_profile", False):
         test_settings['raptor-options']['gecko_profile'] = True
         # when profiling, if webRender is enabled we need to set that, so
         # the runner can add the web render threads to gecko profiling
         test_settings['raptor-options']['gecko_profile_interval'] = \
             float(test_details.get("gecko_profile_interval", 0))
         test_settings['raptor-options']['gecko_profile_entries'] = \
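
The manifest change above simply forwards an optional screen_capture flag from the test's manifest settings into the per-test JSON that the webext later reads; a small sketch of that flow (the test name and settings values are illustrative):

# test_details as parsed from the test manifest (illustrative values)
test_details = {'name': 'raptor-tp6-example', 'screen_capture': True}

test_settings = {'raptor-options': {'alert_threshold': 2.0}}
if test_details.get('screen_capture') is not None:
    test_settings['raptor-options']['screen_capture'] = test_details['screen_capture']
# runner.js later reads settings.screen_capture and flips its screenCapture flag
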
--- a/testing/raptor/raptor/output.py
+++ b/testing/raptor/raptor/output.py
@@ -24,16 +24,17 @@ class Output(object):
     def __init__(self, results, supporting_data):
         """
         - results : list of RaptorTestResult instances
         """
         self.results = results
         self.summarized_results = {}
         self.supporting_data = supporting_data
         self.summarized_supporting_data = []
+        self.summarized_screenshots = []
 
     def summarize(self):
         suites = []
         test_results = {
             'framework': {
                 'name': 'raptor',
             },
             'suites': suites,
@@ -550,35 +551,81 @@ class Output(object):
             _subtests[name]['value'] = round(filter.median(_subtests[name]['replicates']), 2)
             subtests.append(_subtests[name])
             # only use the 'total's to compute the overall result
             if name == 'total':
                 vals.append([_subtests[name]['value'], name])
 
         return subtests, vals
 
+    def summarize_screenshots(self, screenshots):
+        if len(screenshots) == 0:
+            return
+
+        self.summarized_screenshots.append("""<!DOCTYPE html>
+        <html> <head>
+        <style>
+            table, th, td {
+              border: 1px solid black;
+              border-collapse: collapse;
+            }
+        </style>
+        </head>
+        <body>
+        <h1>Captured screenshots</h1>
+        <table style="width:100%">
+          <tr>
+            <th>Test Name</th>
+            <th>Pagecycle</th>
+            <th>Screenshot</th>
+          </tr>""")
+
+        for screenshot in screenshots:
+            self.summarized_screenshots.append("""<tr>
+            <td>%s</td>
+            <td>%s</td>
+            <td>
+                <img src="%s" alt="%s %s" width="320" height="240">
+            </td>
+            </tr>""" % (screenshot['test_name'],
+                        screenshot['page_cycle'],
+                        screenshot['screenshot'],
+                        screenshot['test_name'],
+                        screenshot['page_cycle']))
+
+        self.summarized_screenshots.append("""</table></body></html>""")
+
     def output(self):
         """output to file and perfherder data json """
         if self.summarized_results == {}:
             LOG.error("error: no summarized raptor results found!")
             return False
 
         if os.environ['MOZ_UPLOAD_DIR']:
             # i.e. testing/mozharness/build/raptor.json locally; in production it will
             # be at /tasks/task_*/build/ (where it will be picked up by mozharness later
             # and made into a tc artifact accessible in treeherder as perfherder-data.json)
             results_path = os.path.join(os.path.dirname(os.environ['MOZ_UPLOAD_DIR']),
                                         'raptor.json')
+            screenshot_path = os.path.join(os.path.dirname(os.environ['MOZ_UPLOAD_DIR']),
+                                           'screenshots.html')
         else:
             results_path = os.path.join(os.getcwd(), 'raptor.json')
+            screenshot_path = os.path.join(os.getcwd(), 'screenshots.html')
 
         with open(results_path, 'w') as f:
             for result in self.summarized_results:
                 f.write("%s\n" % result)
 
+        if len(self.summarized_screenshots) > 0:
+            with open(screenshot_path, 'w') as f:
+                for result in self.summarized_screenshots:
+                    f.write("%s\n" % result)
+            LOG.info("screen captures can be found locally at: %s" % screenshot_path)
+
         # when gecko_profiling, we don't want results ingested by Perfherder
         extra_opts = self.summarized_results['suites'][0].get('extraOptions', [])
         if 'gecko_profile' not in extra_opts:
             # if we have supporting data i.e. power, we ONLY want those measurements
             # dumped out. TODO: Bug 1515406 - Add option to output both supplementary
             # data (i.e. power) and the regular Raptor test result
             # Both are already available as separate PERFHERDER_DATA json blobs
             if len(self.summarized_supporting_data) == 0:
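
As with raptor.json above, the screenshots page is written next to the upload dir so mozharness can later collect it as an artifact; a short sketch of the path selection, assuming the same MOZ_UPLOAD_DIR convention:

import os

# Mirror of the raptor.json path logic: in production MOZ_UPLOAD_DIR is set and
# the report lands one directory above it; locally it falls back to the cwd.
upload_dir = os.environ.get('MOZ_UPLOAD_DIR')
if upload_dir:
    screenshot_path = os.path.join(os.path.dirname(upload_dir), 'screenshots.html')
else:
    screenshot_path = os.path.join(os.getcwd(), 'screenshots.html')
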
--- a/testing/raptor/raptor/results.py
+++ b/testing/raptor/raptor/results.py
@@ -14,24 +14,32 @@ LOG = get_proxy_logger(component='result
 
 
 class RaptorResultsHandler():
     """Handle Raptor test results"""
 
     def __init__(self):
         self.results = []
         self.page_timeout_list = []
+        self.images = []
         self.supporting_data = None
 
     def add(self, new_result_json):
         # add to results
         LOG.info("received results in RaptorResultsHandler.add")
         new_result = RaptorTestResult(new_result_json)
         self.results.append(new_result)
 
+    def add_image(self, screenshot, test_name, page_cycle):
+        # add to the accumulated list of captured screenshots
+        LOG.info("received screenshot")
+        self.images.append({'screenshot': screenshot,
+                            'test_name': test_name,
+                            'page_cycle': page_cycle})
+
     def add_page_timeout(self, test_name, page_url):
         self.page_timeout_list.append({'test_name': test_name, 'url': page_url})
 
     def add_supporting_data(self, supporting_data):
         ''' Supporting data is additional data gathered outside of the regular
         Raptor test run (i.e. power data). Will arrive in a dict in the format of:
 
         supporting_data = {'type': 'data-type',
@@ -58,16 +66,17 @@ class RaptorResultsHandler():
             self.supporting_data = []
         self.supporting_data.append(supporting_data)
 
     def summarize_and_output(self, test_config):
         # summarize the result data, write to file and output PERFHERDER_DATA
         LOG.info("summarizing raptor test results")
         output = Output(self.results, self.supporting_data)
         output.summarize()
+        output.summarize_screenshots(self.images)
         if self.supporting_data is not None:
             output.summarize_supporting_data()
             output.output_supporting_data()
         return output.output()
 
 
 class RaptorTestResult():
     """Single Raptor test result class"""
--- a/testing/raptor/webext/raptor/runner.js
+++ b/testing/raptor/webext/raptor/runner.js
@@ -53,16 +53,17 @@ var isTTFIPending = false;
 var isLoadTimePending = false;
 var isBenchmarkPending = false;
 var pageTimeout = 10000; // default pageload timeout
 var geckoProfiling = false;
 var geckoInterval = 1;
 var geckoEntries = 1000000;
 var webRenderEnabled = false;
 var debugMode = 0;
+var screenCapture = false;
 
 var results = {"name": "",
                "page": "",
                "type": "",
                "lower_is_better": true,
                "alert_threshold": 2.0,
                "measurements": {}};
 
@@ -116,16 +117,20 @@ function getTestSettings() {
               geckoEntries = settings.gecko_entries;
             }
             if (settings.webrender_enabled !== undefined) {
               webRenderEnabled = settings.webrender_enabled;
             }
           }
         }
 
+        if (settings.screen_capture !== undefined) {
+          screenCapture = settings.screen_capture;
+        }
+
         if (settings.newtab_per_cycle !== undefined) {
           reuseTab = settings.newtab_per_cycle;
         }
 
         if (settings.page_timeout !== undefined) {
           pageTimeout = settings.page_timeout;
         }
         console.log("using page timeout (ms): " + pageTimeout);
@@ -235,37 +240,74 @@ function waitForResult() {
             !isDCFPending &&
             !isTTFIPending &&
             !isLoadTimePending) {
           cancelTimeoutAlarm("raptor-page-timeout");
           postToControlServer("status", "results received");
           if (geckoProfiling) {
             await getGeckoProfile();
           }
+          if (screenCapture) {
+            await getScreenCapture();
+          }
+
           resolve();
         } else {
           setTimeout(checkForResult, 5);
         }
       } else if (testType == "benchmark") {
         if (!isBenchmarkPending) {
           cancelTimeoutAlarm("raptor-page-timeout");
           postToControlServer("status", "results received");
           if (geckoProfiling) {
             await getGeckoProfile();
           }
           resolve();
+          if (screenCapture) {
+            await getScreenCapture();
+          }
         } else {
           setTimeout(checkForResult, 5);
         }
       }
     }
     checkForResult();
   });
 }
 
+async function getScreenCapture() {
+  console.log("Capturing screenshot...");
+  var capturing;
+  if (["firefox", "geckoview"].includes(browserName)) {
+    // Firefox/GeckoView expose a promise-based captureVisibleTab
+    capturing = ext.tabs.captureVisibleTab();
+  } else {
+    // callback-style API; wrap the callback in a promise so it can be awaited
+    capturing = new Promise(function(resolve, reject) {
+      ext.tabs.captureVisibleTab(resolve);
+    });
+  }
+
+  // wait for the capture to settle; onCaptured posts the screenshot to the
+  // control server, onError logs any failure so a rejection cannot escape
+  // getScreenCapture() and stall the page cycle
+  await capturing.then(onCaptured, onError);
+}
+
+function onCaptured(screenshotUri) {
+  console.log("Screenshot capured!");
+  postToControlServer("screenshot", [screenshotUri, testName, pageCycle]);
+}
+
+function onError(error) {
+  console.log("Screenshot captured failed!");
+  console.log(`Error: ${error}`);
+}
+
+
 async function startGeckoProfiling() {
   var _threads;
   if (webRenderEnabled) {
     _threads = ["GeckoMain", "Compositor", "WR,Renderer"];
   } else {
     _threads = ["GeckoMain", "Compositor"];
   }
   postToControlServer("status", "starting gecko profiling");
@@ -471,16 +513,17 @@ function postToControlServer(msgType, ms
     }
   };
 
   client.open("POST", url, true);
 
   client.setRequestHeader("Content-Type", "application/json");
   if (client.readyState == 1) {
     console.log("posting to control server");
+    console.log(msgData);
     var data = { "type": "webext_" + msgType, "data": msgData};
     client.send(JSON.stringify(data));
   }
   if (msgType == "results") {
     // we're finished, move to cleanup
     cleanUp();
   }
 }
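
postToControlServer() above wraps every message as {"type": "webext_" + msgType, "data": msgData}, so a screenshot arrives as type webext_screenshot with [dataUri, testName, pageCycle]. A small sketch of posting an equivalent message by hand, e.g. to exercise the control server locally (the port and values are illustrative assumptions; the webext does this via XMLHttpRequest):

import json
from urllib.request import Request, urlopen

payload = {
    'type': 'webext_screenshot',
    'data': ['data:image/png;base64,iVBORw0KGgo...',  # screenshot data URI
             'raptor-tp6-example',                     # test name
             1],                                       # page cycle
}
req = Request('http://127.0.0.1:8000/',
              data=json.dumps(payload).encode('utf-8'),
              headers={'Content-Type': 'application/json'})
urlopen(req)
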