Bug 1529156 [wpt PR 15305] - Add testdriver support for generate_test_report WebDriver command., a=testonly
authorPaul Meyer <paulmeyer@chromium.org>
Wed, 06 Mar 2019 12:31:01 +0000
changeset 525513 a7fb7ac01c01b3dd651e827a2f8e1118f0ade3c2
parent 525512 bde440dca3faddab96fdb4d85a38c45b7afa1a29
child 525514 53d055724690f5348ff99e07c2f1557f2e6c5b72
push id: 2032
push user: ffxbld-merge
push date: Mon, 13 May 2019 09:36:57 +0000
treeherder: mozilla-release@455c1065dcbe [default view] [failures only]
perfherder: [talos] [build metrics] [platform microbench] (compared to previous push)
reviewers: testonly
bugs: 1529156, 15305
milestone: 67.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1529156 [wpt PR 15305] - Add testdriver support for generate_test_report WebDriver command., a=testonly Automatic update from web-platform-tests Add testdriver support for generate_test_report WebDriver command. Also adds Reporting API tests which utilize this command. -- fix lint errors -- Add comments to tests -- wpt-commits: 5109698f5a9b61797c86fda8000baeb0e701f3f1, 49f474206a2374f3da136f3ddb62cb85ea5c5532, c0a12dae123c560ef835c19e166a253e44275c80 wpt-pr: 15305
testing/web-platform/tests/reporting/bufferSize.html
testing/web-platform/tests/reporting/disconnect.html
testing/web-platform/tests/reporting/generateTestReport.html
testing/web-platform/tests/reporting/nestedReport.html
testing/web-platform/tests/reporting/order.html
testing/web-platform/tests/resources/testdriver.js
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/protocol.py
testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-extra.js
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/tests/reporting/bufferSize.html
@@ -0,0 +1,29 @@
+<!DOCTYPE HTML>
+<meta charset=utf-8>
+<title>Reporting: Buffer size</title>
+<link rel="author" title="Paul Meyer" href="paulmeyer@chromium.org">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+<script>
+  // Test the buffer size (100) of ReportingObserver.
+  async_test(async function(test) {
+    // Generate more reports (110) than the buffer can hold (100).
+    for (let i = 0; i != 110; ++i)
+      await test_driver.generate_test_report("" + i);
+
+    var observer = new ReportingObserver(function(reports) {
+      test.step(function() {
+        // Only (the most recent) 100 reports should be observed, even though
+        // 110 were buffered.
+        assert_equals(reports.length, 100);
+        for (let i = 0; i != 100; ++i) {
+          assert_equals(reports[i].body.message, "" + (i + 10));
+        }
+      });
+
+      test.done();
+    }, {buffered: true});
+    observer.observe();
+  }, "Buffer size");
+</script>
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/tests/reporting/disconnect.html
@@ -0,0 +1,25 @@
+<!DOCTYPE HTML>
+<meta charset=utf-8>
+<title>Reporting: Disconnect</title>
+<link rel="author" title="Paul Meyer" href="paulmeyer@chromium.org">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+<script>
+  // A report generated before disconnect() must still be delivered to the
+  // observer that was connected at generation time.
+  async_test(async function(test) {
+    var reportObserver = new ReportingObserver(function(reports, observer) {
+      test.step(function() {
+        assert_equals(reports.length, 1);
+        assert_equals(reports[0].body.message, "Test message.");
+      });
+      test.done();
+    });
+    reportObserver.observe();
+
+    // Generate a report and disconnect immediately afterwards; the callback
+    // above should still fire for it.
+    await test_driver.generate_test_report("Test message.");
+    reportObserver.disconnect();
+  }, "Disconnect");
+</script>
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/tests/reporting/generateTestReport.html
@@ -0,0 +1,27 @@
+<!DOCTYPE html>
+<meta charset="utf-8" />
+<title>Reporting: Generate Test Report</title>
+<link rel="author" title="Paul Meyer" href="paulmeyer@chromium.org">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+<script>
+  // Verify that the "generate_test_report" testdriver API produces a report
+  // that a ReportingObserver on this page can observe.
+  async_test(function(test) {
+    var reportObserver = new ReportingObserver(function(reports) {
+      test.step(function() {
+        assert_equals(reports.length, 1);
+        // Check that the report carries the expected type, URL and message.
+        assert_equals(reports[0].type, "test");
+        assert_true(reports[0].url.endsWith("reporting/generateTestReport.html"));
+        assert_equals(reports[0].body.message, "Test message.");
+      });
+      test.done();
+    });
+    reportObserver.observe();
+
+    // Triggers a "test" type report, which the observer above receives.
+    test_driver.generate_test_report("Test message.");
+  }, "Generate Test Report");
+</script>
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/tests/reporting/nestedReport.html
@@ -0,0 +1,30 @@
+<!DOCTYPE HTML>
+<meta charset=utf-8>
+<title>Reporting: Nested report</title>
+<link rel="author" title="Paul Meyer" href="paulmeyer@chromium.org">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+<script>
+  // Test that reports can be generated within a ReportingObserver
+  // callback. These reports should be received by the same observer.
+  async_test(function(test) {
+    var step = 0;
+    var observer = new ReportingObserver(function(reports, observer) {
+      test.step(function() {
+        assert_equals(reports.length, 1);
+        assert_equals(reports[0].body.message, "" + step);
+      });
+
+      ++step;
+      if (step == 3) {
+        // Stop here; don't generate another report once the test is done.
+        test.done();
+        return;
+      }
+
+      // Generate the next report from inside the observer callback.
+      test_driver.generate_test_report("" + step);
+    });
+    observer.observe();
+
+    test_driver.generate_test_report("0");
+  }, "Nested report");
+</script>
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/tests/reporting/order.html
@@ -0,0 +1,32 @@
+<!DOCTYPE HTML>
+<meta charset=utf-8>
+<title>Reporting: Order</title>
+<link rel="author" title="Paul Meyer" href="paulmeyer@chromium.org">
+<script src="/resources/testharness.js"></script>
+<script src="/resources/testharnessreport.js"></script>
+<script src="/resources/testdriver.js"></script>
+<script src="/resources/testdriver-vendor.js"></script>
+<p id="error">No error</p>
+<script>
+  var count = 0;
+  async_test(function(test) {
+    var observer = new ReportingObserver(function(reports) {
+      test.step(function() {
+        // Reports should be received in the same order that they were
+        // generated.
+        for (const report of reports) {
+          assert_equals(report.body.message, "" + count++);
+        }
+      });
+
+      if (count == 10)
+        test.done();
+    });
+    observer.observe();
+
+    for (let i = 0; i != 10; ++i)
+      test_driver.generate_test_report("" + i);
+  }, "Order");
+</script>
+</body>
+</html>
--- a/testing/web-platform/tests/resources/testdriver.js
+++ b/testing/web-platform/tests/resources/testdriver.js
@@ -189,16 +189,30 @@
                                     (or tick). Authors are not expected to construct the actions
                                     sequence by hand, but to use the builder api provided in
                                     testdriver-actions.js
          * @returns {Promise} fufiled after the actions are performed, or rejected in
          *                    the cases the WebDriver command errors
          */
         action_sequence: function(actions) {
             return window.test_driver_internal.action_sequence(actions);
+        },
+
+        /**
+         * Generates a test report on the current page
+         *
+         * The generate_test_report function generates a report (to be observed
+         * by ReportingObserver) for testing purposes, as described in
+         * {@link https://w3c.github.io/reporting/#generate-test-report-command}
+         *
+         * @param {String} message - the message to be contained in the report
+         * @returns {Promise} fulfilled after the report is generated, or
+         *                    rejected if the report generation fails
+         */
+        generate_test_report: function(message) {
+            return window.test_driver_internal.generate_test_report(message);
         }
     };
 
     window.test_driver_internal = {
         /**
          * This flag should be set to `true` by any code which implements the
          * internal methods defined below for automation purposes. Doing so
          * allows the library to signal failure immediately when an automated
@@ -276,11 +290,22 @@
         /**
          * Send a sequence of pointer actions
          *
          * @returns {Promise} fufilled after actions are sent, rejected if any actions
          *                    fail
          */
         action_sequence: function(actions) {
             return Promise.reject(new Error("unimplemented"));
+        },
+
+        /**
+         * Generates a test report on the current page
+         *
+         * @param {String} message - the message to be contained in the report
+         * @returns {Promise} fulfilled after the report is generated, or
+         *                    rejected if the report generation fails
+         */
+        generate_test_report: function(message) {
+            return Promise.reject(new Error("unimplemented"));
         }
     };
 })();
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/base.py
@@ -504,17 +504,18 @@ class CallbackHandler(object):
         self.callbacks = {
             "action": self.process_action,
             "complete": self.process_complete
         }
 
         self.actions = {
             "click": ClickAction(self.logger, self.protocol),
             "send_keys": SendKeysAction(self.logger, self.protocol),
-            "action_sequence": ActionSequenceAction(self.logger, self.protocol)
+            "action_sequence": ActionSequenceAction(self.logger, self.protocol),
+            "generate_test_report": GenerateTestReportAction(self.logger, self.protocol)
         }
 
     def __call__(self, result):
         url, command, payload = result
         self.logger.debug("Got async callback: %s" % result[1])
         try:
             callback = self.callbacks[command]
         except KeyError:
@@ -588,8 +589,18 @@ class ActionSequenceAction(object):
                     if (action["type"] == "pointerMove" and
                         isinstance(action["origin"], dict)):
                         action["origin"] = self.get_element(action["origin"]["selector"])
         self.protocol.action_sequence.send_actions({"actions": actions})
 
     def get_element(self, selector):
         element = self.protocol.select.element_by_selector(selector)
         return element
+
+class GenerateTestReportAction(object):
+    """Handles the "generate_test_report" testdriver action.
+
+    Forwards the message from the testdriver payload to the protocol's
+    generate_test_report part, which issues the WebDriver command.
+    """
+    def __init__(self, logger, protocol):
+        self.logger = logger
+        self.protocol = protocol
+
+    def __call__(self, payload):
+        # "message" is set by testdriver-extra.js when the action is queued.
+        message = payload["message"]
+        self.logger.debug("Generating test report: %s" % message)
+        self.protocol.generate_test_report.generate_test_report(message)
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorwebdriver.py
@@ -15,17 +15,18 @@ from .base import (CallbackHandler,
                    strip_server)
 from .protocol import (BaseProtocolPart,
                        TestharnessProtocolPart,
                        Protocol,
                        SelectorProtocolPart,
                        ClickProtocolPart,
                        SendKeysProtocolPart,
                        ActionSequenceProtocolPart,
-                       TestDriverProtocolPart)
+                       TestDriverProtocolPart,
+                       GenerateTestReportProtocolPart)
 from ..testrunner import Stop
 
 import webdriver as client
 
 here = os.path.join(os.path.split(__file__)[0])
 
 
 class WebDriverBaseProtocolPart(BaseProtocolPart):
@@ -183,24 +184,34 @@ class WebDriverTestDriverProtocolPart(Te
             "type": "testdriver-%s" % str(message_type),
             "status": str(status)
         }
         if message:
             obj["message"] = str(message)
         self.webdriver.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
 
 
+class WebDriverGenerateTestReportProtocolPart(GenerateTestReportProtocolPart):
+    """WebDriver implementation of the generate_test_report protocol part."""
+
+    def setup(self):
+        # Reuse the session-level WebDriver connection owned by the protocol.
+        self.webdriver = self.parent.webdriver
+
+    def generate_test_report(self, message):
+        """Send the Generate Test Report WebDriver extension command.
+
+        :param message: The message to be contained in the generated report."""
+        json_message = {"message": message}
+        self.webdriver.send_session_command("POST", "reporting/generate_test_report", json_message)
+
+
 class WebDriverProtocol(Protocol):
     implements = [WebDriverBaseProtocolPart,
                   WebDriverTestharnessProtocolPart,
                   WebDriverSelectorProtocolPart,
                   WebDriverClickProtocolPart,
                   WebDriverSendKeysProtocolPart,
                   WebDriverActionSequenceProtocolPart,
-                  WebDriverTestDriverProtocolPart]
+                  WebDriverTestDriverProtocolPart,
+                  WebDriverGenerateTestReportProtocolPart]
 
     def __init__(self, executor, browser, capabilities, **kwargs):
         super(WebDriverProtocol, self).__init__(executor, browser)
         self.capabilities = capabilities
         self.url = browser.webdriver_url
         self.webdriver = None
 
     def connect(self):
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/protocol.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/executors/protocol.py
@@ -260,31 +260,46 @@ class ClickProtocolPart(ProtocolPart):
 
     @abstractmethod
     def element(self, element):
         """Perform a trusted click somewhere on a specific element.
 
         :param element: A protocol-specific handle to an element."""
         pass
 
+
 class SendKeysProtocolPart(ProtocolPart):
     """Protocol part for performing trusted clicks"""
     __metaclass__ = ABCMeta
 
     name = "send_keys"
 
     @abstractmethod
     def send_keys(self, element, keys):
         """Send keys to a specific element.
 
         :param element: A protocol-specific handle to an element.
         :param keys: A protocol-specific handle to a string of input keys."""
         pass
 
 
+class GenerateTestReportProtocolPart(ProtocolPart):
+    """Protocol part for generating test reports"""
+    __metaclass__ = ABCMeta
+
+    name = "generate_test_report"
+
+    @abstractmethod
+    def generate_test_report(self, message):
+        """Generate a test report.
+
+        :param message: The message to be contained in the report."""
+        pass
+
+
 class ActionSequenceProtocolPart(ProtocolPart):
     """Protocol part for performing trusted clicks"""
     __metaclass__ = ABCMeta
 
     name = "action_sequence"
 
     @abstractmethod
     def send_actions(self, actions):
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-extra.js
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testdriver-extra.js
@@ -85,9 +85,18 @@
                         action.origin = {selector: get_selector(action.origin)};
                     }
                 }
             }
         }
         window.__wptrunner_message_queue.push({"type": "action", "action": "action_sequence", "actions": actions});
         return pending_promise;
     };
+
+    // Queue a "generate_test_report" action for the wptrunner harness.
+    window.test_driver_internal.generate_test_report = function(message) {
+        // Stash the resolve/reject callbacks in outer-scope variables —
+        // presumably settled by the harness's reply handler, matching the
+        // action_sequence pattern above (NOTE(review): confirm against the
+        // message handler earlier in this file).
+        const pending_promise = new Promise(function(resolve, reject) {
+            pending_resolve = resolve;
+            pending_reject = reject;
+        });
+        window.__wptrunner_message_queue.push({"type": "action", "action": "generate_test_report", "message": message});
+        return pending_promise;
+    };
 })();