Bug 1333800 - Part b: Support running specific WPT tests in the JS shell; r=luke,bbouvier,jgraham
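
jstests.py now also loads testharness.js-based web-platform tests whose
manifest metadata sets the "jsshell" flag, and compares their harness and
subtest results against the expectations under testing/web-platform/meta.
A sketch of the intended invocation (the shell path and test selection are
placeholders):

  ./js/src/tests/jstests.py <path-to-js-shell> <requested-test-paths>
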
--- a/build/virtualenv_packages.txt
+++ b/build/virtualenv_packages.txt
@@ -52,16 +52,18 @@ mozilla.pth:testing/marionette/client
mozilla.pth:testing/marionette/harness
mozilla.pth:testing/marionette/harness/marionette_harness/runner/mixins/browsermob-proxy-py
mozilla.pth:testing/marionette/puppeteer/firefox
mozilla.pth:testing/raptor
mozilla.pth:testing/talos
packages.txt:testing/mozbase/packages.txt
mozilla.pth:tools
mozilla.pth:testing/web-platform
+mozilla.pth:testing/web-platform/tests/tools/third_party/html5lib
+mozilla.pth:testing/web-platform/tests/tools/third_party/webencodings
mozilla.pth:testing/web-platform/tests/tools/wptrunner
mozilla.pth:testing/web-platform/tests/tools/wptserve
mozilla.pth:testing/web-platform/tests/tools/six
mozilla.pth:testing/xpcshell
mozilla.pth:third_party/python/mock-1.0.0
mozilla.pth:xpcom/typelib/xpt/tools
mozilla.pth:tools/docs
mozilla.pth:media/webrtc/trunk/tools/gyp/pylib
--- a/js/src/tests/jstests.py
+++ b/js/src/tests/jstests.py
@@ -13,28 +13,31 @@ from __future__ import print_function
import os
import sys
import textwrap
import platform
from os.path import abspath, dirname, isfile, realpath
from contextlib import contextmanager
from copy import copy
+from itertools import chain
from subprocess import list2cmdline, call
from lib.tests import RefTestCase, get_jitflags, get_cpu_count, \
get_environment_overlay, change_env
from lib.results import ResultsSink
from lib.progressbar import ProgressBar
if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):
from lib.tasks_unix import run_all_tests
else:
from lib.tasks_win import run_all_tests
+here = dirname(abspath(__file__))
+
@contextmanager
def changedir(dirname):
pwd = os.getcwd()
os.chdir(dirname)
try:
yield
finally:
@@ -287,16 +290,96 @@ def parse_args():
# Hide the progress bar if it will get in the way of other output.
options.hide_progress = (options.format == 'automation' or
not ProgressBar.conservative_isatty() or
options.hide_progress)
return (options, prefix, requested_paths, excluded_paths)
+def load_wpt_tests(requested_paths, excluded_paths, debug):
+    """Return a list of `RefTestCase` objects for the jsshell testharness.js
+    tests, filtered by the requested/excluded paths and the debug flag."""
+ repo_root = abspath(os.path.join(here, "..", "..", ".."))
+ wp = os.path.join(repo_root, "testing", "web-platform")
+ wpt = os.path.join(wp, "tests")
+
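+    # Make wptrunner and its vendored dependencies importable without
+    # requiring the in-tree virtualenv.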
+ sys_paths = [
+ "python/mozterm",
+ "testing/mozbase/mozcrash",
+ "testing/mozbase/mozdevice",
+ "testing/mozbase/mozfile",
+ "testing/mozbase/mozinfo",
+ "testing/mozbase/mozleak",
+ "testing/mozbase/mozlog",
+ "testing/mozbase/mozprocess",
+ "testing/mozbase/mozprofile",
+ "testing/mozbase/mozrunner",
+ "testing/web-platform/tests/tools",
+ "testing/web-platform/tests/tools/third_party/html5lib",
+ "testing/web-platform/tests/tools/third_party/webencodings",
+ "testing/web-platform/tests/tools/wptrunner",
+ "testing/web-platform/tests/tools/wptserve",
+ ]
+ abs_sys_paths = [os.path.join(repo_root, path) for path in sys_paths]
+
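+    # If any of the expected in-tree directories is missing, skip the WPT
+    # tests instead of failing hard.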
+ failed = False
+ for path in abs_sys_paths:
+ if not os.path.isdir(path):
+ failed = True
+            print("Could not add '%s' to the path" % path)
+ if failed:
+ return []
+
+ sys.path[0:0] = abs_sys_paths
+
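+    # These imports only resolve once the paths above are on sys.path.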
+ from wptrunner import products, testloader, wptcommandline, wpttest, wptlogging
+
+ wptlogging.setup({}, {})
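+    # Minimal wptrunner configuration, standing in for what its command line
+    # would normally supply.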
+ kwargs = {
+ "config": None,
+ "tests_root": wpt,
+ "metadata_root": os.path.join(wp, "meta"),
+ "gecko_e10s": False,
+ "verify": False,
+ }
+ wptcommandline.set_from_config(kwargs)
+ test_paths = kwargs["test_paths"]
+
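+    # Keep only tests whose metadata sets "jsshell", i.e. tests that can run
+    # outside a browser.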
+ def filter_jsshell_tests(it):
+ for test in it:
+ if test[1].get("jsshell"):
+ yield test
+
+ test_manifests = testloader.ManifestLoader(test_paths, types=["testharness"],
+ meta_filters=[filter_jsshell_tests]).load()
+
+ run_info_extras = products.load_product(kwargs["config"], "firefox")[-1](**kwargs)
+ run_info = wpttest.get_run_info(kwargs["metadata_root"], "firefox",
+ debug=debug, extras=run_info_extras)
+
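+    # Honour the paths requested/excluded on the jstests.py command line,
+    # like the native reftests do.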
+ path_filter = testloader.TestFilter(test_manifests,
+ include=requested_paths,
+ exclude=excluded_paths)
+ loader = testloader.TestLoader(test_manifests,
+ ["testharness"],
+ run_info,
+ manifest_filters=[path_filter])
+
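+    # These helpers are loaded with '-f' before each test: the WPT harness
+    # itself and the shell-specific reporting glue.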
+ extra_helper_paths = [
+ os.path.join(wpt, "resources", "testharness.js"),
+ os.path.join(here, "testharnessreport.js"),
+ ]
+
+ return [
+ RefTestCase(wpt, test_path, extra_helper_paths=extra_helper_paths, wpt=test)
+ for test_path, test_type, test in loader.iter_tests()
+ ]
+
+
def load_tests(options, requested_paths, excluded_paths):
"""
Returns a tuple: (test_count, test_gen)
test_count: [int] Number of tests that will be in test_gen
test_gen: [iterable<Test>] Tests found that should be run.
"""
import lib.manifest as manifest
@@ -311,16 +394,21 @@ def load_tests(options, requested_paths,
xul_info = manifest.XULInfo(xul_abi, xul_os, xul_debug)
xul_tester = manifest.XULInfoTester(xul_info, options.js_shell)
test_dir = dirname(abspath(__file__))
path_options = PathOptions(test_dir, requested_paths, excluded_paths)
test_count = manifest.count_tests(test_dir, path_options)
test_gen = manifest.load_reftests(test_dir, path_options, xul_tester)
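+    # Append the jsshell-flagged WPT tests to the reftest stream.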
+ wpt_tests = load_wpt_tests(requested_paths, excluded_paths,
+ debug=xul_tester.test("isDebugBuild"))
+ test_count += len(wpt_tests)
+ test_gen = chain(test_gen, wpt_tests)
+
if options.test_reflect_stringify is not None:
def trs_gen(tests):
for test in tests:
test.test_reflect_stringify = options.test_reflect_stringify
# Even if the test is not normally expected to pass, we still
# expect reflect-stringify to be able to handle it.
test.expect = True
test.random = False
--- a/js/src/tests/lib/manifest.py
+++ b/js/src/tests/lib/manifest.py
@@ -323,16 +323,19 @@ def _parse_test_header(fullpath, testcas
def _parse_external_manifest(filename, relpath):
"""
Reads an external manifest file for test suites whose individual test cases
can't be decorated with reftest comments.
filename - str: name of the manifest file
relpath - str: relative path of the directory containing the manifest
within the test suite
"""
+ if not os.path.exists(filename):
+ return []
+
entries = []
with open(filename, 'r') as fp:
manifest_re = re.compile(r'^\s*(.*)\s+(include|script)\s+(\S+)$')
for line in fp:
line, _, comment = line.partition('#')
line = line.strip()
if not line:
@@ -422,13 +425,13 @@ def load_reftests(location, path_options
# Get the full path and relative location of the file.
filename = os.path.join(root, basename)
if not _is_test_file(root, basename, filename, path_options):
continue
# Skip empty files.
fullpath = os.path.join(location, filename)
- testcase = RefTestCase(filename)
+ testcase = RefTestCase(location, filename)
_apply_external_manifests(filename, testcase, externalManifestEntries,
xul_tester)
_parse_test_header(fullpath, testcase, xul_tester)
yield testcase
--- a/js/src/tests/lib/results.py
+++ b/js/src/tests/lib/results.py
@@ -1,10 +1,11 @@
from __future__ import print_function
+import json
import pipes
import re
from progressbar import NullProgressBar, ProgressBar
from structuredlog import TestLogger
# subprocess.list2cmdline does not properly escape for sh-like shells
@@ -57,21 +58,68 @@ class TestResult:
"""Classified result from a test run."""
def __init__(self, test, result, results):
self.test = test
self.result = result
self.results = results
@classmethod
+ def from_wpt_output(cls, output):
+ """Parse the output from a web-platform test that uses testharness.js.
+ (The output is written to stdout in js/src/tests/testharnessreport.js.)
+ """
+ from wptrunner.executors.base import testharness_result_converter
+
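+        # The shell exits with code 3 for an uncaught exception; any other
+        # nonzero code is treated as a crash.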
+ rc = output.rc
+ if rc != 0:
+ if rc == 3:
+ harness_status = "ERROR"
+ else:
+ harness_status = "CRASH"
+ tests = []
+ else:
+ for line in output.out.split("\n"):
+ if line.startswith("WPT OUTPUT: "):
+ msg = line[len("WPT OUTPUT: "):]
+ data = [output.test.wpt.url] + json.loads(msg)
+ harness_status_obj, tests = testharness_result_converter(output.test.wpt, data)
+ harness_status = harness_status_obj.status
+ break
+ else:
+ harness_status = "ERROR"
+ tests = []
+
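+        # Compare the harness status, and each subtest status, against the
+        # expectations recorded in the WPT metadata.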
+ result = cls.PASS
+ results = []
+ if harness_status != output.test.wpt.expected():
+ if harness_status == "CRASH":
+ result = cls.CRASH
+ else:
+ result = cls.FAIL
+ else:
+ for test in tests:
+ if test.status == output.test.wpt.expected(test.name):
+ test_result = (cls.PASS, "")
+ else:
+ test_result = (cls.FAIL, test.message)
+ result = cls.FAIL
+ results.append(test_result)
+
+ return cls(output.test, result, results)
+
+ @classmethod
def from_output(cls, output):
test = output.test
result = None # str: overall result, see class-level variables
results = [] # (str,str) list: subtest results (pass/fail, message)
+ if test.wpt:
+ return cls.from_wpt_output(output)
+
out, err, rc = output.out, output.err, output.rc
failures = 0
passes = 0
expected_rcs = []
if test.path.endswith('-n.js'):
expected_rcs.append(3)
--- a/js/src/tests/lib/tests.py
+++ b/js/src/tests/lib/tests.py
@@ -140,17 +140,19 @@ def get_cpu_count():
pass
return 1
class RefTestCase(object):
"""A test case consisting of a test and an expected result."""
- def __init__(self, path):
+ def __init__(self, root, path, extra_helper_paths=None, wpt=None):
+ # str: path of the tests root dir
+ self.root = root
# str: path of JS file relative to tests root dir
self.path = path
# [str]: Extra options to pass to the shell
self.options = []
# [str]: JIT flags to pass to the shell
self.jitflags = []
# str or None: path to reflect-stringify.js file to test
# instead of actually running tests
@@ -172,40 +174,49 @@ class RefTestCase(object):
self.terms = None
# The tag between |...| in the test header.
self.tag = None
# Anything occuring after -- in the test header.
self.comment = None
- @staticmethod
- def prefix_command(path):
- """Return the '-f shell.js' options needed to run a test with the given
- path."""
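+        # [str]: extra files loaded with '-f' before the test itself
+        #        (e.g. testharness.js for WPT tests)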
+ self.extra_helper_paths = extra_helper_paths or []
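+        # wpttest.Test or None: the web-platform test wrapped by this case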
+ self.wpt = wpt
+
+ def prefix_command(self):
+        """Return the '-f' options needed to load the shell.js prologues and
+        extra helpers for this test."""
+ path = self.path
prefix = []
while path != '':
assert path != '/'
path = os.path.dirname(path)
- shell_path = os.path.join(path, 'shell.js')
- prefix.append(shell_path)
+ shell_path = os.path.join(self.root, path, 'shell.js')
+ if os.path.exists(shell_path):
+ prefix.append(shell_path)
+ prefix.append('-f')
+ prefix.reverse()
+
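+        # Extra helpers (e.g. testharness.js) are loaded after the shell.js files.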
+ for extra_path in self.extra_helper_paths:
prefix.append('-f')
+ prefix.append(extra_path)
- prefix.reverse()
return prefix
+ def abs_path(self):
+ return os.path.join(self.root, self.path)
+
def get_command(self, prefix):
- cmd = prefix + self.jitflags + self.options \
- + RefTestCase.prefix_command(self.path)
+ cmd = prefix + self.jitflags + self.options + self.prefix_command()
if self.test_reflect_stringify is not None:
- cmd += [self.test_reflect_stringify, "--check", self.path]
+ cmd += [self.test_reflect_stringify, "--check", self.abs_path()]
elif self.is_module:
- cmd += ["--module", self.path]
+ cmd += ["--module", self.abs_path()]
else:
- cmd += ["-f", self.path]
+ cmd += ["-f", self.abs_path()]
return cmd
def __str__(self):
ans = self.path
if not self.enable:
ans += ', skip'
if self.error is not None:
ans += ', error=' + self.error
new file mode 100644
--- /dev/null
+++ b/js/src/tests/testharnessreport.js
@@ -0,0 +1,15 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+add_completion_callback(function(tests, harness_status) {
+ // This output is parsed in TestResult.from_wpt_output.
+ print("WPT OUTPUT: " + JSON.stringify([
+ harness_status.status,
+ harness_status.message,
+ harness_status.stack,
+ tests.map(function(t) {
+ return [t.name, t.status, t.message, t.stack];
+ }),
+ ]));
+});