author | James Graham <james@hoppipolla.co.uk> |
Thu, 11 Jul 2019 03:43:23 +0000 | |
changeset 482361 | 203e09f37d1f38b5f6f7de0fd8dc181a66d6daa2 |
parent 482360 | 76af8bbce61aedb0dbc30691e2a4f9dccf6018bf |
child 482362 | 9559ef8f347dc0e4e92546954472d4153283d5f2 |
push id | 89718 |
push user | james@hoppipolla.co.uk |
push date | Thu, 11 Jul 2019 09:14:02 +0000 |
treeherder | autoland@203e09f37d1f [default view] [failures only] |
perfherder | [talos] [build metrics] [platform microbench] (compared to previous push) |
reviewers | maja_zf |
bugs | 1564917, 1545143 |
milestone | 70.0a1 |
first release with | nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
|
last release without | nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
|
--- a/testing/web-platform/README.md +++ b/testing/web-platform/README.md @@ -201,16 +201,42 @@ possible to unset a value that has been special token `@Reset` (usually used with prefs), or to force a value to true or false using `@True` and `@False`. For example to enable the tests in `meta/feature/unsupported/subfeature-supported` one might create an ini file `meta/feature/unsupported/subfeature-supported/__dir__.ini` like: disabled: @False +Setting Metadata for Release Branches +------------------------------------- + +Run info properties can be used to set metadata for release branches +that differs from nightly (e.g. for when a feature depends on prefs +that are only set on nightly), for example: + + [filename.html] + expected: + if release_or_beta: FAIL + +Note that in general the automatic metadata update will work better if +the nonstandard configuration is used explicitly in the conditional, +and placed at the top of the set of conditions, i.e. the following +would cause problems later: + + [filename.html] + expected: + if nightly_build: PASS + FAIL + +This is because on import the automatic metadata updates are run +against the results of nightly builds, and we remove any existing +conditions that match that configuration to avoid building up stale +configuration options. + Test Format ----------- Javascript tests are written using [testharness.js](http://github.com/w3c/testharness.js/). Reftests are similar to standard Gecko reftests without an explicit manifest file, but with in-test or filename conventions for identifying the reference.
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestupdate.py +++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestupdate.py @@ -403,16 +403,23 @@ class PropertyUpdate(object): self.node.new_disabled = True else: msg = "Conflicting metadata values for %s" % ( self.node.root.test_path) if e.cond: msg += ": %s" % serialize(e.cond).strip() print(msg) + # If all the values match remove all conditionals + # This handles the case where we update a number of existing conditions and they + # all end up looking like the post-update default. + new_default = conditions[-1][1] if conditions[-1][0] is None else self.default_value + if all(condition[1] == new_default for condition in conditions): + conditions = [(None, new_default)] + # Don't set the default to the class default if (conditions and conditions[-1][0] is None and conditions[-1][1] == self.default_value): self.node.modified = True conditions = conditions[:-1] if self.node.modified: @@ -423,17 +430,18 @@ class PropertyUpdate(object): self.to_ini_value(value), condition) def update_conditions(self, property_tree, full_update): # This is complicated because the expected behaviour is complex # The complexity arises from the fact that there are two ways of running # the tool, with a full set of runs (full_update=True) or with partial metadata # (full_update=False). In the case of a full update things are relatively simple: - # * All existing conditionals are ignored + # * All existing conditionals are ignored, with the exception of conditionals that + # depend on variables not used by the updater, which are retained as-is # * All created conditionals are independent of each other (i.e. order isn't # important in the created conditionals) # In the case where we don't have a full set of runs, the expected behaviour # is much less clear. This is of course the common case for when a developer # runs the test on their own machine. 
In this case the assumptions above are untrue # * The existing conditions may be required to handle other platforms # * The order of the conditions may be important, since we don't know if they overlap # e.g. `if os == linux and version == 18.04` overlaps with `if (os != win)`. @@ -446,18 +454,16 @@ class PropertyUpdate(object): # * For each existing conditional, see if it matches any of the run info we # have. In cases where it does match, record the new results # * Where all the new results match, update the right hand side of that # conditional, otherwise remove it # * If this leaves nothing existing, then proceed as with the full update # * Otherwise add conditionals for the run_info that doesn't match any # remaining conditions prev_default = None - if full_update: - return self._update_conditions_full(property_tree) current_conditions = self.node.get_conditions(self.property_name) # Ignore the current default value if current_conditions and current_conditions[-1].condition_node is None: self.node.modified = True prev_default = current_conditions[-1].value current_conditions = current_conditions[:-1] @@ -479,16 +485,33 @@ class PropertyUpdate(object): for (run_info, node) in run_info_index.iteritems() if node.result_values} run_info_by_condition = self.run_info_by_condition(run_info_index, current_conditions) run_info_with_condition = set() + if full_update: + # Even for a full update we need to keep hand-written conditions not + # using the properties we've specified and not matching any run_info + top_level_props, dependent_props = self.node.root.run_info_properties + update_properties = set(top_level_props) + for item in dependent_props.itervalues(): + update_properties |= set(item) + for condition in current_conditions: + if (not condition.variables.issubset(update_properties) and + not run_info_by_condition[condition]): + conditions.append((condition.condition_node, + self.from_ini_value(condition.value))) + new_conditions, errors = 
self._update_conditions_full(property_tree, + prev_default=prev_default) + conditions.extend(new_conditions) + return conditions, errors + # Retain existing conditions if they match the updated values for condition in current_conditions: # All run_info that isn't handled by some previous condition all_run_infos_condition = run_info_by_condition[condition] run_infos = {item for item in all_run_infos_condition if item not in run_info_with_condition} if not run_infos:
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py +++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py @@ -589,16 +589,61 @@ def test_update_full(): assert new_manifest.get_test(test_id).children[0].get( "expected", run_info_1) == "FAIL" assert new_manifest.get_test(test_id).children[0].get( "expected", run_info_2) == "ERROR" @pytest.mark.xfail(sys.version[0] == "3", reason="metadata doesn't support py3") +def test_update_full_unknown(): + test_id = "/path/to/test.htm" + tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm] + [test1] + expected: + if release_or_beta: ERROR + if not debug and os == "osx": NOTRUN +""")] + + log_0 = suite_log([("test_start", {"test": test_id}), + ("test_status", {"test": test_id, + "subtest": "test1", + "status": "FAIL", + "expected": "PASS"}), + ("test_end", {"test": test_id, + "status": "OK"})], + run_info={"debug": False, "release_or_beta": False}) + + log_1 = suite_log([("test_start", {"test": test_id}), + ("test_status", {"test": test_id, + "subtest": "test1", + "status": "FAIL", + "expected": "PASS"}), + ("test_end", {"test": test_id, + "status": "OK"})], + run_info={"debug": True, "release_or_beta": False}) + + updated = update(tests, log_0, log_1, full_update=True) + new_manifest = updated[0][1] + + run_info_1 = default_run_info.copy() + run_info_1.update({"release_or_beta": False}) + run_info_2 = default_run_info.copy() + run_info_2.update({"release_or_beta": True}) + + assert not new_manifest.is_empty + assert new_manifest.get_test(test_id).children[0].get( + "expected", run_info_1) == "FAIL" + assert new_manifest.get_test(test_id).children[0].get( + "expected", run_info_2) == "ERROR" + + + +@pytest.mark.xfail(sys.version[0] == "3", + reason="metadata doesn't support py3") def test_update_default(): test_id = "/path/to/test.htm" tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm] [test1] expected: if os == "mac": FAIL ERROR""")]
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py +++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py @@ -1,11 +1,11 @@ import operator -from ..node import NodeVisitor, DataNode, ConditionalNode, KeyValueNode, ListNode, ValueNode, BinaryExpressionNode +from ..node import NodeVisitor, DataNode, ConditionalNode, KeyValueNode, ListNode, ValueNode, BinaryExpressionNode, VariableNode from ..parser import parse class ConditionalValue(object): def __init__(self, node, condition_func): self.node = node assert callable(condition_func) self.condition_func = condition_func @@ -56,16 +56,30 @@ class ConditionalValue(object): value = type_func(value) return value def remove(self): if len(self.node.parent.children) == 1: self.node.parent.remove() self.node.remove() + @property + def variables(self): + rv = set() + if self.condition_node is None: + return rv + stack = [self.condition_node] + while stack: + node = stack.pop() + if isinstance(node, VariableNode): + rv.add(node.data) + for child in reversed(node.children): + stack.append(child) + return rv + class Compiler(NodeVisitor): def compile(self, tree, data_cls_getter=None, **kwargs): """Compile a raw AST into a form where conditional expressions are represented by ConditionalValue objects that can be evaluated at runtime. tree - The root node of the wptmanifest AST to compile