Bug 1569559 - Don't remove subtests where a condition is outside the updatable set, r=maja_zf
authorJames Graham <james@hoppipolla.co.uk>
Fri, 20 Dec 2019 18:35:37 +0000
changeset 508060 256f8dffafec52a29dc97b522e89c0ed4148d7d0
parent 508059 0309a3ff69a0351a485808c0fcd484bbd93547e4
child 508061 0f937f06f71786f665bee862a33919e3b5bd1580
push id36938
push userbtara@mozilla.com
push dateSat, 21 Dec 2019 09:45:32 +0000
treeherdermozilla-central@de230ce6bd50 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersmaja_zf
bugs1569559
milestone73.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 1569559 - Don't remove subtests where a condition is outside the updatable set, r=maja_zf Sometimes we see subtests that only appear in certain conditions, e.g. if we get an early error on nightly but the tests run as expected in stable. In this case the wpt sync bot will update the metadata with --full and remove the missing subtests. To work around this, check if subtests contain any conditions that aren't part of the update set, and if so never remove them. Differential Revision: https://phabricator.services.mozilla.com/D57983
testing/web-platform/tests/tools/wptrunner/wptrunner/manifestupdate.py
testing/web-platform/tests/tools/wptrunner/wptrunner/metadata.py
testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py
testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestupdate.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/manifestupdate.py
@@ -508,17 +508,17 @@ class PropertyUpdate(object):
         run_info_with_condition = set()
 
         if full_update:
             # Even for a full update we need to keep hand-written conditions not
             # using the properties we've specified and not matching any run_info
             top_level_props, dependent_props = self.node.root.run_info_properties
             update_properties = set(top_level_props)
             for item in itervalues(dependent_props):
-                update_properties |= set(dependent_props)
+                update_properties |= set(item)
             for condition in current_conditions:
                 if ((not condition.variables.issubset(update_properties) and
                      not run_info_by_condition[condition])):
                     conditions.append((condition.condition_node,
                                        self.from_ini_value(condition.value)))
 
             new_conditions, errors = self._update_conditions_full(property_tree,
                                                                   prev_default=prev_default)
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/metadata.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/metadata.py
@@ -1,14 +1,15 @@
 from __future__ import print_function
 import array
 import os
 from collections import defaultdict, namedtuple
 
 from mozlog import structuredlog
+from six import itervalues
 from six.moves import intern
 
 from . import manifestupdate
 from . import testloader
 from . import wptmanifest
 from . import wpttest
 from .expected import expected_path
 from .vcs import git
@@ -280,17 +281,17 @@ def update_from_logs(id_test_map, update
 
 
 def update_results(id_test_map,
                    update_properties,
                    full_update,
                    disable_intermittent,
                    update_intermittent,
                    remove_intermittent):
-    test_file_items = set(id_test_map.itervalues())
+    test_file_items = set(itervalues(id_test_map))
 
     default_expected_by_type = {}
     for test_type, test_cls in wpttest.manifest_test_cls.iteritems():
         if test_cls.result_cls:
             default_expected_by_type[(test_type, False)] = test_cls.result_cls.default_expected
         if test_cls.subtest_result_cls:
             default_expected_by_type[(test_type, True)] = test_cls.subtest_result_cls.default_expected
 
@@ -667,30 +668,54 @@ class TestFileData(object):
             for name in seen_subtests:
                 subtest = test.get_subtest(name)
                 # If any of the items have children (ie subsubtests) we want to prune thes
                 if subtest.children:
                     rv.extend(subtest.children)
 
         return rv
 
+    def filter_unknown_props(self, update_properties, subtests):
+        # Remove subtests which have some conditions that aren't in update_properties
+        # since removing these may be inappropriate
+        top_level_props, dependent_props = update_properties
+        all_properties = set(top_level_props)
+        for item in itervalues(dependent_props):
+            all_properties |= set(item)
+
+        filtered = []
+        for subtest in subtests:
+            include = True
+            for key, _ in subtest.iter_properties():
+                conditions = subtest.get_conditions(key)
+                for condition in conditions:
+                    if not condition.variables.issubset(all_properties):
+                        include = False
+                        break
+                if not include:
+                    break
+            if include:
+                filtered.append(subtest)
+        return filtered
+
     def update(self, default_expected_by_type, update_properties,
                full_update=False, disable_intermittent=None, update_intermittent=False,
                remove_intermittent=False):
         # If we are doing a full update, we may need to prune missing nodes
         # even if the expectations didn't change
         if not self.requires_update and not full_update:
             return
 
         expected = self.expected(update_properties,
                                  update_intermittent=update_intermittent,
                                  remove_intermittent=remove_intermittent)
 
         if full_update:
             orphans = self.orphan_subtests(expected)
+            orphans = self.filter_unknown_props(update_properties, orphans)
 
             if not self.requires_update and not orphans:
                 return
 
             if orphans:
                 expected.modified = True
                 for item in orphans:
                     item.remove()
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py
@@ -62,17 +62,17 @@ def update(tests, *logs, **kwargs):
     assert not kwargs
     id_test_map, updater = create_updater(tests)
 
     for log in logs:
         log = create_log(log)
         updater.update_from_log(log)
 
     update_properties = (["debug", "os", "version", "processor"],
-                         {"os": ["version"], "processor": "bits"})
+                         {"os": ["version"], "processor": ["bits"]})
 
     expected_data = {}
     metadata.load_expected = lambda _, __, test_path, *args: expected_data.get(test_path)
     for test_path, test_ids, test_type, manifest_str in tests:
         expected_data[test_path] = manifestupdate.compile(BytesIO(manifest_str),
                                                           test_path,
                                                           "/",
                                                           update_properties,
@@ -1255,16 +1255,38 @@ def test_update_full_unknown():
 
     assert not new_manifest.is_empty
     assert new_manifest.get_test(test_id).children[0].get(
         "expected", run_info_1) == "FAIL"
     assert new_manifest.get_test(test_id).children[0].get(
         "expected", run_info_2) == "ERROR"
 
 
+@pytest.mark.xfail(sys.version[0] == "3",
+                   reason="metadata doesn't support py3")
+def test_update_full_unknown_missing():
+    tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
+  [subtest_deleted]
+    expected:
+      if release_or_beta: ERROR
+      FAIL
+""")]
+
+    log_0 = suite_log([("test_start", {"test": test_id}),
+                       ("test_status", {"test": test_id,
+                                        "subtest": "test1",
+                                        "status": "PASS",
+                                        "expected": "PASS"}),
+                       ("test_end", {"test": test_id,
+                                     "status": "OK"})],
+                      run_info={"debug": False, "release_or_beta": False})
+
+    updated = update(tests, log_0, full_update=True)
+    assert len(updated) == 0
+
 
 @pytest.mark.xfail(sys.version[0] == "3",
                    reason="metadata doesn't support py3")
 def test_update_default():
     tests = [("path/to/test.htm", [test_id], "testharness", """[test.htm]
   [test1]
     expected:
       if os == "mac": FAIL
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wptmanifest/backends/conditional.py
@@ -377,16 +377,20 @@ class ManifestItem(object):
     def iteritems(self):
         for item in self._flatten().iteritems():
             yield item
 
     def iterkeys(self):
         for item in self._flatten().iterkeys():
             yield item
 
+    def iter_properties(self):
+        for item in self._data:
+            yield item, self._data[item]
+
     def remove_value(self, key, value):
         if key not in self._data:
             return
         try:
             self._data[key].remove(value)
         except ValueError:
             return
         if not self._data[key]: