Backed out 12 changesets (bug 1497898) for build bustages. CLOSED TREE
author: Brindusan Cristian <cbrindusan@mozilla.com>
Fri, 19 Oct 2018 15:37:41 +0300
changeset 490491 fe1c2bb6cfbc4d2da8c30094e15cdb7d92039d94
parent 490490 59fed9e69c53f229e240d0bdfb0b5ca2d3afeb67
child 490492 db19bf2d6479d2e7e399b5ff03eaee0019b3d05b
push id: 247
push user: fmarier@mozilla.com
push date: Sat, 27 Oct 2018 01:06:44 +0000
bugs: 1497898
milestone: 64.0a1
backs out: 57877c614829ff051ed0e2b66f895292e904b207
22a06c8c8dc6ea3a7413904dc486eb2bc2ac2f92
7bba4d617db672ba06b026f3f5d9e067e0c01d0d
3a9a7760db5c9e1bf584657ef14f1afbdeee5a91
c482d18cc0507e202e004238c15a3bb30b64a837
2caa5633dea13f347932979888fc3424f50c4c77
48be184d5377dd0aee94eca7ef5a3c4d369069c1
184bc31c33a6edba5ddb13d1daef81c39f87370f
c3cb0408498cd890dba6038de0debf247ba03324
a6a89509add7962cae05fc9b7e504c42910a7147
9afac925aef8a815e674ffb2e56c4220f7913004
5e3b8ad4c8f42342d9d16723236261501aafd040
Backed out 12 changesets (bug 1497898) for build bustages. CLOSED TREE Backed out changeset 57877c614829 (bug 1497898) Backed out changeset 22a06c8c8dc6 (bug 1497898) Backed out changeset 7bba4d617db6 (bug 1497898) Backed out changeset 3a9a7760db5c (bug 1497898) Backed out changeset c482d18cc050 (bug 1497898) Backed out changeset 2caa5633dea1 (bug 1497898) Backed out changeset 48be184d5377 (bug 1497898) Backed out changeset 184bc31c33a6 (bug 1497898) Backed out changeset c3cb0408498c (bug 1497898) Backed out changeset a6a89509add7 (bug 1497898) Backed out changeset 9afac925aef8 (bug 1497898) Backed out changeset 5e3b8ad4c8f4 (bug 1497898)
testing/web-platform/mach_commands_base.py
testing/web-platform/manifestupdate.py
testing/web-platform/tests/.gitignore
testing/web-platform/tests/tools/ci/ci_wptrunner_infrastructure.sh
testing/web-platform/tests/tools/gitignore/gitignore.py
testing/web-platform/tests/tools/gitignore/tests/test_gitignore.py
testing/web-platform/tests/tools/lint/lint.py
testing/web-platform/tests/tools/lint/tests/test_lint.py
testing/web-platform/tests/tools/manifest/commands.json
testing/web-platform/tests/tools/manifest/item.py
testing/web-platform/tests/tools/manifest/manifest.py
testing/web-platform/tests/tools/manifest/tests/test_manifest.py
testing/web-platform/tests/tools/manifest/update.py
testing/web-platform/tests/tools/manifest/vcs.py
testing/web-platform/tests/tools/wpt/testfiles.py
testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py
testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py
testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wpttest.py
testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
--- a/testing/web-platform/mach_commands_base.py
+++ b/testing/web-platform/mach_commands_base.py
@@ -1,17 +1,14 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 
-import os
 import sys
 
-from mozboot.util import get_state_dir
-
 
 def create_parser_wpt():
     from wptrunner import wptcommandline
     return wptcommandline.create_parser(["fennec", "firefox", "chrome", "edge", "servo"])
 
 
 class WebPlatformTestsRunner(object):
     """Run web platform tests."""
@@ -42,10 +39,9 @@ class WebPlatformTestsRunner(object):
         result = wptrunner.start(**kwargs)
         return int(not result)
 
     def update_manifest(self, logger, **kwargs):
         import manifestupdate
         return manifestupdate.run(logger=logger,
                                   src_root=self.setup.topsrcdir,
                                   obj_root=self.setup.topobjdir,
-                                  cache_root=os.path.join(get_state_dir()[0], "cache", "wpt"),
                                   **kwargs)
--- a/testing/web-platform/manifestupdate.py
+++ b/testing/web-platform/manifestupdate.py
@@ -1,11 +1,10 @@
 import ConfigParser
 import argparse
-import hashlib
 import imp
 import os
 import sys
 
 from mozlog.structured import commandline
 from wptrunner.wptcommandline import set_from_config
 
 import manifestdownload
@@ -101,18 +100,17 @@ def run(src_root, obj_root, logger=None,
         manifestdownload.download_from_taskcluster(logger,
                                                    src_root,
                                                    test_paths,
                                                    force=force_download)
     else:
         logger.debug("Skipping manifest download")
 
     if kwargs["update"] or kwargs["rebuild"]:
-        manifests = update(logger, src_wpt_dir, test_paths, rebuild=kwargs["rebuild"],
-                           cache_root=kwargs["cache_root"])
+        manifests = update(logger, src_wpt_dir, test_paths, rebuild=kwargs["rebuild"])
     else:
         logger.debug("Skipping manifest update")
         manifests = load_manifests(test_paths)
 
     return manifests
 
 
 def ensure_manifest_directories(logger, test_paths):
@@ -161,27 +159,34 @@ def generate_config(logger, repo_root, w
     parser.set('paths', 'prefs', os.path.abspath(os.path.join(wpt_dir, parser.get("paths", "prefs"))))
 
     with open(dest_config_path, 'wb') as config_file:
         parser.write(config_file)
 
     return dest_config_path
 
 
-def update(logger, wpt_dir, test_paths, rebuild=False, config_dir=None, cache_root=None):
+def update(logger, wpt_dir, test_paths, rebuild=False, config_dir=None):
     rv = {}
-    wptdir_hash = hashlib.sha256(os.path.abspath(wpt_dir)).hexdigest()
+
     for url_base, paths in test_paths.iteritems():
+        m = None
         manifest_path = paths["manifest_path"]
-        this_cache_root = os.path.join(cache_root, wptdir_hash, os.path.dirname(paths["manifest_rel_path"]))
-        m = manifest.manifest.load_and_update(paths["tests_path"],
-                                              manifest_path,
-                                              url_base,
-                                              working_copy=True,
-                                              cache_root=this_cache_root)
+        if not rebuild and os.path.exists(manifest_path):
+            logger.info("Updating manifest %s" % manifest_path)
+            try:
+                m = manifest.manifest.load(paths["tests_path"], manifest_path)
+            except manifest.manifest.ManifestVersionMismatch:
+                logger.info("Manifest format changed, rebuilding")
+        if m is None:
+            logger.info("Recreating manifest %s" % manifest_path)
+            m = manifest.manifest.Manifest(url_base)
+        manifest.update.update(paths["tests_path"], m, working_copy=True)
+        manifest.manifest.write(m, manifest_path)
+
         path_data = {"url_base": url_base}
         path_data.update(paths)
         rv[m] = path_data
 
     return rv
 
 
 def load_manifests(test_paths):
--- a/testing/web-platform/tests/.gitignore
+++ b/testing/web-platform/tests/.gitignore
@@ -1,24 +1,28 @@
 # Python
 *.py[co]
 .virtualenv/
 _venv/
 .cache/
 .pytest_cache/
 .tox/
-.coverage*
 
 # Node
 node_modules/
 
 # WPT repo stuff
 /MANIFEST.json
-.wptcache/
-/config.json
+
+testharness_runner.html
+!/testharness_runner.html
+!/tools/wptrunner/wptrunner/testharness_runner.html
+
+_certs
+config.json
 
 # Various OS/editor specific files
 *#
 *.sw[po]
 *~
 \#*
 scratch
 .idea/
--- a/testing/web-platform/tests/tools/ci/ci_wptrunner_infrastructure.sh
+++ b/testing/web-platform/tests/tools/ci/ci_wptrunner_infrastructure.sh
@@ -14,17 +14,16 @@ test_infrastructure() {
     else
         ARGS=$1
     fi
     ./wpt run --log-tbpl - --yes --manifest ~/meta/MANIFEST.json --metadata infrastructure/metadata/ --install-fonts $ARGS $PRODUCT infrastructure/
 }
 
 main() {
     PRODUCTS=( "firefox" "chrome" )
-    ./wpt manifest --rebuild -p ~/meta/MANIFEST.json
     for PRODUCT in "${PRODUCTS[@]}"; do
         if [ "$PRODUCT" != "firefox" ]; then
             # Firefox is expected to work using pref settings for DNS
             # Don't adjust the hostnames in that case to ensure this keeps working
             hosts_fixup
         fi
         if [[ "$PRODUCT" == "chrome" ]]; then
             install_chrome unstable
--- a/testing/web-platform/tests/tools/gitignore/gitignore.py
+++ b/testing/web-platform/tests/tools/gitignore/gitignore.py
@@ -1,251 +1,153 @@
 import re
 import os
-import itertools
-from six import itervalues, iteritems
-from collections import defaultdict
-
 
 end_space = re.compile(r"([^\\]\s)*$")
 
 
-def fnmatch_translate(pat):
+def fnmatch_translate(pat, path_name=False):
     parts = []
-    seq = None
+    seq = False
     i = 0
-    any_char = "[^/]"
-    if pat[0] == "/":
+    if pat[0] == "/" or path_name:
         parts.append("^")
-        pat = pat[1:]
+        any_char = "[^/]"
+        if pat[0] == "/":
+            pat = pat[1:]
     else:
-        # By default match the entire path up to a /
-        # but if / doesn't appear in the pattern we will mark is as
-        # a name pattern and just produce a pattern that matches against
-        # the filename
+        any_char = "."
         parts.append("^(?:.*/)?")
-
-    name_pattern = True
     if pat[-1] == "/":
         # If the last character is / match this directory or any subdirectory
         pat = pat[:-1]
         suffix = "(?:/|$)"
     else:
         suffix = "$"
     while i < len(pat):
         c = pat[i]
         if c == "\\":
             if i < len(pat) - 1:
                 i += 1
                 c = pat[i]
                 parts.append(re.escape(c))
             else:
                 raise ValueError
-        elif seq is not None:
-            # TODO: this doesn't really handle invalid sequences in the right way
+        elif seq:
             if c == "]":
-                seq = None
+                seq = False
+                # First two cases are to deal with the case where / is the only character
+                # in the sequence but path_name is True so it shouldn't match anything
                 if parts[-1] == "[":
                     parts = parts[:-1]
                 elif parts[-1] == "^" and parts[-2] == "[":
                     parts = parts[:-2]
                 else:
                     parts.append(c)
             elif c == "-":
                 parts.append(c)
-            else:
+            elif not (path_name and c == "/"):
                 parts += re.escape(c)
         elif c == "[":
             parts.append("[")
             if i < len(pat) - 1 and pat[i+1] in ("!", "^"):
                 parts.append("^")
                 i += 1
-            seq = i
+            seq = True
         elif c == "*":
             if i < len(pat) - 1 and pat[i+1] == "*":
-                if i > 0 and pat[i-1] != "/":
-                    raise ValueError
-                parts.append(".*")
+                parts.append(any_char + "*")
                 i += 1
-                if i < len(pat) - 1 and pat[i+1] != "/":
+                if i < len(pat) - 1 and pat[i+1] == "*":
                     raise ValueError
             else:
                 parts.append(any_char + "*")
         elif c == "?":
             parts.append(any_char)
-        elif c == "/" and not seq:
-            name_pattern = False
-            parts.append(c)
         else:
             parts.append(re.escape(c))
         i += 1
 
-    if name_pattern:
-        parts[0] = "^"
-
-    if seq is not None:
+    if seq:
         raise ValueError
     parts.append(suffix)
     try:
-        return name_pattern, re.compile("".join(parts))
+        return re.compile("".join(parts))
     except Exception:
-        raise ValueError
-
-# Regexp matching rules that have to be converted to patterns
-pattern_re = re.compile(r".*[\*\[\?]")
+        raise
 
 
 def parse_line(line):
     line = line.rstrip()
     if not line or line[0] == "#":
         return
 
     invert = line[0] == "!"
     if invert:
         line = line[1:]
 
     dir_only = line[-1] == "/"
 
     if dir_only:
         line = line[:-1]
 
-    # Could make a special case for **/foo, but we don't have any patterns like that
-    if not invert and not pattern_re.match(line):
-        literal = True
-        pattern = tuple(line.rsplit("/", 1))
-    else:
-        pattern = fnmatch_translate(line)
-        literal = False
-
-    return invert, dir_only, literal, pattern
+    return invert, dir_only, fnmatch_translate(line, dir_only)
 
 
 class PathFilter(object):
-    def __init__(self, root, extras=None, cache=None):
+    def __init__(self, root, extras=None):
         if root:
             ignore_path = os.path.join(root, ".gitignore")
         else:
             ignore_path = None
         if not ignore_path and not extras:
             self.trivial = True
             return
         self.trivial = False
 
-        self.literals_file = defaultdict(dict)
-        self.literals_dir = defaultdict(dict)
-        self.patterns_file = []
-        self.patterns_dir = []
-        self.cache = cache or {}
+        self.rules_file = []
+        self.rules_dir = []
 
         if extras is None:
             extras = []
 
         if ignore_path and os.path.exists(ignore_path):
-            args = ignore_path, extras
-        else:
-            args = None, extras
-        self._read_ignore(*args)
+            self._read_ignore(ignore_path)
+
+        for item in extras:
+            self._read_line(item)
 
-    def _read_ignore(self, ignore_path, extras):
-        if ignore_path is not None:
-            with open(ignore_path) as f:
-                for line in f:
-                    self._read_line(line)
-        for line in extras:
-            self._read_line(line)
+    def _read_ignore(self, ignore_path):
+        with open(ignore_path) as f:
+            for line in f:
+                self._read_line(line)
 
     def _read_line(self, line):
         parsed = parse_line(line)
         if not parsed:
             return
-        invert, dir_only, literal, rule = parsed
-
-        if invert:
-            # For exclude rules, we attach the rules to all preceeding patterns, so
-            # that we can match patterns out of order and check if they were later
-            # overriden by an exclude rule
-            assert not literal
-            if not dir_only:
-                rules_iter = itertools.chain(
-                    itertools.chain(*(iteritems(item) for item in itervalues(self.literals_dir))),
-                    itertools.chain(*(iteritems(item) for item in itervalues(self.literals_file))),
-                    self.patterns_dir,
-                    self.patterns_file)
-            else:
-                rules_iter = itertools.chain(
-                    itertools.chain(*(iteritems(item) for item in itervalues(self.literals_dir))),
-                    self.patterns_dir)
-
-            for rules in rules_iter:
-                rules[1].append(rule)
+        invert, dir_only, regexp = parsed
+        if dir_only:
+            self.rules_dir.append((regexp, invert))
         else:
-            if literal:
-                if len(rule) == 1:
-                    dir_name, pattern = None, rule[0]
-                else:
-                    dir_name, pattern = rule
-                self.literals_dir[dir_name][pattern] = []
-                if not dir_only:
-                    self.literals_file[dir_name][pattern] = []
-            else:
-                self.patterns_dir.append((rule, []))
-                if not dir_only:
-                    self.patterns_file.append((rule, []))
+            self.rules_file.append((regexp, invert))
 
-    def filter(self, iterator):
-        empty = {}
-        for dirpath, dirnames, filenames in iterator:
-            orig_dirpath = dirpath
-            if os.path.sep != "/":
-                dirpath = dirpath.replace(os.path.sep, "/")
+    def __call__(self, path):
+        if os.path.sep != "/":
+            path = path.replace(os.path.sep, "/")
 
-            keep_dirs = []
-            keep_files = []
+        if self.trivial:
+            return True
 
-            for iter_items, literals, patterns, target, suffix in [
-                    (dirnames, self.literals_dir, self.patterns_dir, keep_dirs, "/"),
-                    (filenames, self.literals_file, self.patterns_file, keep_files, "")]:
-                for item in iter_items:
-                    name = item[0]
-                    if dirpath:
-                        path = "%s/%s" % (dirpath, name) + suffix
-                    else:
-                        path = name + suffix
-                    if path in self.cache:
-                        if not self.cache[path]:
-                            target.append(item)
-                        continue
-                    for rule_dir in [None, dirpath]:
-                        if name in literals.get(rule_dir, empty):
-                            exclude = literals[rule_dir][name]
-                            if not any(rule.match(path) for rule in exclude):
-                                # Skip this item
-                                self.cache[path] = True
-                                break
-                    else:
-                        for (component_only, pattern), exclude in patterns:
-                            if component_only:
-                                match = pattern.match(name)
-                            else:
-                                match = pattern.match(path)
-                            if match:
-                                if not any(rule.match(name if name_only else path)
-                                           for name_only, rule in exclude):
-                                    # Skip this item
-                                    self.cache[path] = True
-                                    break
-                        else:
-                            self.cache[path] = False
-                            target.append(item)
+        path_is_dir = path[-1] == "/"
+        if path_is_dir:
+            path = path[:-1]
+            rules = self.rules_dir
+        else:
+            rules = self.rules_file
 
-            dirnames[:] = keep_dirs
-            assert ".git" not in dirnames
-            yield orig_dirpath, dirnames, keep_files
-
-    def __call__(self, iterator):
-        if self.trivial:
-            return iterator
-
-        return self.filter(iterator)
-
-
-def has_ignore(dirpath):
-    return os.path.exists(os.path.join(dirpath, ".gitignore"))
+        include = True
+        for regexp, invert in rules:
+            if not include and invert and regexp.match(path):
+                include = True
+            elif include and not invert and regexp.match(path):
+                include = False
+        return include
--- a/testing/web-platform/tests/tools/gitignore/tests/test_gitignore.py
+++ b/testing/web-platform/tests/tools/gitignore/tests/test_gitignore.py
@@ -1,100 +1,82 @@
 import pytest
 
 from ..gitignore import fnmatch_translate, PathFilter
 
 match_data = [
-    ("foo", True, ["a/foo", "foo"]),
-    ("*.a", True, ["foo.a", "a/foo.a", "a/b/foo.a", "a.a/foo.a"]),
-    ("*.py[co]", True, ["a.pyc", "a.pyo", "a/b/c.pyc"]),
-    ("\\#*", True, ["#a", "a/#b"]),
-    ("*#", True, ["a#", "a/b#", "#a#"]),
-    ("/*.c", True, ["a.c", ".c"]),
+    ("foo", False, ["a/foo", "foo"]),
+    ("*.a", False, ["foo.a", "a/foo.a", "a/b/foo.a", "a.a/foo.a"]),
+    ("*.py[co]", False, ["a.pyc", "a.pyo", "a/b/c.pyc"]),
+    ("\\#*", False, ["#a", "a/#b"]),
+    ("*#", False, ["a#", "a/b#", "#a#"]),
+    ("/*.c", False, ["a.c", ".c"]),
     ("**/b", False, ["a/b", "a/c/b"]),
     ("*b", True, ["ab"]),
-    ("*b", True, ["a/b"]),
-    ("**/b", False, ["a/b"]),
-    ("a/", True, ["a"]),
-    ("a[/]b", True, []),
-    ("**/b", False, ["a/c/b"]),
-    ("a?c", True, ["abc"]),
-    ("a[^b]c", True, ["acc"]),
-    ("a[b-c]c", True, ["abc", "acc"]),
-    ("a[^]c", True, ["ac"]),  # This is probably wrong
-    ("a[^]c", True, ["ac"]),  # This is probably wrong
+    ("**/b", True, ["a/b"]),
+    ("a/", True, ["a", "a/b", "a/b/c"])
 ]
 
 mismatch_data = [
-    ("foo", True, ["foob", "afoo"]),
-    ("*.a", True, ["a", "foo:a", "a.a/foo"]),
-    ("*.py[co]", True, ["a.pyd", "pyo", "a.py"]),
-    ("a", True, ["ab"]),
-    ("a?c", True, ["ac", "abbc"]),
-    ("a[^b]c", True, ["abc"]),
-    ("a[b-c]c", True, ["adc"]),
+    ("foo", False, ["foob", "afoo"]),
+    ("*.a", False, ["a", "foo:a", "a.a/foo"]),
+    ("*.py[co]", False, ["a.pyd", "pyo"]),
+    ("/*.c", False, ["a/b.c"]),
+    ("*b", True, ["a/b"]),
+    ("**b", True, ["a/b"]),
+    ("a[/]b", True, ["a/b"]),
+    ("**/b", True, ["a/c/b"]),
+    ("a", True, ["ab"])
 ]
 
 invalid_data = [
     "[a",
     "***/foo",
     "a\\",
-    "**b",
-    "b**/",
-    "[[]"
 ]
 
 filter_data = [
-    (["foo", "bar/", "/a", "*.py"],
-     [("", ["foo", "bar", "baz"], ["a"]),
-      ("baz", ["a"], ["foo", "bar"])],
-     [(["baz"], []),
-      (["a"], ["bar"])]),
-    (["#foo", "", "a*", "!a.py"],
-     [("", ["foo"], ["a", "a.foo", "a.py"])],
-     [(["foo"], ["a.py"])]),
+    ("foo", True),
+    ("a", False),
+    ("a/b", False),
+    ("a/c", True),
+    ("a/c/", False),
+    ("c/b", True)
 ]
 
 
 def expand_data(compact_data):
-    for pattern, name_only, inputs in compact_data:
+    for pattern, path_name, inputs in compact_data:
         for input in inputs:
-            yield pattern, name_only, input
+            yield pattern, input, path_name
 
 
-@pytest.mark.parametrize("pattern, name_only, input", expand_data(match_data))
-def tests_match(pattern, name_only, input):
-    name_only_result, regexp = fnmatch_translate(pattern)
-    assert name_only_result == name_only
-    if name_only:
-        input = input.rsplit("/", 1)[-1]
+@pytest.mark.parametrize("pattern, input, path_name", expand_data(match_data))
+def tests_match(pattern, input, path_name):
+    regexp = fnmatch_translate(pattern, path_name)
     assert regexp.match(input) is not None
 
 
-@pytest.mark.parametrize("pattern, name_only, input", expand_data(mismatch_data))
-def tests_no_match(pattern, name_only, input):
-    name_only_result, regexp = fnmatch_translate(pattern)
-    assert name_only_result == name_only
-    if name_only:
-        input = input.rsplit("/", 1)[-1]
+@pytest.mark.parametrize("pattern, input, path_name", expand_data(mismatch_data))
+def tests_no_match(pattern, input, path_name):
+    regexp = fnmatch_translate(pattern, path_name)
     assert regexp.match(input) is None
 
 
 @pytest.mark.parametrize("pattern", invalid_data)
 def tests_invalid(pattern):
     with pytest.raises(ValueError):
-        fnmatch_translate(pattern)
+        fnmatch_translate(pattern, False)
+    with pytest.raises(ValueError):
+        fnmatch_translate(pattern, True)
 
 
-@pytest.mark.parametrize("rules, input, expected", filter_data)
-def test_path_filter(rules, input, expected):
-    f = PathFilter(None, rules)
-    # Add some fake stat data
-    for i, item in enumerate(input):
-        repl = [input[i][0]]
-        for j in [1, 2]:
-            repl.append([(name, None) for name in input[i][j]])
-        input[i] = tuple(repl)
-
-    for i, output in enumerate(f(input)):
-        assert output[0] == input[i][0]
-        for j in [1, 2]:
-            assert [item[0] for item in output[j]] == expected[i][j-1]
+@pytest.mark.parametrize("path, expected", filter_data)
+def test_path_filter(path, expected):
+    extras = [
+        "#foo",
+        "a  ",
+        "**/b",
+        "a/c/",
+        "!c/b",
+    ]
+    f = PathFilter(None, extras)
+    assert f(path) == expected
--- a/testing/web-platform/tests/tools/lint/lint.py
+++ b/testing/web-platform/tests/tools/lint/lint.py
@@ -11,17 +11,16 @@ import sys
 import tempfile
 
 from collections import defaultdict
 
 from . import fnmatch
 from .. import localpaths
 from ..gitignore.gitignore import PathFilter
 from ..wpt import testfiles
-from ..manifest.vcs import walk
 
 from manifest.sourcefile import SourceFile, js_meta_re, python_meta_re, space_chars, get_any_variants, get_default_any_variants
 from six import binary_type, iteritems, itervalues
 from six.moves import range
 from six.moves.urllib.parse import urlsplit, urljoin
 
 import logging
 
@@ -63,21 +62,24 @@ you could add the following line to the 
 %s: %s"""
 
 def all_filesystem_paths(repo_root, subdir=None):
     path_filter = PathFilter(repo_root, extras=[".git/"])
     if subdir:
         expanded_path = subdir
     else:
         expanded_path = repo_root
-    for dirpath, dirnames, filenames in path_filter(walk(expanded_path)):
-        for filename, _ in filenames:
-            path = os.path.join(dirpath, filename)
-            yield path
-
+    for dirpath, dirnames, filenames in os.walk(expanded_path):
+        for filename in filenames:
+            path = os.path.relpath(os.path.join(dirpath, filename), repo_root)
+            if path_filter(path):
+                yield path
+        dirnames[:] = [item for item in dirnames if
+                       path_filter(os.path.relpath(os.path.join(dirpath, item) + "/",
+                                                   repo_root)+"/")]
 
 def _all_files_equal(paths):
     """
     Checks all the paths are files that are byte-for-byte identical
 
     :param paths: the list of paths to compare
     :returns: True if they are all identical
     """
--- a/testing/web-platform/tests/tools/lint/tests/test_lint.py
+++ b/testing/web-platform/tests/tools/lint/tests/test_lint.py
@@ -396,23 +396,23 @@ def test_check_css_globally_unique_ignor
             assert rv == 0
             assert mocked_check_path.call_count == 1
             assert mocked_check_file_contents.call_count == 1
     assert caplog.text == ""
 
 
 def test_all_filesystem_paths():
     with mock.patch(
-            'tools.lint.lint.walk',
-            return_value=[('',
-                           [('dir_a', None), ('dir_b', None)],
-                           [('file_a', None), ('file_b', None)]),
-                          ('dir_a',
+            'os.walk',
+            return_value=[('.',
+                           ['dir_a', 'dir_b'],
+                           ['file_a', 'file_b']),
+                          (os.path.join('.', 'dir_a'),
                            [],
-                           [('file_c', None), ('file_d', None)])]
+                           ['file_c', 'file_d'])]
     ):
         got = list(lint_mod.all_filesystem_paths('.'))
         assert got == ['file_a',
                        'file_b',
                        os.path.join('dir_a', 'file_c'),
                        os.path.join('dir_a', 'file_d')]
 
 
--- a/testing/web-platform/tests/tools/manifest/commands.json
+++ b/testing/web-platform/tests/tools/manifest/commands.json
@@ -1,5 +1,6 @@
 {"manifest":
  {"path": "update.py", "script": "run", "parser": "create_parser", "help": "Update the MANIFEST.json file",
   "virtualenv": false},
  "manifest-download":
- {"path": "download.py", "script": "run", "parser": "create_parser", "help": "Download recent pregenerated MANIFEST.json file", "virtualenv": false}}
+ {"path": "download.py", "script": "run", "parser": "create_parser", "help": "Download recent pregenerated MANIFEST.json file",
+  "virtualenv": false}}
--- a/testing/web-platform/tests/tools/manifest/item.py
+++ b/testing/web-platform/tests/tools/manifest/item.py
@@ -1,27 +1,25 @@
 from six.moves.urllib.parse import urljoin, urlparse
 from abc import ABCMeta, abstractproperty
 
 
-class SourceFileCache(object):
-    def __init__(self):
-        self.source_files = {}
-
-    def make_new(self, tests_root, path, url_base):
+def get_source_file(source_files, tests_root, manifest, path):
+    def make_new():
         from .sourcefile import SourceFile
 
-        return SourceFile(tests_root, path, url_base)
+        return SourceFile(tests_root, path, manifest.url_base)
 
-    def get(self, tests_root, manifest, path):
+    if source_files is None:
+        return make_new()
 
-        if path not in self.source_files:
-            self.source_files[path] = self.make_new(tests_root, path, manifest.url_base)
+    if path not in source_files:
+        source_files[path] = make_new()
 
-        return self.source_files[path]
+    return source_files[path]
 
 
 item_types = {}
 
 
 class ManifestItemMeta(ABCMeta):
     """Custom metaclass that registers all the subclasses in the
     item_types dictionary according to the value of their item_type
@@ -34,19 +32,18 @@ class ManifestItemMeta(ABCMeta):
         return rv
 
 
 class ManifestItem(object):
     __metaclass__ = ManifestItemMeta
 
     item_type = None
 
-    source_file_cache = SourceFileCache()
-
     def __init__(self, source_file, manifest=None):
+        self.manifest = manifest
         self.source_file = source_file
 
     @abstractproperty
     def id(self):
         """The test's id (usually its url)"""
         pass
 
     @property
@@ -82,18 +79,18 @@ class ManifestItem(object):
 
     def __repr__(self):
         return "<%s.%s id=%s, path=%s>" % (self.__module__, self.__class__.__name__, self.id, self.path)
 
     def to_json(self):
         return [{}]
 
     @classmethod
-    def from_json(cls, manifest, tests_root, path, obj):
-        source_file = cls.source_file_cache.get(tests_root, manifest, path)
+    def from_json(cls, manifest, tests_root, path, obj, source_files=None):
+        source_file = get_source_file(source_files, tests_root, manifest, path)
         return cls(source_file,
                    manifest=manifest)
 
 
 class URLManifestItem(ManifestItem):
     def __init__(self, source_file, url, url_base="/", manifest=None):
         ManifestItem.__init__(self, source_file, manifest=manifest)
         self._url = url
@@ -111,18 +108,18 @@ class URLManifestItem(ManifestItem):
     def url(self):
         return urljoin(self.url_base, self._url)
 
     def to_json(self):
         rv = [self._url, {}]
         return rv
 
     @classmethod
-    def from_json(cls, manifest, tests_root, path, obj):
-        source_file = cls.source_file_cache.get(tests_root, manifest, path)
+    def from_json(cls, manifest, tests_root, path, obj, source_files=None):
+        source_file = get_source_file(source_files, tests_root, manifest, path)
         url, extras = obj
         return cls(source_file,
                    url,
                    url_base=manifest.url_base,
                    manifest=manifest)
 
 
 class TestharnessTest(URLManifestItem):
@@ -143,18 +140,18 @@ class TestharnessTest(URLManifestItem):
             rv[-1]["timeout"] = self.timeout
         if self.testdriver:
             rv[-1]["testdriver"] = self.testdriver
         if self.jsshell:
             rv[-1]["jsshell"] = True
         return rv
 
     @classmethod
-    def from_json(cls, manifest, tests_root, path, obj):
-        source_file = cls.source_file_cache.get(tests_root, manifest, path)
+    def from_json(cls, manifest, tests_root, path, obj, source_files=None):
+        source_file = get_source_file(source_files, tests_root, manifest, path)
 
         url, extras = obj
         return cls(source_file,
                    url,
                    url_base=manifest.url_base,
                    timeout=extras.get("timeout"),
                    testdriver=bool(extras.get("testdriver")),
                    jsshell=bool(extras.get("jsshell")),
@@ -185,18 +182,18 @@ class RefTestNode(URLManifestItem):
             extras["timeout"] = self.timeout
         if self.viewport_size is not None:
             extras["viewport_size"] = self.viewport_size
         if self.dpi is not None:
             extras["dpi"] = self.dpi
         return rv
 
     @classmethod
-    def from_json(cls, manifest, tests_root, path, obj):
-        source_file = cls.source_file_cache.get(tests_root, manifest, path)
+    def from_json(cls, manifest, tests_root, path, obj, source_files=None):
+        source_file = get_source_file(source_files, tests_root, manifest, path)
         url, references, extras = obj
         return cls(source_file,
                    url,
                    references,
                    url_base=manifest.url_base,
                    timeout=extras.get("timeout"),
                    viewport_size=extras.get("viewport_size"),
                    dpi=extras.get("dpi"),
@@ -246,18 +243,18 @@ class WebDriverSpecTest(URLManifestItem)
 
     def to_json(self):
         rv = URLManifestItem.to_json(self)
         if self.timeout is not None:
             rv[-1]["timeout"] = self.timeout
         return rv
 
     @classmethod
-    def from_json(cls, manifest, tests_root, path, obj):
-        source_file = cls.source_file_cache.get(tests_root, manifest, path)
+    def from_json(cls, manifest, tests_root, path, obj, source_files=None):
+        source_file = get_source_file(source_files, tests_root, manifest, path)
 
         url, extras = obj
         return cls(source_file,
                    url,
                    url_base=manifest.url_base,
                    timeout=extras.get("timeout"),
                    manifest=manifest)
 
--- a/testing/web-platform/tests/tools/manifest/manifest.py
+++ b/testing/web-platform/tests/tools/manifest/manifest.py
@@ -1,23 +1,18 @@
 import itertools
+import json
 import os
 from collections import defaultdict
-from six import iteritems, iterkeys, itervalues, string_types
+from six import iteritems, itervalues, viewkeys, string_types
 
-from . import vcs
-from .item import (ManualTest, WebDriverSpecTest, Stub, RefTestNode, RefTest,
-                   TestharnessTest, SupportFile, ConformanceCheckerTest, VisualTest)
+from .item import ManualTest, WebDriverSpecTest, Stub, RefTestNode, RefTest, TestharnessTest, SupportFile, ConformanceCheckerTest, VisualTest
 from .log import get_logger
 from .utils import from_os_path, to_os_path
 
-try:
-    import ujson as json
-except ImportError:
-    import json
 
 CURRENT_VERSION = 5
 
 
 class ManifestError(Exception):
     pass
 
 
@@ -27,195 +22,32 @@ class ManifestVersionMismatch(ManifestEr
 
 def iterfilter(filters, iter):
     for f in filters:
         iter = f(iter)
     for item in iter:
         yield item
 
 
-item_classes = {"testharness": TestharnessTest,
-                "reftest": RefTest,
-                "reftest_node": RefTestNode,
-                "manual": ManualTest,
-                "stub": Stub,
-                "wdspec": WebDriverSpecTest,
-                "conformancechecker": ConformanceCheckerTest,
-                "visual": VisualTest,
-                "support": SupportFile}
-
-
-class TypeData(object):
-    def __init__(self, manifest, type_cls, meta_filters):
-        """Dict-like object containing the TestItems for each test type.
-
-        Loading an actual Item class for each test is unnecessarily
-        slow, so this class allows lazy-loading of the test
-        items. When the manifest is loaded we store the raw json
-        corresponding to the test type, and only create an Item
-        subclass when the test is accessed. In order to remain
-        API-compatible with consumers that depend on getting an Item
-        from iteration, we do egerly load all items when iterating
-        over the class."""
-        self.manifest = manifest
-        self.type_cls = type_cls
-        self.json_data = {}
-        self.tests_root = None
-        self.data = {}
-        self.meta_filters = meta_filters or []
-
-    def __getitem__(self, key):
-        if key not in self.data:
-            self.load(key)
-        return self.data[key]
-
-    def __bool__(self):
-        return bool(self.data)
-
-    def __len__(self):
-        rv = len(self.data)
-        if self.json_data is not None:
-            rv += len(self.json_data)
-        return rv
-
-    def __delitem__(self, key):
-        del self.data[key]
-
-    def __setitem__(self, key, value):
-        self.data[key] = value
-
-    def __contains__(self, key):
-        self.load_all()
-        return key in self.data
-
-    def __iter__(self):
-        self.load_all()
-        return self.data.__iter__()
-
-    def pop(self, key, default=None):
-        try:
-            value = self[key]
-        except ValueError:
-            value = default
-        else:
-            del self.data[key]
-        return value
-
-    def get(self, key, default=None):
-        try:
-            return self[key]
-        except ValueError:
-            return default
-
-    def itervalues(self):
-        self.load_all()
-        return itervalues(self.data)
-
-    def iteritems(self):
-        self.load_all()
-        return iteritems(self.data)
-
-    def values(self):
-        return self.itervalues()
-
-    def items(self):
-        return self.iteritems()
-
-    def load(self, key):
-        """Load a specific Item given a path"""
-        if self.json_data is not None:
-            data = set()
-            path = from_os_path(key)
-            for test in iterfilter(self.meta_filters, self.json_data.get(path, [])):
-                manifest_item = self.type_cls.from_json(self.manifest,
-                                                        self.tests_root,
-                                                        path,
-                                                        test)
-                data.add(manifest_item)
-            try:
-                del self.json_data[path]
-            except KeyError:
-                pass
-            self.data[key] = data
-        else:
-            raise ValueError
-
-    def load_all(self):
-        """Load all test items in this class"""
-        if self.json_data is not None:
-            for path, value in iteritems(self.json_data):
-                key = to_os_path(path)
-                if key in self.data:
-                    continue
-                data = set()
-                for test in iterfilter(self.meta_filters, self.json_data.get(path, [])):
-                    manifest_item = self.type_cls.from_json(self.manifest,
-                                                            self.tests_root,
-                                                            path,
-                                                            test)
-                    data.add(manifest_item)
-                self.data[key] = data
-            self.json_data = None
-
-    def set_json(self, tests_root, data):
-        if not isinstance(data, dict):
-            raise ValueError("Got a %s expected a dict" % (type(data)))
-        self.tests_root = tests_root
-        self.json_data = data
-
-    def paths(self):
-        """Get a list of all paths containing items of this type,
-        without actually constructing all the items"""
-        rv = set(iterkeys(self.data))
-        if self.json_data:
-            rv |= set(to_os_path(item) for item in iterkeys(self.json_data))
-        return rv
-
-
-class ManifestData(dict):
-    def __init__(self, manifest, meta_filters=None):
-        """Dictionary subclass containing a TypeData instance for each test type,
-        keyed by type name"""
-        self.initialized = False
-        for key, value in iteritems(item_classes):
-            self[key] = TypeData(manifest, value, meta_filters=meta_filters)
-        self.initialized = True
-        self.json_obj = None
-
-    def __setitem__(self, key, value):
-        if self.initialized:
-            raise AttributeError
-        dict.__setitem__(self, key, value)
-
-    def paths(self):
-        """Get a list of all paths containing test items
-        without actually constructing all the items"""
-        rv = set()
-        for item_data in itervalues(self):
-            rv |= set(item_data.paths())
-        return rv
-
-
 class Manifest(object):
-    def __init__(self, url_base="/", meta_filters=None):
+    def __init__(self, url_base="/"):
         assert url_base is not None
         self._path_hash = {}
-        self._data = ManifestData(self, meta_filters)
+        self._data = defaultdict(dict)
         self._reftest_nodes_by_url = None
         self.url_base = url_base
 
     def __iter__(self):
         return self.itertypes()
 
     def itertypes(self, *types):
         if not types:
             types = sorted(self._data.keys())
         for item_type in types:
-            for path in sorted(self._data[item_type]):
-                tests = self._data[item_type][path]
+            for path, tests in sorted(iteritems(self._data[item_type])):
                 yield item_type, path, tests
 
     def iterpath(self, path):
         for type_tests in self._data.values():
             for test in type_tests.get(path, set()):
                 yield test
 
     def iterdir(self, dir_name):
@@ -237,96 +69,71 @@ class Manifest(object):
                     by_url[node.url] = node
             self._reftest_nodes_by_url = by_url
         return self._reftest_nodes_by_url
 
     def get_reference(self, url):
         return self.reftest_nodes_by_url.get(url)
 
     def update(self, tree):
-        """Update the manifest given an iterable of items that make up the updated manifest.
+        new_data = defaultdict(dict)
+        new_hashes = {}
 
-        The iterable must either generate tuples of the form (SourceFile, True) for paths
-        that are to be updated, or (path, False) for items that are not to be updated. This
-        unusual API is designed as an optimistaion meaning that SourceFile items need not be
-        constructed in the case we are not updating a path, but the absence of an item from
-        the iterator may be used to remove defunct entries from the manifest."""
         reftest_nodes = []
-        seen_files = set()
+        old_files = defaultdict(set, {k: set(viewkeys(v)) for k, v in iteritems(self._data)})
 
         changed = False
         reftest_changes = False
 
-        prev_files = self._data.paths()
-
-        reftest_types = ("reftest", "reftest_node")
+        for source_file in tree:
+            rel_path = source_file.rel_path
+            file_hash = source_file.hash
 
-        for source_file, update in tree:
-            if not update:
-                rel_path = source_file
-                seen_files.add(rel_path)
-            else:
-                rel_path = source_file.rel_path
-                seen_files.add(rel_path)
-
-                file_hash = source_file.hash
+            is_new = rel_path not in self._path_hash
+            hash_changed = False
 
-                is_new = rel_path not in self._path_hash
-                hash_changed = False
-
-                if not is_new:
-                    old_hash, old_type = self._path_hash[rel_path]
-                    if old_hash != file_hash:
-                        new_type, manifest_items = source_file.manifest_items()
-                        hash_changed = True
-                    else:
-                        new_type, manifest_items = old_type, self._data[old_type][rel_path]
-                    if old_type in reftest_types and new_type != old_type:
-                        reftest_changes = True
+            if not is_new:
+                old_hash, old_type = self._path_hash[rel_path]
+                old_files[old_type].remove(rel_path)
+                if old_hash != file_hash:
+                    new_type, manifest_items = source_file.manifest_items()
+                    hash_changed = True
                 else:
-                    new_type, manifest_items = source_file.manifest_items()
+                    new_type, manifest_items = old_type, self._data[old_type][rel_path]
+                if old_type in ("reftest", "reftest_node") and new_type != old_type:
+                    reftest_changes = True
+            else:
+                new_type, manifest_items = source_file.manifest_items()
 
-                if new_type in ("reftest", "reftest_node"):
-                    reftest_nodes.extend(manifest_items)
-                    if is_new or hash_changed:
-                        reftest_changes = True
-                elif new_type:
-                    self._data[new_type][rel_path] = set(manifest_items)
+            if new_type in ("reftest", "reftest_node"):
+                reftest_nodes.extend(manifest_items)
+                if is_new or hash_changed:
+                    reftest_changes = True
+            elif new_type:
+                new_data[new_type][rel_path] = set(manifest_items)
+
+            new_hashes[rel_path] = (file_hash, new_type)
 
-                self._path_hash[rel_path] = (file_hash, new_type)
+            if is_new or hash_changed:
+                changed = True
 
-                if is_new or hash_changed:
-                    changed = True
+        if reftest_changes or old_files["reftest"] or old_files["reftest_node"]:
+            reftests, reftest_nodes, changed_hashes = self._compute_reftests(reftest_nodes)
+            new_data["reftest"] = reftests
+            new_data["reftest_node"] = reftest_nodes
+            new_hashes.update(changed_hashes)
+        else:
+            new_data["reftest"] = self._data["reftest"]
+            new_data["reftest_node"] = self._data["reftest_node"]
 
-        deleted = prev_files - seen_files
-        if deleted:
+        if any(itervalues(old_files)):
             changed = True
-            for rel_path in deleted:
-                if rel_path in self._path_hash:
-                    _, old_type = self._path_hash[rel_path]
-                    if old_type in reftest_types:
-                        reftest_changes = True
-                    try:
-                        del self._path_hash[rel_path]
-                    except KeyError:
-                        pass
-                    try:
-                        del self._data[old_type][rel_path]
-                    except KeyError:
-                        pass
-                else:
-                    for test_data in itervalues(self._data):
-                        if rel_path in test_data:
-                            del test_data[rel_path]
 
-        if reftest_changes:
-            reftests, reftest_nodes, changed_hashes = self._compute_reftests(reftest_nodes)
-            self._data["reftest"].data = reftests
-            self._data["reftest_node"].data = reftest_nodes
-            self._path_hash.update(changed_hashes)
+        self._data = new_data
+        self._path_hash = new_hashes
 
         return changed
 
     def _compute_reftests(self, reftest_nodes):
         self._reftest_nodes_by_url = {}
         has_inbound = set()
         for item in reftest_nodes:
             for ref_url, ref_type in item.references:
@@ -356,122 +163,94 @@ class Manifest(object):
 
     def to_json(self):
         out_items = {
             test_type: {
                 from_os_path(path):
                 [t for t in sorted(test.to_json() for test in tests)]
                 for path, tests in iteritems(type_paths)
             }
-            for test_type, type_paths in iteritems(self._data) if type_paths
+            for test_type, type_paths in iteritems(self._data)
         }
         rv = {"url_base": self.url_base,
               "paths": {from_os_path(k): v for k, v in iteritems(self._path_hash)},
               "items": out_items,
               "version": CURRENT_VERSION}
         return rv
 
     @classmethod
     def from_json(cls, tests_root, obj, types=None, meta_filters=None):
         version = obj.get("version")
         if version != CURRENT_VERSION:
             raise ManifestVersionMismatch
 
-        self = cls(url_base=obj.get("url_base", "/"), meta_filters=meta_filters)
+        self = cls(url_base=obj.get("url_base", "/"))
         if not hasattr(obj, "items") and hasattr(obj, "paths"):
             raise ManifestError
 
         self._path_hash = {to_os_path(k): v for k, v in iteritems(obj["paths"])}
 
+        item_classes = {"testharness": TestharnessTest,
+                        "reftest": RefTest,
+                        "reftest_node": RefTestNode,
+                        "manual": ManualTest,
+                        "stub": Stub,
+                        "wdspec": WebDriverSpecTest,
+                        "conformancechecker": ConformanceCheckerTest,
+                        "visual": VisualTest,
+                        "support": SupportFile}
+
+        meta_filters = meta_filters or []
+
+        source_files = {}
+
         for test_type, type_paths in iteritems(obj["items"]):
             if test_type not in item_classes:
                 raise ManifestError
 
             if types and test_type not in types:
                 continue
 
-            self._data[test_type].set_json(tests_root, type_paths)
+            test_cls = item_classes[test_type]
+            tests = defaultdict(set)
+            for path, manifest_tests in iteritems(type_paths):
+                path = to_os_path(path)
+                for test in iterfilter(meta_filters, manifest_tests):
+                    manifest_item = test_cls.from_json(self,
+                                                       tests_root,
+                                                       path,
+                                                       test,
+                                                       source_files=source_files)
+                    tests[path].add(manifest_item)
+            self._data[test_type] = tests
 
         return self
 
 
 def load(tests_root, manifest, types=None, meta_filters=None):
     logger = get_logger()
 
-    logger.warning("Prefer load_and_update instead")
-    return _load(logger, tests_root, manifest, types, meta_filters)
-
-
-def _load(logger, tests_root, manifest, types=None, meta_filters=None):
     # "manifest" is a path or file-like object.
     if isinstance(manifest, string_types):
         if os.path.exists(manifest):
             logger.debug("Opening manifest at %s" % manifest)
         else:
             logger.debug("Creating new manifest at %s" % manifest)
         try:
             with open(manifest) as f:
-                rv = Manifest.from_json(tests_root,
-                                        json.load(f),
-                                        types=types,
-                                        meta_filters=meta_filters)
+                rv = Manifest.from_json(tests_root, json.load(f), types=types, meta_filters=meta_filters)
         except IOError:
             return None
         except ValueError:
             logger.warning("%r may be corrupted", manifest)
             return None
         return rv
 
-    return Manifest.from_json(tests_root,
-                              json.load(manifest),
-                              types=types,
-                              meta_filters=meta_filters)
-
-
-def load_and_update(tests_root,
-                    manifest_path,
-                    url_base,
-                    update=True,
-                    rebuild=False,
-                    metadata_path=None,
-                    cache_root=None,
-                    working_copy=False,
-                    types=None,
-                    meta_filters=None,
-                    write_manifest=True):
-    logger = get_logger()
-
-    manifest = None
-    if not rebuild:
-        try:
-            manifest = _load(logger,
-                             tests_root,
-                             manifest_path,
-                             types=types,
-                             meta_filters=meta_filters)
-        except ManifestVersionMismatch:
-            logger.info("Manifest version changed, rebuilding")
-
-        if manifest is not None and manifest.url_base != url_base:
-            logger.info("Manifest url base did not match, rebuilding")
-
-    if manifest is None:
-        manifest = Manifest(url_base, meta_filters=meta_filters)
-        update = True
-
-    if update:
-        tree = vcs.get_tree(tests_root, manifest, manifest_path, cache_root,
-                            working_copy, rebuild)
-        changed = manifest.update(tree)
-        if write_manifest and changed:
-            write(manifest, manifest_path)
-        tree.dump_caches()
-
-    return manifest
+    return Manifest.from_json(tests_root, json.load(manifest), types=types, meta_filters=meta_filters)
 
 
 def write(manifest, manifest_path):
     dir_name = os.path.dirname(manifest_path)
     if not os.path.exists(dir_name):
         os.makedirs(dir_name)
     with open(manifest_path, "wb") as f:
-        json.dump(manifest.to_json(), f, sort_keys=True, indent=1)
+        json.dump(manifest.to_json(), f, sort_keys=True, indent=1, separators=(',', ': '))
         f.write("\n")
--- a/testing/web-platform/tests/tools/manifest/tests/test_manifest.py
+++ b/testing/web-platform/tests/tools/manifest/tests/test_manifest.py
@@ -67,17 +67,17 @@ def sourcefile_strategy(draw):
 
 @h.given(hs.lists(sourcefile_strategy(),
                   min_size=1, average_size=10, max_size=1000,
                   unique_by=lambda x: x.rel_path))
 @h.example([SourceFileWithTest("a", "0"*40, item.ConformanceCheckerTest)])
 def test_manifest_to_json(s):
     m = manifest.Manifest()
 
-    assert m.update((item, True) for item in s) is True
+    assert m.update(s) is True
 
     json_str = m.to_json()
     loaded = manifest.Manifest.from_json("/", json_str)
 
     assert list(loaded) == list(m)
 
     assert loaded.to_json() == json_str
 
@@ -85,226 +85,228 @@ def test_manifest_to_json(s):
 @h.given(hs.lists(sourcefile_strategy(),
                   min_size=1, average_size=10,
                   unique_by=lambda x: x.rel_path))
 @h.example([SourceFileWithTest("a", "0"*40, item.TestharnessTest)])
 @h.example([SourceFileWithTest("a", "0"*40, item.RefTest, [("/aa", "==")])])
 def test_manifest_idempotent(s):
     m = manifest.Manifest()
 
-    assert m.update((item, True) for item in s) is True
+    assert m.update(s) is True
 
     m1 = list(m)
 
-    assert m.update((item, True) for item in s) is False
+    assert m.update(s) is False
 
     assert list(m) == m1
 
 
 def test_manifest_to_json_forwardslash():
     m = manifest.Manifest()
 
     s = SourceFileWithTest("a/b", "0"*40, item.TestharnessTest)
 
-    assert m.update([(s, True)]) is True
+    assert m.update([s]) is True
 
     assert m.to_json() == {
         'paths': {
             'a/b': ('0000000000000000000000000000000000000000', 'testharness')
         },
         'version': 5,
         'url_base': '/',
         'items': {
+            'reftest': {},
+            'reftest_node': {},
             'testharness': {
                 'a/b': [['/a/b', {}]]
             }
         }
     }
 
 
 def test_manifest_to_json_backslash():
     m = manifest.Manifest()
 
     s = SourceFileWithTest("a\\b", "0"*40, item.TestharnessTest)
 
     if os.path.sep == "\\":
-        assert m.update([(s, True)]) is True
+        assert m.update([s]) is True
 
         assert m.to_json() == {
             'paths': {
                 'a/b': ('0000000000000000000000000000000000000000', 'testharness')
             },
             'version': 5,
             'url_base': '/',
             'items': {
+                'reftest': {},
+                'reftest_node': {},
                 'testharness': {
                     'a/b': [['/a/b', {}]]
                 }
             }
         }
     else:
         with pytest.raises(ValueError):
             # one of these must raise ValueError
             # the first must return True if it doesn't raise
-            assert m.update([(s, True)]) is True
+            assert m.update([s]) is True
             m.to_json()
 
 
 def test_manifest_from_json_backslash():
     json_obj = {
         'paths': {
             'a\\b': ('0000000000000000000000000000000000000000', 'testharness')
         },
         'version': 5,
         'url_base': '/',
         'items': {
+            'reftest': {},
+            'reftest_node': {},
             'testharness': {
                 'a\\b': [['/a/b', {}]]
             }
         }
     }
 
     with pytest.raises(ValueError):
         manifest.Manifest.from_json("/", json_obj)
 
 
 def test_reftest_computation_chain():
     m = manifest.Manifest()
 
     s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
     s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
 
-    m.update([(s1, True), (s2, True)])
+    m.update([s1, s2])
 
     test1 = s1.manifest_items()[1][0]
     test2 = s2.manifest_items()[1][0]
     test2_node = test2.to_RefTestNode()
 
     assert list(m) == [("reftest", test1.path, {test1}),
                        ("reftest_node", test2.path, {test2_node})]
 
 
 def test_reftest_computation_chain_update_add():
     m = manifest.Manifest()
 
     s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
     test2 = s2.manifest_items()[1][0]
 
-    assert m.update([(s2, True)]) is True
+    assert m.update([s2]) is True
 
     assert list(m) == [("reftest", test2.path, {test2})]
 
     s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
     test1 = s1.manifest_items()[1][0]
 
     # s2's hash is unchanged, but it has gone from a test to a node
-    assert m.update([(s1, True), (s2, True)]) is True
+    assert m.update([s1, s2]) is True
 
     test2_node = test2.to_RefTestNode()
 
     assert list(m) == [("reftest", test1.path, {test1}),
                        ("reftest_node", test2.path, {test2_node})]
 
 
 def test_reftest_computation_chain_update_remove():
     m = manifest.Manifest()
 
     s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
     s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
 
-    assert m.update([(s1, True), (s2, True)]) is True
+    assert m.update([s1, s2]) is True
 
     test1 = s1.manifest_items()[1][0]
     test2 = s2.manifest_items()[1][0]
     test2_node = test2.to_RefTestNode()
 
     assert list(m) == [("reftest", test1.path, {test1}),
                        ("reftest_node", test2.path, {test2_node})]
 
     # s2's hash is unchanged, but it has gone from a node to a test
-    assert m.update([(s2, True)]) is True
+    assert m.update([s2]) is True
 
     assert list(m) == [("reftest", test2.path, {test2})]
 
 
 def test_reftest_computation_chain_update_test_type():
     m = manifest.Manifest()
 
     s1 = SourceFileWithTest("test", "0"*40, item.RefTest, [("/test-ref", "==")])
 
-    assert m.update([(s1, True)]) is True
+    assert m.update([s1]) is True
 
     test1 = s1.manifest_items()[1][0]
 
     assert list(m) == [("reftest", test1.path, {test1})]
 
     # test becomes a testharness test (hash change because that is determined
     # based on the file contents). The updated manifest should not includes the
     # old reftest.
     s2 = SourceFileWithTest("test", "1"*40, item.TestharnessTest)
-    assert m.update([(s2, True)]) is True
+    assert m.update([s2]) is True
 
     test2 = s2.manifest_items()[1][0]
 
     assert list(m) == [("testharness", test2.path, {test2})]
 
 
 def test_reftest_computation_chain_update_node_change():
     m = manifest.Manifest()
 
     s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
     s2 = SourceFileWithTest("test2", "0"*40, item.RefTestNode, [("/test3", "==")])
 
-    assert m.update([(s1, True), (s2, True)]) is True
+    assert m.update([s1, s2]) is True
 
     test1 = s1.manifest_items()[1][0]
     test2 = s2.manifest_items()[1][0]
 
     assert list(m) == [("reftest", test1.path, {test1}),
                        ("reftest_node", test2.path, {test2})]
 
     #test2 changes to support type
     s2 = SourceFileWithTest("test2", "1"*40, item.SupportFile)
 
-    assert m.update([(s1, True), (s2, True)]) is True
+    assert m.update([s1, s2]) is True
     test3 = s2.manifest_items()[1][0]
 
     assert list(m) == [("reftest", test1.path, {test1}),
                        ("support", test3.path, {test3})]
 
 
 def test_iterpath():
     m = manifest.Manifest()
 
-    # This has multiple test types from the same file, which isn't really supported,
-    # so pretend they have different hashes
     sources = [SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test1-ref", "==")]),
                SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test2-ref", "==")]),
-               SourceFileWithTests("test2", "1"*40, item.TestharnessTest, [("/test2-1.html",),
+               SourceFileWithTests("test2", "0"*40, item.TestharnessTest, [("/test2-1.html",),
                                                                            ("/test2-2.html",)]),
                SourceFileWithTest("test3", "0"*40, item.TestharnessTest)]
-    m.update([(s, True) for s in sources])
+    m.update(sources)
 
     assert set(item.url for item in m.iterpath("test2")) == set(["/test2",
                                                                  "/test2-1.html",
                                                                  "/test2-2.html"])
     assert set(m.iterpath("missing")) == set()
 
 
 def test_filter():
     m = manifest.Manifest()
 
-    # This has multiple test types from the same file, which isn't really supported,
-    # so pretend they have different hashes
     sources = [SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test1-ref", "==")]),
-               SourceFileWithTest("test2", "1"*40, item.RefTest, [("/test2-ref", "==")]),
+               SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test2-ref", "==")]),
                SourceFileWithTests("test2", "0"*40, item.TestharnessTest, [("/test2-1.html",),
                                                                            ("/test2-2.html",)]),
                SourceFileWithTest("test3", "0"*40, item.TestharnessTest)]
-    m.update([(s, True) for s in sources])
+    m.update(sources)
 
     json = m.to_json()
 
     def filter(it):
         for test in it:
             if test[0] in ["/test2-2.html", "/test3"]:
                 yield test
 
@@ -321,60 +323,19 @@ def test_filter():
 
 
 def test_reftest_node_by_url():
     m = manifest.Manifest()
 
     s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
     s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
 
-    m.update([(s1, True), (s2, True)])
+    m.update([s1, s2])
 
     test1 = s1.manifest_items()[1][0]
     test2 = s2.manifest_items()[1][0]
     test2_node = test2.to_RefTestNode()
 
     assert m.reftest_nodes_by_url == {"/test1": test1,
                                       "/test2": test2_node}
     m._reftest_nodes_by_url = None
     assert m.reftest_nodes_by_url == {"/test1": test1,
                                       "/test2": test2_node}
-
-
-def test_no_update():
-    m = manifest.Manifest()
-
-    s1 = SourceFileWithTest("test1", "0"*40, item.TestharnessTest)
-    s2 = SourceFileWithTest("test2", "0"*40, item.TestharnessTest)
-
-    m.update([(s1, True), (s2, True)])
-
-    test1 = s1.manifest_items()[1][0]
-    test2 = s2.manifest_items()[1][0]
-
-    assert list(m) == [("testharness", test1.path, {test1}),
-                       ("testharness", test2.path, {test2})]
-
-    s1_1 = SourceFileWithTest("test1", "1"*40, item.TestharnessTest)
-
-    m.update([(s1, True), (s2.rel_path, False)])
-
-    test1_1 = s1_1.manifest_items()[1][0]
-
-    assert list(m) == [("testharness", test1_1.path, {test1_1}),
-                       ("testharness", test2.path, {test2})]
-
-
-def test_no_update_delete():
-    m = manifest.Manifest()
-
-    s1 = SourceFileWithTest("test1", "0"*40, item.TestharnessTest)
-    s2 = SourceFileWithTest("test2", "0"*40, item.TestharnessTest)
-
-    m.update([(s1, True), (s2, True)])
-
-    s1_1 = SourceFileWithTest("test1", "1"*40, item.TestharnessTest)
-
-    m.update([(s1, True)])
-
-    test1_1 = s1_1.manifest_items()[1][0]
-
-    assert list(m) == [("testharness", test1_1.path, {test1_1})]
--- a/testing/web-platform/tests/tools/manifest/update.py
+++ b/testing/web-platform/tests/tools/manifest/update.py
@@ -8,46 +8,52 @@ from .log import get_logger
 from .download import download_from_github
 
 here = os.path.dirname(__file__)
 
 wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
 
 logger = get_logger()
 
+def update(tests_root, manifest, working_copy=False):
+    logger.info("Updating manifest")
+    tree = None
+    if not working_copy:
+        tree = vcs.Git.for_path(tests_root, manifest.url_base)
+    if tree is None:
+        tree = vcs.FileSystem(tests_root, manifest.url_base)
 
-def update(tests_root,
-           manifest,
-           manifest_path=None,
-           working_copy=False,
-           cache_root=None,
-           rebuild=False):
-    logger.warning("Deprecated; use manifest.load_and_update instead")
-    logger.info("Updating manifest")
-
-    tree = vcs.get_tree(tests_root, manifest, manifest_path, cache_root,
-                        working_copy, rebuild)
     return manifest.update(tree)
 
 
 def update_from_cli(**kwargs):
     tests_root = kwargs["tests_root"]
     path = kwargs["path"]
     assert tests_root is not None
 
+    m = None
+
     if kwargs["download"]:
         download_from_github(path, tests_root)
 
-    manifest.load_and_update(tests_root,
-                             path,
-                             kwargs["url_base"],
-                             update=True,
-                             rebuild=kwargs["rebuild"],
-                             cache_root=kwargs["cache_root"],
-                             working_copy=kwargs["work"])
+    if not kwargs.get("rebuild", False):
+        try:
+            m = manifest.load(tests_root, path)
+        except manifest.ManifestVersionMismatch:
+            logger.info("Manifest version changed, rebuilding")
+            m = None
+
+    if m is None:
+        m = manifest.Manifest(kwargs["url_base"])
+
+    changed = update(tests_root,
+                     m,
+                     working_copy=kwargs["work"])
+    if changed:
+        manifest.write(m, path)
 
 
 def abs_path(path):
     return os.path.abspath(os.path.expanduser(path))
 
 
 def create_parser():
     parser = argparse.ArgumentParser()
@@ -62,35 +68,33 @@ def create_parser():
         "--work", action="store_true", default=False,
         help="Build from the working tree rather than the latest commit")
     parser.add_argument(
         "--url-base", action="store", default="/",
         help="Base url to use as the mount point for tests in this manifest.")
     parser.add_argument(
         "--no-download", dest="download", action="store_false", default=True,
         help="Never attempt to download the manifest.")
-    parser.add_argument(
-        "--cache-root", action="store", default=os.path.join(wpt_root, ".wptcache"),
-        help="Path in which to store any caches (default <tests_root>/.wptcache/")
     return parser
 
 
 def find_top_repo():
     path = here
     rv = None
     while path != "/":
         if vcs.is_git_repo(path):
             rv = path
         path = os.path.abspath(os.path.join(path, os.pardir))
 
     return rv
 
 
-def run(*args, **kwargs):
+def run(**kwargs):
     if kwargs["path"] is None:
         kwargs["path"] = os.path.join(kwargs["tests_root"], "MANIFEST.json")
+
     update_from_cli(**kwargs)
 
 
 def main():
     opts = create_parser().parse_args()
 
     run(**vars(opts))
--- a/testing/web-platform/tests/tools/manifest/vcs.py
+++ b/testing/web-platform/tests/tools/manifest/vcs.py
@@ -1,72 +1,40 @@
-import json
 import os
+import subprocess
 import platform
-import stat
-import subprocess
-from collections import deque
 
 from .sourcefile import SourceFile
 
 
-def get_tree(tests_root, manifest, manifest_path, cache_root,
-             working_copy=False, rebuild=False):
-    tree = None
-    if cache_root is None:
-        cache_root = os.path.join(tests_root, ".wptcache")
-    if not os.path.exists(cache_root):
-        try:
-            os.makedirs(cache_root)
-        except IOError:
-            cache_root = None
-
-    if not working_copy:
-        tree = Git.for_path(tests_root,
-                            manifest.url_base,
-                            manifest_path=manifest_path,
-                            cache_path=cache_root,
-                            rebuild=rebuild)
-    if tree is None:
-        tree = FileSystem(tests_root,
-                          manifest.url_base,
-                          manifest_path=manifest_path,
-                          cache_path=cache_root,
-                          rebuild=rebuild)
-    return tree
-
-
 class Git(object):
-    def __init__(self, repo_root, url_base, cache_path, manifest_path=None,
-                 rebuild=False):
-        self.root = repo_root
+    def __init__(self, repo_root, url_base):
+        self.root = os.path.abspath(repo_root)
         self.git = Git.get_func(repo_root)
         self.url_base = url_base
-        # rebuild is a noop for now since we don't cache anything
 
     @staticmethod
     def get_func(repo_path):
         def git(cmd, *args):
             full_cmd = ["git", cmd] + list(args)
             try:
                 return subprocess.check_output(full_cmd, cwd=repo_path, stderr=subprocess.STDOUT)
             except Exception as e:
                 if platform.uname()[0] == "Windows" and isinstance(e, WindowsError):
                         full_cmd[0] = "git.bat"
                         return subprocess.check_output(full_cmd, cwd=repo_path, stderr=subprocess.STDOUT)
                 else:
                     raise
         return git
 
     @classmethod
-    def for_path(cls, path, url_base, cache_path, manifest_path=None, rebuild=False):
+    def for_path(cls, path, url_base):
         git = Git.get_func(path)
         try:
-            return cls(git("rev-parse", "--show-toplevel").rstrip(), url_base, cache_path,
-                       manifest_path=manifest_path, rebuild=rebuild)
+            return cls(git("rev-parse", "--show-toplevel").rstrip(), url_base)
         except subprocess.CalledProcessError:
             return None
 
     def _local_changes(self):
         changes = {}
         cmd = ["status", "-z", "--ignore-submodules=all"]
         data = self.git(*cmd)
 
@@ -101,199 +69,32 @@ class Git(object):
                 if rel_path in local_changes:
                     contents = self._show_file(rel_path)
                 else:
                     contents = None
                 yield SourceFile(self.root,
                                  rel_path,
                                  self.url_base,
                                  hash,
-                                 contents=contents), True
-
-    def dump_caches(self):
-        pass
+                                 contents=contents)
 
 
 class FileSystem(object):
-    def __init__(self, root, url_base, cache_path, manifest_path=None, rebuild=False):
-        from gitignore import gitignore
-        self.root = os.path.abspath(root)
+    def __init__(self, root, url_base):
+        self.root = root
         self.url_base = url_base
-        self.ignore_cache = None
-        self.mtime_cache = None
-        if cache_path is not None:
-            if manifest_path is not None:
-                self.mtime_cache = MtimeCache(cache_path, root, manifest_path, rebuild)
-            if gitignore.has_ignore(root):
-                self.ignore_cache = GitIgnoreCache(cache_path, root, rebuild)
-        self.path_filter = gitignore.PathFilter(self.root,
-                                                extras=[".git/"],
-                                                cache=self.ignore_cache)
+        from gitignore import gitignore
+        self.path_filter = gitignore.PathFilter(self.root, extras=[".git/"])
 
     def __iter__(self):
-        mtime_cache = self.mtime_cache
-        for dirpath, dirnames, filenames in self.path_filter(walk(self.root)):
-            for filename, path_stat in filenames:
-                path = os.path.join(dirpath, filename)
-                if mtime_cache is None or mtime_cache.updated(path, path_stat):
-                    yield SourceFile(self.root, path, self.url_base), True
-                else:
-                    yield path, False
-
-    def dump_caches(self):
-        for cache in [self.mtime_cache, self.ignore_cache]:
-            if cache is not None:
-                cache.dump()
-
-
-class CacheFile(object):
-    file_name = None
-
-    def __init__(self, cache_root, tests_root, rebuild=False):
-        self.tests_root = tests_root
-        if not os.path.exists(cache_root):
-            os.makedirs(cache_root)
-        self.path = os.path.join(cache_root, self.file_name)
-        self.modified = False
-        self.data = self.load(rebuild)
-
-    def dump(self):
-        if not self.modified:
-            return
-        with open(self.path, 'w') as f:
-            json.dump(self.data, f, indent=1)
-
-    def load(self, rebuild=False):
-        data = {}
-        try:
-            if not rebuild:
-                with open(self.path, 'r') as f:
-                    data = json.load(f)
-                data = self.check_valid(data)
-        except IOError:
-            pass
-        return data
-
-    def check_valid(self, data):
-        """Check if the cached data is valid and return an updated copy of the
-        cache containing only data that can be used."""
-        return data
-
-
-class MtimeCache(CacheFile):
-    file_name = "mtime.json"
-
-    def __init__(self, cache_root, tests_root, manifest_path, rebuild=False):
-        self.manifest_path = manifest_path
-        super(MtimeCache, self).__init__(cache_root, tests_root, rebuild=False)
-
-    def updated(self, rel_path, stat):
-        """Return a boolean indicating whether the file changed since the cache was last updated.
-
-        This implicitly updates the cache with the new mtime data."""
-        mtime = stat.st_mtime
-        if mtime != self.data.get(rel_path):
-            self.modified = True
-            self.data[rel_path] = mtime
-            return True
-        return False
-
-    def check_valid(self, data):
-        if data.get("/tests_root") != self.tests_root:
-            self.modified = True
-        else:
-            if self.manifest_path is not None and os.path.exists(self.manifest_path):
-                mtime = os.path.getmtime(self.manifest_path)
-                if data.get("/manifest_path") != [self.manifest_path, mtime]:
-                    self.modified = True
-            else:
-                self.modified = True
-        if self.modified:
-            data = {}
-            data["/tests_root"] = self.tests_root
-        return data
+        paths = self.get_paths()
+        for path in paths:
+            yield SourceFile(self.root, path, self.url_base)
 
-    def dump(self):
-        if self.manifest_path is None:
-            raise ValueError
-        if not os.path.exists(self.manifest_path):
-            return
-        mtime = os.path.getmtime(self.manifest_path)
-        self.data["/manifest_path"] = [self.manifest_path, mtime]
-        self.data["/tests_root"] = self.tests_root
-        super(MtimeCache, self).dump()
-
-
-class GitIgnoreCache(CacheFile):
-    file_name = "gitignore.json"
-
-    def check_valid(self, data):
-        ignore_path = os.path.join(self.tests_root, ".gitignore")
-        mtime = os.path.getmtime(ignore_path)
-        if data.get("/gitignore_file") != [ignore_path, mtime]:
-            self.modified = True
-            data = {}
-            data["/gitignore_file"] = [ignore_path, mtime]
-        return data
-
-    def __contains__(self, key):
-        return key in self.data
-
-    def __getitem__(self, key):
-        return self.data[key]
-
-    def __setitem__(self, key, value):
-        if self.data.get(key) != value:
-            self.modified = True
-            self.data[key] = value
-
-
-def walk(root):
-    """Re-implementation of os.walk. Returns an iterator over
-    (dirpath, dirnames, filenames), with some semantic differences
-    to os.walk.
+    def get_paths(self):
+        for dirpath, dirnames, filenames in os.walk(self.root):
+            for filename in filenames:
+                path = os.path.relpath(os.path.join(dirpath, filename), self.root)
+                if self.path_filter(path):
+                    yield path
 
-    This has a similar interface to os.walk, with the important difference
-    that instead of lists of filenames and directory names, it yields
-    lists of tuples of the form [(name, stat)] where stat is the result of
-    os.stat for the file. That allows reusing the same stat data in the
-    caller. It also always returns the dirpath relative to the root, with
-    the root iself being returned as the empty string.
-
-    Unlike os.walk the implementation is not recursive."""
-
-    listdir = os.listdir
-    get_stat = os.stat
-    listdir = os.listdir
-    join = os.path.join
-    is_dir = stat.S_ISDIR
-    is_link = stat.S_ISLNK
-    relpath = os.path.relpath
-
-    root = os.path.abspath(root)
-    stack = deque([(root, "")])
-
-    while stack:
-        dir_path, rel_path = stack.popleft()
-        try:
-            # Note that listdir and error are globals in this module due
-            # to earlier import-*.
-            names = listdir(dir_path)
-        except OSError:
-            continue
-
-        dirs, non_dirs = [], []
-        for name in names:
-            path = join(dir_path, name)
-            try:
-                path_stat = get_stat(path)
-            except OSError:
-                continue
-            if is_dir(path_stat.st_mode):
-                dirs.append((name, path_stat))
-            else:
-                non_dirs.append((name, path_stat))
-
-        yield rel_path, dirs, non_dirs
-        for name, path_stat in dirs:
-            new_path = join(dir_path, name)
-            if not is_link(path_stat.st_mode):
-                stack.append((new_path, relpath(new_path, root)))
+            dirnames[:] = [item for item in dirnames if self.path_filter(
+                           os.path.relpath(os.path.join(dirpath, item), self.root) + "/")]
--- a/testing/web-platform/tests/tools/wpt/testfiles.py
+++ b/testing/web-platform/tests/tools/wpt/testfiles.py
@@ -3,17 +3,17 @@ import logging
 import os
 import re
 import subprocess
 import sys
 
 from collections import OrderedDict
 from six import iteritems
 
-from ..manifest import manifest
+from ..manifest import manifest, update
 
 here = os.path.dirname(__file__)
 wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
 
 logger = logging.getLogger()
 
 
 def get_git_cmd(repo_path):
@@ -185,18 +185,20 @@ def _init_manifest_cache():
 
     def load(manifest_path=None):
         if manifest_path is None:
             manifest_path = os.path.join(wpt_root, "MANIFEST.json")
         if c.get(manifest_path):
             return c[manifest_path]
         # cache at most one path:manifest
         c.clear()
-        wpt_manifest = manifest.load_and_update(wpt_root, manifest_path, "/",
-                                                update=True)
+        wpt_manifest = manifest.load(wpt_root, manifest_path)
+        if wpt_manifest is None:
+            wpt_manifest = manifest.Manifest()
+        update.update(wpt_root, wpt_manifest)
         c[manifest_path] = wpt_manifest
         return c[manifest_path]
     return load
 
 
 load_manifest = _init_manifest_cache()
 
 
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/testloader.py
@@ -1,9 +1,10 @@
 import hashlib
+import json
 import os
 import urlparse
 from abc import ABCMeta, abstractmethod
 from Queue import Empty
 from collections import defaultdict, OrderedDict, deque
 from multiprocessing import Queue
 
 import manifestinclude
@@ -361,30 +362,28 @@ class TestFilter(object):
             include_tests = set()
             for test in tests:
                 if self.manifest.include(test):
                     include_tests.add(test)
 
             if include_tests:
                 yield test_type, test_path, include_tests
 
-
 class TagFilter(object):
     def __init__(self, tags):
         self.tags = set(tags)
 
     def __call__(self, test_iter):
         for test in test_iter:
             if test.tags & self.tags:
                 yield test
 
 
 class ManifestLoader(object):
-    def __init__(self, test_paths, force_manifest_update=False, manifest_download=False,
-                 types=None, meta_filters=None):
+    def __init__(self, test_paths, force_manifest_update=False, manifest_download=False, types=None, meta_filters=None):
         do_delayed_imports()
         self.test_paths = test_paths
         self.force_manifest_update = force_manifest_update
         self.manifest_download = manifest_download
         self.types = types
         self.meta_filters = meta_filters or []
         self.logger = structured.get_default_logger()
         if self.logger is None:
@@ -395,22 +394,67 @@ class ManifestLoader(object):
         for url_base, paths in self.test_paths.iteritems():
             manifest_file = self.load_manifest(url_base=url_base,
                                                **paths)
             path_data = {"url_base": url_base}
             path_data.update(paths)
             rv[manifest_file] = path_data
         return rv
 
-    def load_manifest(self, tests_path, manifest_path, metadata_path, url_base="/", **kwargs):
-        cache_root = os.path.join(metadata_path, ".cache")
-        if self.manifest_download:
+    def create_manifest(self, manifest_path, tests_path, url_base="/"):
+        self.update_manifest(manifest_path, tests_path, url_base, recreate=True,
+                             download=self.manifest_download)
+
+    def update_manifest(self, manifest_path, tests_path, url_base="/",
+                        recreate=False, download=False):
+        self.logger.info("Updating test manifest %s" % manifest_path)
+        manifest_log.setup()
+
+        json_data = None
+        if download:
+            # TODO: make this not github-specific
             download_from_github(manifest_path, tests_path)
-        return manifest.load_and_update(tests_path, manifest_path, url_base,
-                                        cache_root=cache_root, update=self.force_manifest_update)
+
+        if not recreate:
+            try:
+                with open(manifest_path) as f:
+                    json_data = json.load(f)
+            except IOError:
+                self.logger.info("Unable to find test manifest")
+            except ValueError:
+                self.logger.info("Unable to parse test manifest")
+
+        if not json_data:
+            self.logger.info("Creating test manifest")
+            manifest_file = manifest.Manifest(url_base)
+        else:
+            try:
+                manifest_file = manifest.Manifest.from_json(tests_path, json_data)
+            except manifest.ManifestVersionMismatch:
+                manifest_file = manifest.Manifest(url_base)
+
+        manifest_update.update(tests_path, manifest_file, True)
+
+        manifest.write(manifest_file, manifest_path)
+
+    def load_manifest(self, tests_path, manifest_path, url_base="/", **kwargs):
+        if (not os.path.exists(manifest_path) or
+            self.force_manifest_update):
+            self.update_manifest(manifest_path, tests_path, url_base, download=self.manifest_download)
+        try:
+            manifest_file = manifest.load(tests_path, manifest_path, types=self.types, meta_filters=self.meta_filters)
+        except manifest.ManifestVersionMismatch:
+            manifest_file = manifest.Manifest(url_base)
+        if manifest_file.url_base != url_base:
+            self.logger.info("Updating url_base in manifest from %s to %s" % (manifest_file.url_base,
+                                                                              url_base))
+            manifest_file.url_base = url_base
+            manifest.write(manifest_file, manifest_path)
+
+        return manifest_file
 
 
 def iterfilter(filters, iter):
     for f in filters:
         iter = f(iter)
     for item in iter:
         yield item
 
@@ -460,22 +504,22 @@ class TestLoader(object):
     def test_ids(self):
         if self._test_ids is None:
             self._test_ids = []
             for test_dict in [self.disabled_tests, self.tests]:
                 for test_type in self.test_types:
                     self._test_ids += [item.id for item in test_dict[test_type]]
         return self._test_ids
 
-    def get_test(self, manifest_file, manifest_test, inherit_metadata, test_metadata):
+    def get_test(self, manifest_test, inherit_metadata, test_metadata):
         if test_metadata is not None:
             inherit_metadata.append(test_metadata)
             test_metadata = test_metadata.get_test(manifest_test.id)
 
-        return wpttest.from_manifest(manifest_file, manifest_test, inherit_metadata, test_metadata)
+        return wpttest.from_manifest(manifest_test, inherit_metadata, test_metadata)
 
     def load_dir_metadata(self, test_manifest, metadata_path, test_path):
         rv = []
         path_parts = os.path.dirname(test_path).split(os.path.sep)
         for i in xrange(len(path_parts) + 1):
             path = os.path.join(metadata_path, os.path.sep.join(path_parts[:i]), "__dir__.ini")
             if path not in self.directory_manifests:
                 self.directory_manifests[path] = manifestexpected.get_dir_manifest(path,
@@ -488,39 +532,37 @@ class TestLoader(object):
     def load_metadata(self, test_manifest, metadata_path, test_path):
         inherit_metadata = self.load_dir_metadata(test_manifest, metadata_path, test_path)
         test_metadata = manifestexpected.get_manifest(
             metadata_path, test_path, test_manifest.url_base, self.run_info)
         return inherit_metadata, test_metadata
 
     def iter_tests(self):
         manifest_items = []
-        manifests_by_url_base = {}
 
         for manifest in sorted(self.manifests.keys(), key=lambda x:x.url_base):
             manifest_iter = iterfilter(self.manifest_filters,
                                        manifest.itertypes(*self.test_types))
             manifest_items.extend(manifest_iter)
-            manifests_by_url_base[manifest.url_base] = manifest
 
         if self.chunker is not None:
             manifest_items = self.chunker(manifest_items)
 
         for test_type, test_path, tests in manifest_items:
-            manifest_file = manifests_by_url_base[iter(tests).next().url_base]
+            manifest_file = iter(tests).next().manifest
             metadata_path = self.manifests[manifest_file]["metadata_path"]
             inherit_metadata, test_metadata = self.load_metadata(manifest_file, metadata_path, test_path)
 
             for test in iterfilter(self.meta_filters,
-                                   self.iter_wpttest(manifest_file, inherit_metadata, test_metadata, tests)):
+                                   self.iter_wpttest(inherit_metadata, test_metadata, tests)):
                 yield test_path, test_type, test
 
-    def iter_wpttest(self, manifest_file, inherit_metadata, test_metadata, tests):
+    def iter_wpttest(self, inherit_metadata, test_metadata, tests):
         for manifest_test in tests:
-            yield self.get_test(manifest_file, manifest_test, inherit_metadata, test_metadata)
+            yield self.get_test(manifest_test, inherit_metadata, test_metadata)
 
     def _load_tests(self):
         """Read in the tests from the manifest file and add them to a queue"""
         tests = {"enabled":defaultdict(list),
                  "disabled":defaultdict(list)}
 
         for test_path, test_type, test in self.iter_tests():
             enabled = not test.disabled()
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_update.py
@@ -93,17 +93,17 @@ def suite_log(entries, run_info=None):
             entries +
             [("suite_end", {})])
 
 
 def create_test_manifest(tests, url_base="/"):
     source_files = []
     for i, (test, _, test_type, _) in enumerate(tests):
         if test_type:
-            source_files.append((SourceFileWithTest(test, str(i) * 40, item_classes[test_type]), True))
+            source_files.append(SourceFileWithTest(test, str(i) * 40, item_classes[test_type]))
     m = manifest.Manifest()
     m.update(source_files)
     return m
 
 
 def test_update_0():
     tests = [("path/to/test.htm", ["/path/to/test.htm"], "testharness",
               """[test.htm]
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wpttest.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/tests/test_wpttest.py
@@ -55,17 +55,17 @@ def test_metadata_inherit():
         for item in [dir_ini_0, dir_ini_1]]
     test_metadata = manifestexpected.static.compile(BytesIO(test_0),
                                                     {},
                                                     data_cls_getter=manifestexpected.data_cls_getter,
                                                     test_path="a",
                                                     url_base="")
 
     test = tests[0][2].pop()
-    test_obj = wpttest.from_manifest(tests, test, inherit_metadata, test_metadata.get_test(test.id))
+    test_obj = wpttest.from_manifest(test, inherit_metadata, test_metadata.get_test(test.id))
     assert test_obj.max_assertion_count == 3
     assert test_obj.min_assertion_count == 1
     assert test_obj.prefs == {"b": "c", "c": "d"}
     assert test_obj.tags == {"a", "dir:a"}
 
 
 def test_conditional():
     tests = make_mock_manifest(("test", "a", 10), ("test", "a/b", 10),
@@ -73,17 +73,17 @@ def test_conditional():
 
     test_metadata = manifestexpected.static.compile(BytesIO(test_1),
                                                     {"os": "win"},
                                                     data_cls_getter=manifestexpected.data_cls_getter,
                                                     test_path="a",
                                                     url_base="")
 
     test = tests[1][2].pop()
-    test_obj = wpttest.from_manifest(tests, test, [], test_metadata.get_test(test.id))
+    test_obj = wpttest.from_manifest(test, [], test_metadata.get_test(test.id))
     assert test_obj.prefs == {"a": "b", "c": "d"}
     assert test_obj.expected() == "FAIL"
 
 def test_metadata_lsan_stack_depth():
     tests = make_mock_manifest(("test", "a", 10), ("test", "a/b", 10))
 
     test_metadata = manifestexpected.static.compile(BytesIO(test_2),
                                                     {},
--- a/testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
+++ b/testing/web-platform/tests/tools/wptrunner/wptrunner/wpttest.py
@@ -141,17 +141,17 @@ class Test(object):
         return self.id == other.id
 
     def update_metadata(self, metadata=None):
         if metadata is None:
             metadata = {}
         return metadata
 
     @classmethod
-    def from_manifest(cls, manifest_file, manifest_item, inherit_metadata, test_metadata):
+    def from_manifest(cls, manifest_item, inherit_metadata, test_metadata):
         timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
         protocol = "https" if hasattr(manifest_item, "https") and manifest_item.https else "http"
         return cls(manifest_item.source_file.tests_root,
                    manifest_item.url,
                    inherit_metadata,
                    test_metadata,
                    timeout=timeout,
                    path=manifest_item.source_file.path,
@@ -297,17 +297,17 @@ class TestharnessTest(Test):
         Test.__init__(self, tests_root, url, inherit_metadata, test_metadata, timeout,
                       path, protocol)
 
         self.testdriver = testdriver
         self.jsshell = jsshell
         self.scripts = scripts or []
 
     @classmethod
-    def from_manifest(cls, manifest_file, manifest_item, inherit_metadata, test_metadata):
+    def from_manifest(cls, manifest_item, inherit_metadata, test_metadata):
         timeout = cls.long_timeout if manifest_item.timeout == "long" else cls.default_timeout
         protocol = "https" if hasattr(manifest_item, "https") and manifest_item.https else "http"
         testdriver = manifest_item.testdriver if hasattr(manifest_item, "testdriver") else False
         jsshell = manifest_item.jsshell if hasattr(manifest_item, "jsshell") else False
         script_metadata = manifest_item.source_file.script_metadata or []
         scripts = [v for (k, v) in script_metadata if k == b"script"]
         return cls(manifest_item.source_file.tests_root,
                    manifest_item.url,
@@ -347,17 +347,16 @@ class ReftestTest(Test):
                 raise ValueError
 
         self.references = references
         self.viewport_size = viewport_size
         self.dpi = dpi
 
     @classmethod
     def from_manifest(cls,
-                      manifest_file,
                       manifest_test,
                       inherit_metadata,
                       test_metadata,
                       nodes=None,
                       references_seen=None):
 
         timeout = cls.long_timeout if manifest_test.timeout == "long" else cls.default_timeout
 
@@ -390,20 +389,19 @@ class ReftestTest(Test):
                     # Note that just seeing a node for the second time is not
                     # enough to detect a cycle because
                     # A != B != C != A must include C != A
                     # but A == B == A should not include the redundant B == A.
                     continue
 
             references_seen.add(comparison_key)
 
-            manifest_node = manifest_file.get_reference(ref_url)
+            manifest_node = manifest_test.manifest.get_reference(ref_url)
             if manifest_node:
-                reference = ReftestTest.from_manifest(manifest_file,
-                                                      manifest_node,
+                reference = ReftestTest.from_manifest(manifest_node,
                                                       [],
                                                       None,
                                                       nodes,
                                                       references_seen)
             else:
                 reference = ReftestTest(manifest_test.source_file.tests_root,
                                         ref_url,
                                         [],
@@ -445,11 +443,11 @@ class WdspecTest(Test):
 
 
 manifest_test_cls = {"reftest": ReftestTest,
                      "testharness": TestharnessTest,
                      "manual": ManualTest,
                      "wdspec": WdspecTest}
 
 
-def from_manifest(manifest_file, manifest_test, inherit_metadata, test_metadata):
+def from_manifest(manifest_test, inherit_metadata, test_metadata):
     test_cls = manifest_test_cls[manifest_test.item_type]
-    return test_cls.from_manifest(manifest_file, manifest_test, inherit_metadata, test_metadata)
+    return test_cls.from_manifest(manifest_test, inherit_metadata, test_metadata)