Bug 1367040 - Switch to using wptrunner in the tests directory, r=maja_zf
☠☠ backed out by 490cc1883685 ☠☠
author: James Graham <james@hoppipolla.co.uk>
date: Thu, 18 May 2017 18:50:08 +0100
changeset: 409660 8966fcaed76ddcd9bc5a5834450f903d074a7067
parent: 409659 f736cf17687b00935a50facd6c578ced0a23781c
child: 409661 64ff9b546c6e906e762963f33f76b356111ff84a
push id: 7391
push user: mtabara@mozilla.com
push date: Mon, 12 Jun 2017 13:08:53 +0000
treeherder: mozilla-beta@2191d7f87e2e
reviewers: maja_zf
bugs: 1367040
milestone: 55.0a1
Bug 1367040 - Switch to using wptrunner in the tests directory, r=maja_zf

MozReview-Commit-ID: 4gyR245eRou
build/virtualenv_packages.txt
testing/web-platform/harness/.gitignore
testing/web-platform/harness/.travis.yml
testing/web-platform/harness/MANIFEST.in
testing/web-platform/harness/README.rst
testing/web-platform/harness/docs/Makefile
testing/web-platform/harness/docs/architecture.svg
testing/web-platform/harness/docs/conf.py
testing/web-platform/harness/docs/design.rst
testing/web-platform/harness/docs/expectation.rst
testing/web-platform/harness/docs/index.rst
testing/web-platform/harness/docs/make.bat
testing/web-platform/harness/docs/usage.rst
testing/web-platform/harness/requirements.txt
testing/web-platform/harness/requirements_chrome.txt
testing/web-platform/harness/requirements_firefox.txt
testing/web-platform/harness/requirements_servo.txt
testing/web-platform/harness/setup.py
testing/web-platform/harness/test/metadata/reftest/reftest_and_fail.html.ini
testing/web-platform/harness/test/metadata/reftest/reftest_cycle_fail.html.ini
testing/web-platform/harness/test/metadata/reftest/reftest_match_fail.html.ini
testing/web-platform/harness/test/metadata/reftest/reftest_mismatch_fail.html.ini
testing/web-platform/harness/test/metadata/reftest/reftest_ref_timeout.html.ini
testing/web-platform/harness/test/metadata/reftest/reftest_timeout.html.ini
testing/web-platform/harness/test/metadata/testharness/firefox/__dir__.ini
testing/web-platform/harness/test/metadata/testharness/firefox/subdir/test_pref_reset.html.ini
testing/web-platform/harness/test/metadata/testharness/firefox/test_pref_set.html.ini
testing/web-platform/harness/test/metadata/testharness/subdir/__dir__.ini
testing/web-platform/harness/test/metadata/testharness/subdir/testharness_1.html.ini
testing/web-platform/harness/test/metadata/testharness/testharness_0.html.ini
testing/web-platform/harness/test/metadata/testharness/testharness_error.html.ini
testing/web-platform/harness/test/metadata/testharness/testharness_timeout.html.ini
testing/web-platform/harness/test/test.cfg.example
testing/web-platform/harness/test/test.py
testing/web-platform/harness/test/testdata/reftest/green-ref.html
testing/web-platform/harness/test/testdata/reftest/green.html
testing/web-platform/harness/test/testdata/reftest/red.html
testing/web-platform/harness/test/testdata/reftest/reftest.https.html
testing/web-platform/harness/test/testdata/reftest/reftest_and_fail.html
testing/web-platform/harness/test/testdata/reftest/reftest_and_fail_0-ref.html
testing/web-platform/harness/test/testdata/reftest/reftest_cycle.html
testing/web-platform/harness/test/testdata/reftest/reftest_cycle_0-ref.html
testing/web-platform/harness/test/testdata/reftest/reftest_cycle_1-ref.html
testing/web-platform/harness/test/testdata/reftest/reftest_cycle_fail.html
testing/web-platform/harness/test/testdata/reftest/reftest_cycle_fail_0-ref.html
testing/web-platform/harness/test/testdata/reftest/reftest_match.html
testing/web-platform/harness/test/testdata/reftest/reftest_match_fail.html
testing/web-platform/harness/test/testdata/reftest/reftest_mismatch.html
testing/web-platform/harness/test/testdata/reftest/reftest_mismatch_fail.html
testing/web-platform/harness/test/testdata/reftest/reftest_or_0.html
testing/web-platform/harness/test/testdata/reftest/reftest_ref_timeout-ref.html
testing/web-platform/harness/test/testdata/reftest/reftest_ref_timeout.html
testing/web-platform/harness/test/testdata/reftest/reftest_timeout.html
testing/web-platform/harness/test/testdata/reftest/reftest_wait_0.html
testing/web-platform/harness/test/testdata/testharness/firefox/subdir/test_pref_inherit.html
testing/web-platform/harness/test/testdata/testharness/firefox/subdir/test_pref_reset.html
testing/web-platform/harness/test/testdata/testharness/firefox/test_pref_dir.html
testing/web-platform/harness/test/testdata/testharness/firefox/test_pref_set.html
testing/web-platform/harness/test/testdata/testharness/subdir/testharness_1.html
testing/web-platform/harness/test/testdata/testharness/testharness.https.html
testing/web-platform/harness/test/testdata/testharness/testharness_0.html
testing/web-platform/harness/test/testdata/testharness/testharness_error.html
testing/web-platform/harness/test/testdata/testharness/testharness_long_timeout.html
testing/web-platform/harness/test/testdata/testharness/testharness_timeout.html
testing/web-platform/harness/tox.ini
testing/web-platform/harness/wptrunner.default.ini
testing/web-platform/harness/wptrunner/__init__.py
testing/web-platform/harness/wptrunner/browsers/__init__.py
testing/web-platform/harness/wptrunner/browsers/b2g_setup/certtest_app.zip
testing/web-platform/harness/wptrunner/browsers/base.py
testing/web-platform/harness/wptrunner/browsers/chrome.py
testing/web-platform/harness/wptrunner/browsers/edge.py
testing/web-platform/harness/wptrunner/browsers/firefox.py
testing/web-platform/harness/wptrunner/browsers/server-locations.txt
testing/web-platform/harness/wptrunner/browsers/servo.py
testing/web-platform/harness/wptrunner/browsers/servodriver.py
testing/web-platform/harness/wptrunner/config.json
testing/web-platform/harness/wptrunner/config.py
testing/web-platform/harness/wptrunner/environment.py
testing/web-platform/harness/wptrunner/executors/__init__.py
testing/web-platform/harness/wptrunner/executors/base.py
testing/web-platform/harness/wptrunner/executors/executormarionette.py
testing/web-platform/harness/wptrunner/executors/executorselenium.py
testing/web-platform/harness/wptrunner/executors/executorservo.py
testing/web-platform/harness/wptrunner/executors/executorservodriver.py
testing/web-platform/harness/wptrunner/executors/process.py
testing/web-platform/harness/wptrunner/executors/pytestrunner/__init__.py
testing/web-platform/harness/wptrunner/executors/pytestrunner/fixtures.py
testing/web-platform/harness/wptrunner/executors/pytestrunner/runner.py
testing/web-platform/harness/wptrunner/executors/reftest-wait.js
testing/web-platform/harness/wptrunner/executors/reftest-wait_servodriver.js
testing/web-platform/harness/wptrunner/executors/reftest-wait_webdriver.js
testing/web-platform/harness/wptrunner/executors/reftest.js
testing/web-platform/harness/wptrunner/executors/testharness_marionette.js
testing/web-platform/harness/wptrunner/executors/testharness_servodriver.js
testing/web-platform/harness/wptrunner/executors/testharness_webdriver.js
testing/web-platform/harness/wptrunner/expected.py
testing/web-platform/harness/wptrunner/hosts.py
testing/web-platform/harness/wptrunner/manifestexpected.py
testing/web-platform/harness/wptrunner/manifestinclude.py
testing/web-platform/harness/wptrunner/manifestupdate.py
testing/web-platform/harness/wptrunner/metadata.py
testing/web-platform/harness/wptrunner/products.py
testing/web-platform/harness/wptrunner/reduce.py
testing/web-platform/harness/wptrunner/testharness_runner.html
testing/web-platform/harness/wptrunner/testharnessreport-servo.js
testing/web-platform/harness/wptrunner/testharnessreport-servodriver.js
testing/web-platform/harness/wptrunner/testharnessreport.js
testing/web-platform/harness/wptrunner/testloader.py
testing/web-platform/harness/wptrunner/testrunner.py
testing/web-platform/harness/wptrunner/tests/__init__.py
testing/web-platform/harness/wptrunner/tests/test_chunker.py
testing/web-platform/harness/wptrunner/tests/test_hosts.py
testing/web-platform/harness/wptrunner/tests/test_testloader.py
testing/web-platform/harness/wptrunner/tests/test_update.py
testing/web-platform/harness/wptrunner/update/__init__.py
testing/web-platform/harness/wptrunner/update/base.py
testing/web-platform/harness/wptrunner/update/metadata.py
testing/web-platform/harness/wptrunner/update/state.py
testing/web-platform/harness/wptrunner/update/sync.py
testing/web-platform/harness/wptrunner/update/tree.py
testing/web-platform/harness/wptrunner/update/update.py
testing/web-platform/harness/wptrunner/vcs.py
testing/web-platform/harness/wptrunner/webdriver_server.py
testing/web-platform/harness/wptrunner/wptcommandline.py
testing/web-platform/harness/wptrunner/wptlogging.py
testing/web-platform/harness/wptrunner/wptmanifest/__init__.py
testing/web-platform/harness/wptrunner/wptmanifest/backends/__init__.py
testing/web-platform/harness/wptrunner/wptmanifest/backends/conditional.py
testing/web-platform/harness/wptrunner/wptmanifest/backends/static.py
testing/web-platform/harness/wptrunner/wptmanifest/node.py
testing/web-platform/harness/wptrunner/wptmanifest/parser.py
testing/web-platform/harness/wptrunner/wptmanifest/serializer.py
testing/web-platform/harness/wptrunner/wptmanifest/tests/__init__.py
testing/web-platform/harness/wptrunner/wptmanifest/tests/test_conditional.py
testing/web-platform/harness/wptrunner/wptmanifest/tests/test_parser.py
testing/web-platform/harness/wptrunner/wptmanifest/tests/test_serializer.py
testing/web-platform/harness/wptrunner/wptmanifest/tests/test_static.py
testing/web-platform/harness/wptrunner/wptmanifest/tests/test_tokenizer.py
testing/web-platform/harness/wptrunner/wptrunner.py
testing/web-platform/harness/wptrunner/wpttest.py
testing/web-platform/moz.build
testing/web-platform/runtests.py
testing/web-platform/tests/tools/browserutils/browser.py
testing/web-platform/tests/tools/browserutils/utils.py
testing/web-platform/tests/tools/browserutils/virtualenv.py
testing/web-platform/tests/tools/wptrun.py
testing/web-platform/tests/tools/wptrunner/tox.ini
testing/web-platform/tests/tools/wptrunner/wptrunner/browsers/edge.py
testing/web-platform/tests/tools/wptrunner/wptrunner/config.json
testing/web-platform/tests/tools/wptrunner/wptrunner/environment.py
testing/web-platform/tests/tools/wptrunner/wptrunner/executors/executorservo.py
testing/web-platform/tests/tools/wptrunner/wptrunner/testharness_runner.html
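In practical terms, this changeset deletes the standalone copy of wptrunner under testing/web-platform/harness and points the in-tree virtualenv at the copy vendored with the tests under testing/web-platform/tests/tools/wptrunner. As a rough sanity check (hypothetical, run inside the build virtualenv after this change; the printed path is an assumption about the new layout, not output from the patch)::

  # Hypothetical check: the package should now resolve from the vendored
  # location inside the tests directory rather than the deleted harness/.
  import wptrunner
  print(wptrunner.__file__)
  # e.g. .../testing/web-platform/tests/tools/wptrunner/wptrunner/__init__.py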
--- a/build/virtualenv_packages.txt
+++ b/build/virtualenv_packages.txt
@@ -37,17 +37,17 @@ mozilla.pth:testing/firefox-ui/harness
 mozilla.pth:testing/marionette/client
 mozilla.pth:testing/marionette/harness
 mozilla.pth:testing/marionette/harness/marionette_harness/runner/mixins/browsermob-proxy-py
 mozilla.pth:testing/marionette/puppeteer/firefox
 packages.txt:testing/mozbase/packages.txt
 mozilla.pth:testing/taskcluster
 mozilla.pth:testing/tools/autotry
 mozilla.pth:testing/web-platform
-mozilla.pth:testing/web-platform/harness
+mozilla.pth:testing/web-platform/tests/tools/wptrunner
 mozilla.pth:testing/web-platform/tests/tools/wptserve
 mozilla.pth:testing/web-platform/tests/tools/six
 mozilla.pth:testing/xpcshell
 mozilla.pth:python/mock-1.0.0
 mozilla.pth:xpcom/typelib/xpt/tools
 mozilla.pth:tools/docs
 mozilla.pth:media/webrtc/trunk/tools/gyp/pylib
 mozilla.pth:python/pyasn1
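Each mozilla.pth entry above names a srcdir-relative directory that the build adds to the virtualenv's import path, so swapping the single entry is all that is needed to pick up wptrunner from its new home. A minimal sketch of that mechanism (illustrative only; the real logic lives in the build's virtualenv bootstrap, and the function and paths here are assumptions)::

  import os

  def write_mozilla_pth(packages_txt, topsrcdir, site_packages):
      # Collect every "mozilla.pth:<relative/path>" line and write the
      # absolute paths into a .pth file in site-packages. Python's `site`
      # machinery reads .pth files at interpreter startup and appends
      # each listed directory to sys.path.
      entries = []
      with open(packages_txt) as f:
          for line in f:
              line = line.strip()
              if line.startswith("mozilla.pth:"):
                  rel = line.split(":", 1)[1]
                  entries.append(os.path.join(topsrcdir, rel))
      with open(os.path.join(site_packages, "mozilla.pth"), "w") as f:
          f.write("\n".join(entries) + "\n")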
deleted file mode 100644
--- a/testing/web-platform/harness/.gitignore
+++ /dev/null
@@ -1,7 +0,0 @@
-*.py[co]
-*~
-*#
-\#*
-_virtualenv
-test/test.cfg
-test/metadata/MANIFEST.json
deleted file mode 100644
--- a/testing/web-platform/harness/.travis.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-language: python
-python: 2.7
-
-sudo: false
-
-cache:
-  directories:
-    - $HOME/.cache/pip
-
-env:
-  - TOXENV="{py27,pypy}-base"
-  - TOXENV="{py27,pypy}-chrome"
-  - TOXENV="{py27,pypy}-firefox"
-  - TOXENV="{py27,pypy}-servo"
-
-install:
-  - pip install -U tox
-
-script:
-  - tox
deleted file mode 100644
--- a/testing/web-platform/harness/MANIFEST.in
+++ /dev/null
@@ -1,17 +0,0 @@
-exclude MANIFEST.in
-include requirements.txt
-include wptrunner/browsers/b2g_setup/*
-include wptrunner.default.ini
-include wptrunner/testharness_runner.html
-include wptrunner/testharnessreport.js
-include wptrunner/testharnessreport-servo.js
-include wptrunner/testharnessreport-servodriver.js
-include wptrunner/executors/testharness_marionette.js
-include wptrunner/executors/testharness_servodriver.js
-include wptrunner/executors/testharness_webdriver.js
-include wptrunner/executors/reftest.js
-include wptrunner/executors/reftest-wait.js
-include wptrunner/executors/reftest-wait_servodriver.js
-include wptrunner/executors/reftest-wait_webdriver.js
-include wptrunner/config.json
-include wptrunner/browsers/server-locations.txt
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/README.rst
+++ /dev/null
@@ -1,242 +0,0 @@
-wptrunner: A web-platform-tests harness
-=======================================
-
-wptrunner is a harness for running the W3C `web-platform-tests testsuite`_.
-
-.. contents::
-
-Installation
-~~~~~~~~~~~~
-
-wptrunner is expected to be installed into a virtualenv using pip. For
-development, it can be installed using the `-e` option::
-
-  pip install -e ./
-
-Running the Tests
-~~~~~~~~~~~~~~~~~
-
-After installation, the command ``wptrunner`` should be available to run
-the tests.
-
-The ``wptrunner`` command takes multiple options, of which the
-following are most significant:
-
-``--product`` (defaults to `firefox`)
-  The product to test against: `b2g`, `chrome`, `firefox`, or `servo`.
-
-``--binary`` (required if product is `firefox` or `servo`)
-  The path to a binary file for the product (browser) to test against.
-
-``--webdriver-binary`` (required if product is `chrome`)
-  The path to a `driver` binary; e.g., a `chromedriver` binary.
-
-``--certutil-binary`` (required if product is `firefox` [#]_)
-  The path to a `certutil` binary (for tests that must be run over https).
-
-``--metadata`` (required)
-  The path to a directory containing test metadata. [#]_
-
-``--tests`` (required)
-  The path to a directory containing a web-platform-tests checkout.
-
-``--prefs-root`` (required only when testing a Firefox binary)
-  The path to a directory containing Firefox test-harness preferences. [#]_
-
-``--config`` (should default to `wptrunner.default.ini`)
-  The path to the config (ini) file.
-
-.. [#] The ``--certutil-binary`` option is required when the product is
-   ``firefox`` unless ``--ssl-type=none`` is specified.
-
-.. [#] The ``--metadata`` path is to a directory that contains:
-
-  * a ``MANIFEST.json`` file (instructions on generating this file are
-    available in the `detailed documentation
-    <http://wptrunner.readthedocs.org/en/latest/usage.html#installing-wptrunner>`_);
-    and
-  * (optionally) any expectation files (see below)
-
-.. [#] Example ``--prefs-root`` value: ``~/mozilla-central/testing/profiles``.
-
-There are also a variety of other options available; use ``--help`` to
-list them.
-
--------------------------------
-Example: How to start wptrunner
--------------------------------
-
-To test a Firefox Nightly build in an OS X environment, you might start
-wptrunner using something similar to the following example::
-
-  wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
-    --binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/dist/Nightly.app/Contents/MacOS/firefox \
-    --certutil-binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/security/nss/cmd/certutil/certutil \
-    --prefs-root=~/mozilla-central/testing/profiles
-
-And to test a Chromium build in an OS X environment, you might start
-wptrunner using something similar to the following example::
-
-  wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
-    --binary=~/chromium/src/out/Release/Chromium.app/Contents/MacOS/Chromium \
-    --webdriver-binary=/usr/local/bin/chromedriver --product=chrome
-
--------------------------------------
-Example: How to run a subset of tests
--------------------------------------
-
-To restrict a test run just to tests in a particular web-platform-tests
-subdirectory, specify the directory name in the positional arguments after
-the options; for example, run just the tests in the `dom` subdirectory::
-
-  wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
-    --binary=/path/to/firefox --certutil-binary=/path/to/certutil \
-    --prefs-root=/path/to/testing/profiles \
-    dom
-
-Output
-~~~~~~
-
-By default wptrunner just dumps its entire output as raw JSON messages
-to stdout. This is convenient for piping into other tools, but not ideal
-for humans reading the output.
-
-As an alternative, you can use the ``--log-mach`` option, which provides
-output in a reasonable format for humans. The option requires a value:
-either the path for a file to write the `mach`-formatted output to, or
-"`-`" (a hyphen) to write the `mach`-formatted output to stdout.
-
-When using ``--log-mach``, output of the full raw JSON log is still
-available, from the ``--log-raw`` option. So to output the full raw JSON
-log to a file and a human-readable summary to stdout, you might start
-wptrunner using something similar to the following example::
-
-  wptrunner --metadata=~/web-platform-tests/ --tests=~/web-platform-tests/ \
-    --binary=/path/to/firefox --certutil-binary=/path/to/certutil \
-    --prefs-root=/path/to/testing/profiles \
-    --log-raw=output.log --log-mach=-
-
-Expectation Data
-~~~~~~~~~~~~~~~~
-
-wptrunner is designed to be used in an environment where it is not
-just necessary to know which tests passed, but to compare the results
-between runs. For this reason it is possible to store the results of a
-previous run in a set of ini-like "expectation files". This format is
-documented below. To generate the expectation files use `wptrunner` with
-the `--log-raw=/path/to/log/file` option. This can then be used as
-input to the `wptupdate` tool.
-
-Expectation File Format
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Metadata about tests, notably including their expected results, is
-stored in a modified ini-like format that is designed to be human
-editable, but also to be machine updatable.
-
-Each test file that requires metadata to be specified (because it has
-a non-default expectation or because it is disabled, for example) has
-a corresponding expectation file in the `metadata` directory. For
-example a test file `html/test1.html` containing a failing test would
-have an expectation file called `html/test1.html.ini` in the
-`metadata` directory.
-
-An example of an expectation file is::
-
-  example_default_key: example_value
-
-  [filename.html]
-    type: testharness
-
-    [subtest1]
-      expected: FAIL
-
-    [subtest2]
-      expected:
-        if platform == 'win': TIMEOUT
-        if platform == 'osx': ERROR
-        FAIL
-
-  [filename.html?query=something]
-    type: testharness
-    disabled: bug12345
-
-The file consists of two elements, key-value pairs and
-sections.
-
-Sections are delimited by headings enclosed in square brackets. Any
-closing square bracket in the heading itself may be escaped with a
-backslash. Each section may then contain any number of key-value pairs
-followed by any number of subsections. So that it is clear which data
-belongs to each section without the use of end-section markers, the
-data for each section (i.e. the key-value pairs and subsections) must
-be indented using spaces. Indentation need only be consistent, but
-using two spaces per level is recommended.
-
-In a test expectation file, each resource provided by the file has a
-single section, with the section heading being the part after the last
-`/` in the test url. Tests that have subsections may have subsections
-for those subtests in which the heading is the name of the subtest.
-
-Simple key-value pairs are of the form::
-
-  key: value
-
-Note that unlike ini files, only `:` is a valid separator; `=` will
-not work as expected. Key-value pairs may also have conditional
-values of the form::
-
-  key:
-    if condition1: value1
-    if condition2: value2
-    default
-
-In this case each conditional is evaluated in turn and the value is
-that on the right hand side of the first matching conditional. In the
-case that no condition matches, the unconditional default is used. If
-no condition matches and no default is provided it is equivalent to
-the key not being present. Conditionals use a simple python-like expression
-language e.g.::
-
-  if debug and (platform == "linux" or platform == "osx"): FAIL
-
-For test expectations the available variables are those in the
-`run_info` which for desktop are `version`, `os`, `bits`, `processor`,
-`debug` and `product`.
-
-Key-value pairs specified at the top level of the file before any
-sections are special as they provide defaults for the rest of the file
-e.g.::
-
-  key1: value1
-
-  [section 1]
-    key2: value2
-
-  [section 2]
-    key1: value3
-
-In this case, inside section 1, `key1` would have the value `value1`
-and `key2` the value `value2` whereas in section 2 `key1` would have
-the value `value3` and `key2` would be undefined.
-
-The web-platform-test harness knows about several keys:
-
-`expected`
-  Must evaluate to a possible test status indicating the expected
-  result of the test. The implicit default is PASS or OK when the
-  field isn't present.
-
-`disabled`
-  Any value indicates that the test is disabled.
-
-`type`
-  The test type e.g. `testharness`, `reftest`, or `wdspec`.
-
-`reftype`
-  The type of comparison for reftests; either `==` or `!=`.
-
-`refurl`
-  The reference url for reftests.
-
-.. _`web-platform-tests testsuite`: https://github.com/w3c/web-platform-tests
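The conditional-value rules described in the deleted README above (each ``if`` is tried in order, the first match wins, and the bare default applies otherwise) amount to a first-match lookup over ``run_info``. A minimal sketch of that behaviour (illustrative only; wptrunner's actual implementation is the conditional backend under wptmanifest/backends/, and the names here are assumptions)::

  def resolve(conditions, default, run_info):
      # conditions: ordered (predicate, value) pairs taken from a key's
      # "if ..." lines; run_info supplies variables such as os, bits,
      # debug and product. Returns the value of the first matching
      # conditional, else the unconditional default (None meaning the
      # key is treated as not present).
      for predicate, value in conditions:
          if predicate(run_info):
              return value
      return default

  run_info = {"os": "linux", "debug": True, "product": "firefox"}
  assert resolve(
      [(lambda r: r["os"] == "win", "TIMEOUT"),
       (lambda r: r["os"] == "osx", "ERROR")],
      "FAIL", run_info) == "FAIL"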
deleted file mode 100644
--- a/testing/web-platform/harness/docs/Makefile
+++ /dev/null
@@ -1,177 +0,0 @@
-# Makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS    =
-SPHINXBUILD   = sphinx-build
-PAPER         =
-BUILDDIR      = _build
-
-# User-friendly check for sphinx-build
-ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
-$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
-endif
-
-# Internal variables.
-PAPEROPT_a4     = -D latex_paper_size=a4
-PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-# the i18n builder cannot share the environment and doctrees with the others
-I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
-
-.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
-
-help:
-	@echo "Please use \`make <target>' where <target> is one of"
-	@echo "  html       to make standalone HTML files"
-	@echo "  dirhtml    to make HTML files named index.html in directories"
-	@echo "  singlehtml to make a single large HTML file"
-	@echo "  pickle     to make pickle files"
-	@echo "  json       to make JSON files"
-	@echo "  htmlhelp   to make HTML files and a HTML help project"
-	@echo "  qthelp     to make HTML files and a qthelp project"
-	@echo "  devhelp    to make HTML files and a Devhelp project"
-	@echo "  epub       to make an epub"
-	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
-	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
-	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
-	@echo "  text       to make text files"
-	@echo "  man        to make manual pages"
-	@echo "  texinfo    to make Texinfo files"
-	@echo "  info       to make Texinfo files and run them through makeinfo"
-	@echo "  gettext    to make PO message catalogs"
-	@echo "  changes    to make an overview of all changed/added/deprecated items"
-	@echo "  xml        to make Docutils-native XML files"
-	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
-	@echo "  linkcheck  to check all external links for integrity"
-	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
-
-clean:
-	rm -rf $(BUILDDIR)/*
-
-html:
-	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
-
-dirhtml:
-	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
-	@echo
-	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
-
-singlehtml:
-	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
-	@echo
-	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
-
-pickle:
-	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
-	@echo
-	@echo "Build finished; now you can process the pickle files."
-
-json:
-	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
-	@echo
-	@echo "Build finished; now you can process the JSON files."
-
-htmlhelp:
-	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
-	@echo
-	@echo "Build finished; now you can run HTML Help Workshop with the" \
-	      ".hhp project file in $(BUILDDIR)/htmlhelp."
-
-qthelp:
-	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
-	@echo
-	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
-	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
-	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/wptrunner.qhcp"
-	@echo "To view the help file:"
-	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/wptrunner.qhc"
-
-devhelp:
-	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
-	@echo
-	@echo "Build finished."
-	@echo "To view the help file:"
-	@echo "# mkdir -p $$HOME/.local/share/devhelp/wptrunner"
-	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/wptrunner"
-	@echo "# devhelp"
-
-epub:
-	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
-	@echo
-	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
-
-latex:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo
-	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
-	@echo "Run \`make' in that directory to run these through (pdf)latex" \
-	      "(use \`make latexpdf' here to do that automatically)."
-
-latexpdf:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through pdflatex..."
-	$(MAKE) -C $(BUILDDIR)/latex all-pdf
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-latexpdfja:
-	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
-	@echo "Running LaTeX files through platex and dvipdfmx..."
-	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
-	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
-
-text:
-	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
-	@echo
-	@echo "Build finished. The text files are in $(BUILDDIR)/text."
-
-man:
-	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
-	@echo
-	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
-
-texinfo:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo
-	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
-	@echo "Run \`make' in that directory to run these through makeinfo" \
-	      "(use \`make info' here to do that automatically)."
-
-info:
-	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
-	@echo "Running Texinfo files through makeinfo..."
-	make -C $(BUILDDIR)/texinfo info
-	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
-
-gettext:
-	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
-	@echo
-	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
-
-changes:
-	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
-	@echo
-	@echo "The overview file is in $(BUILDDIR)/changes."
-
-linkcheck:
-	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
-	@echo
-	@echo "Link check complete; look for any errors in the above output " \
-	      "or in $(BUILDDIR)/linkcheck/output.txt."
-
-doctest:
-	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
-	@echo "Testing of doctests in the sources finished, look at the " \
-	      "results in $(BUILDDIR)/doctest/output.txt."
-
-xml:
-	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
-	@echo
-	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
-
-pseudoxml:
-	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
-	@echo
-	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
deleted file mode 100644
--- a/testing/web-platform/harness/docs/architecture.svg
+++ /dev/null
@@ -1,1 +0,0 @@
-<svg xmlns="http://www.w3.org/2000/svg" ...> [one-line SVG elided: the wptrunner architecture diagram referenced by design.rst, showing run_tests, TestLoader, TestEnvironment, ManagerGroup, TestRunnerManager, TestRunner, Browser, Executor, ExecutorBrowser, a Tests Queue, the wptserve/pywebsocket/serve.py server components, and the product under test, with ownership (same/cross process) and cross-process communication edges]
deleted file mode 100644
--- a/testing/web-platform/harness/docs/conf.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# wptrunner documentation build configuration file, created by
-# sphinx-quickstart on Mon May 19 18:14:20 2014.
-#
-# This file is execfile()d with the current directory set to its
-# containing dir.
-#
-# Note that not all possible configuration values are present in this
-# autogenerated file.
-#
-# All configuration values have a default; values that are commented out
-# serve to show the default.
-
-import sys
-import os
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
-
-# -- General configuration ------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = [
-    'sphinx.ext.autodoc',
-    'sphinx.ext.intersphinx',
-    'sphinx.ext.viewcode',
-]
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix of source filenames.
-source_suffix = '.rst'
-
-# The encoding of source files.
-#source_encoding = 'utf-8-sig'
-
-# The master toctree document.
-master_doc = 'index'
-
-# General information about the project.
-project = u'wptrunner'
-copyright = u''
-
-# The version info for the project you're documenting, acts as replacement for
-# |version| and |release|, also used in various other places throughout the
-# built documents.
-#
-# The short X.Y version.
-version = '0.3'
-# The full version, including alpha/beta/rc tags.
-release = '0.3'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#language = None
-
-# There are two options for replacing |today|: either, you set today to some
-# non-false value, then it is used:
-#today = ''
-# Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-exclude_patterns = ['_build']
-
-# The reST default role (used for this markup: `text`) to use for all
-# documents.
-#default_role = None
-
-# If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
-
-# If true, the current module name will be prepended to all description
-# unit titles (such as .. function::).
-#add_module_names = True
-
-# If true, sectionauthor and moduleauthor directives will be shown in the
-# output. They are ignored by default.
-#show_authors = False
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-# A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
-
-# If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
-
-
-# -- Options for HTML output ----------------------------------------------
-
-# The theme to use for HTML and HTML Help pages.  See the documentation for
-# a list of builtin themes.
-html_theme = 'default'
-
-# Theme options are theme-specific and customize the look and feel of a theme
-# further.  For a list of options available for each theme, see the
-# documentation.
-#html_theme_options = {}
-
-# Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
-
-# The name for this set of Sphinx documents.  If None, it defaults to
-# "<project> v<release> documentation".
-#html_title = None
-
-# A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = None
-
-# The name of an image file (relative to this directory) to place at the top
-# of the sidebar.
-#html_logo = None
-
-# The name of an image file (within the static path) to use as favicon of the
-# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
-# pixels large.
-#html_favicon = None
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# Add any extra paths that contain custom files (such as robots.txt or
-# .htaccess) here, relative to this directory. These files are copied
-# directly to the root of the documentation.
-#html_extra_path = []
-
-# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
-# using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
-
-# If true, SmartyPants will be used to convert quotes and dashes to
-# typographically correct entities.
-#html_use_smartypants = True
-
-# Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
-
-# Additional templates that should be rendered to pages, maps page names to
-# template names.
-#html_additional_pages = {}
-
-# If false, no module index is generated.
-#html_domain_indices = True
-
-# If false, no index is generated.
-#html_use_index = True
-
-# If true, the index is split into individual pages for each letter.
-#html_split_index = False
-
-# If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
-
-# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
-
-# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
-
-# If true, an OpenSearch description file will be output, and all pages will
-# contain a <link> tag referring to it.  The value of this option must be the
-# base URL from which the finished HTML is served.
-#html_use_opensearch = ''
-
-# This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'wptrunnerdoc'
-
-
-# -- Options for LaTeX output ---------------------------------------------
-
-latex_elements = {
-# The paper size ('letterpaper' or 'a4paper').
-#'papersize': 'letterpaper',
-
-# The font size ('10pt', '11pt' or '12pt').
-#'pointsize': '10pt',
-
-# Additional stuff for the LaTeX preamble.
-#'preamble': '',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title,
-#  author, documentclass [howto, manual, or own class]).
-latex_documents = [
-  ('index', 'wptrunner.tex', u'wptrunner Documentation',
-   u'James Graham', 'manual'),
-]
-
-# The name of an image file (relative to this directory) to place at the top of
-# the title page.
-#latex_logo = None
-
-# For "manual" documents, if this is true, then toplevel headings are parts,
-# not chapters.
-#latex_use_parts = False
-
-# If true, show page references after internal links.
-#latex_show_pagerefs = False
-
-# If true, show URL addresses after external links.
-#latex_show_urls = False
-
-# Documents to append as an appendix to all manuals.
-#latex_appendices = []
-
-# If false, no module index is generated.
-#latex_domain_indices = True
-
-
-# -- Options for manual page output ---------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    ('index', 'wptrunner', u'wptrunner Documentation',
-     [u'James Graham'], 1)
-]
-
-# If true, show URL addresses after external links.
-#man_show_urls = False
-
-
-# -- Options for Texinfo output -------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-#  dir menu entry, description, category)
-texinfo_documents = [
-  ('index', 'wptrunner', u'wptrunner Documentation',
-   u'James Graham', 'wptrunner', 'Harness for running web-platform-tests against various products',
-   'Miscellaneous'),
-]
-
-# Documents to append as an appendix to all manuals.
-#texinfo_appendices = []
-
-# If false, no module index is generated.
-#texinfo_domain_indices = True
-
-# How to display URL addresses: 'footnote', 'no', or 'inline'.
-#texinfo_show_urls = 'footnote'
-
-# If true, do not generate a @detailmenu in the "Top" node's menu.
-#texinfo_no_detailmenu = False
-
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {'python': ('http://docs.python.org/', None),
-                       'mozlog': ('http://mozbase.readthedocs.org/en/latest/', None)}
deleted file mode 100644
--- a/testing/web-platform/harness/docs/design.rst
+++ /dev/null
@@ -1,106 +0,0 @@
-wptrunner Design
-================
-
-The design of wptrunner is intended to meet the following
-requirements:
-
- * Possible to run tests from W3C web-platform-tests.
-
- * Tests should be run as fast as possible. In particular, it should
-   not be necessary to restart the browser between tests or perform
-   other similarly expensive operations.
-
- * As far as possible, the tests should run in a "normal" browser and
-   browsing context. In particular many tests assume that they are
-   running in a top-level browsing context, so we must avoid the use
-   of an ``iframe`` test container.
-
- * It must be possible to deal with all kinds of behaviour of the
-   browser under test, for example crashing or hanging.
-
- * It should be possible to add support for new platforms and browsers
-   with minimal code changes.
-
- * It must be possible to run tests in parallel to further improve
-   performance.
-
- * Test output must be in a machine readable form.
-
-Architecture
-------------
-
-In order to meet the above requirements, wptrunner is designed to
-push as much of the test scheduling as possible into the harness. This
-allows the harness to monitor the state of the browser and take
-appropriate action if it gets into an unwanted state, e.g. killing the
-browser if it appears to be hung.
-
-The harness will typically communicate with the browser via some remote
-control protocol such as WebDriver. However for browsers where no such
-protocol is supported, other implementation strategies are possible,
-typically at the expense of speed.
-
-The overall architecture of wptrunner is shown in the diagram below:
-
-.. image:: architecture.svg
-
-The main entry point to the code is :py:func:`run_tests` in
-``wptrunner.py``. This is responsible for setting up the test
-environment, loading the list of tests to be executed, and invoking
-the remainder of the code to actually execute some tests.
-
-The test environment is encapsulated in the
-:py:class:`TestEnvironment` class. This defers to code in
-``web-platform-tests`` which actually starts the required servers to
-run the tests.
-
-The set of tests to run is defined by the
-:py:class:`TestLoader`. This is constructed with a
-:py:class:`TestFilter` (not shown), which takes any filter arguments
-from the command line to restrict the set of tests that will be
-run. The :py:class:`TestLoader` reads both the ``web-platform-tests``
-JSON manifest and the expectation data stored in ini files and
-produces a :py:class:`multiprocessing.Queue` of tests to run, and
-their expected results.
-
-Actually running the tests happens through the
-:py:class:`ManagerGroup` object. This takes the :py:class:`Queue` of
-tests to be run and starts a :py:class:`testrunner.TestRunnerManager` for each
-instance of the browser under test that will be started. These
-:py:class:`TestRunnerManager` instances are each started in their own
-thread.
-
-A :py:class:`TestRunnerManager` coordinates starting the product under
-test and outputting results from the tests. If a test
-has timed out or the browser has crashed, it must restart the
-browser to ensure the test run can continue. The functionality for
-initialising the browser under test, and probing its state
-(e.g. whether the process is still alive) is implemented through a
-:py:class:`Browser` object. An implementation of this class must be
-provided for each product that is supported.
-
-The functionality for actually running the tests is provided by a
-:py:class:`TestRunner` object. :py:class:`TestRunner` instances are
-run in their own child process created with the
-:py:mod:`multiprocessing` module. This allows them to run concurrently
-and to be killed and restarted as required. Communication between the
-:py:class:`TestRunnerManager` and the :py:class:`TestRunner` is
-provided by a pair of queues, one for sending messages in each
-direction. In particular test results are sent from the
-:py:class:`TestRunner` to the :py:class:`TestRunnerManager` using one
-of these queues.
-
-The :py:class:`TestRunner` object is generic in that the same
-:py:class:`TestRunner` is used regardless of the product under
-test. However the details of how to run the test may vary greatly with
-the product since different products support different remote control
-protocols (or none at all). These protocol-specific parts are placed
-in the :py:class:`Executor` object. There is typically a different
-:py:class:`Executor` class for each combination of control protocol
-and test type. The :py:class:`TestRunner` is responsible for pulling
-each test off the :py:class:`Queue` of tests and passing it down to
-the :py:class:`Executor`.
-
-The executor often requires access to details of the particular
-browser instance that it is testing so that it knows, for example, which
-port to connect to in order to send commands to the browser. These details
-are encapsulated in the :py:class:`ExecutorBrowser` class.
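
The manager/runner split described above is, at its core, a pair of
multiprocessing queues between a parent thread and a child process. The
following minimal sketch shows the pattern only; the names are invented
for illustration and the real wptrunner classes carry much more state::

  from multiprocessing import Process, Queue

  def runner(command_queue, result_queue):
      # Child process: pull tests off the queue until a sentinel arrives.
      while True:
          test = command_queue.get()
          if test is None:
              break
          # A real Executor would drive the browser here.
          result_queue.put((test, "PASS"))

  def manager(tests):
      command_queue, result_queue = Queue(), Queue()
      child = Process(target=runner, args=(command_queue, result_queue))
      child.start()
      for test in tests:
          command_queue.put(test)
      command_queue.put(None)  # sentinel: no more tests
      for _ in tests:
          print(result_queue.get())
      child.join()

  if __name__ == "__main__":
      manager(["/dom/example_1.html", "/dom/example_2.html"])
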
deleted file mode 100644
--- a/testing/web-platform/harness/docs/expectation.rst
+++ /dev/null
@@ -1,248 +0,0 @@
-Expectation Data
-================
-
-Introduction
-------------
-
-For use in continuous integration systems, and other scenarios where
-regression tracking is required, wptrunner supports storing and
-loading the expected result of each test in a test run. Typically
-these expected results will initially be generated by running the
-testsuite in a baseline build. They may then be edited by humans as
-new features are added to the product that change the expected
-results. The expected results may also vary for a single product
-depending on the platform on which it is run. Therefore, the raw
-structured log data is not a suitable format for storing these
-files. Instead something is required that is:
-
- * Human readable
-
- * Human editable
-
- * Machine readable / writable
-
- * Capable of storing test id / result pairs
-
- * Suitable for storing in a version control system (i.e. text-based)
-
-The need for different results per platform means either having
-multiple expectation files for each platform, or having a way to
-express conditional values within a certain file. The former would be
-rather cumbersome for humans updating the expectation files, so the
-latter approach has been adopted, leading to the requirement:
-
- * Capable of storing result values that are conditional on the platform.
-
-There are few extant formats that meet these requirements, so
-wptrunner uses a bespoke ``expectation manifest`` format, which is
-closely based on the standard ``ini`` format.
-
-Directory Layout
-----------------
-
-Expectation manifest files must be stored under the ``metadata``
-directory passed to the test runner. The directory layout follows that
-of web-platform-tests with each test path having a corresponding
-manifest file. Tests that differ only by query string, or reftests
-with the same test path but different ref paths, share the same
-manifest file. The file name is taken from the last /-separated part
-of the path, suffixed with ``.ini``.
-
-As an optimisation, tests which produce only default results
-(i.e. ``PASS`` or ``OK``) don't require a corresponding manifest file.
-
-For example, a test with the URL::
-
-  /spec/section/file.html?query=param
-
-would have the expectation file::
-
-  metadata/spec/section/file.html.ini
-
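The URL-to-manifest-file mapping can be captured in a few lines; the
helper below is a sketch for illustration, not part of wptrunner::

  import posixpath

  def expectation_path(metadata_root, test_url):
      # Strip the query string: tests differing only by query share a file.
      path = test_url.split("?")[0].lstrip("/")
      return posixpath.join(metadata_root, path + ".ini")

  # expectation_path("metadata", "/spec/section/file.html?query=param")
  # returns "metadata/spec/section/file.html.ini"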
-
-.. _wptupdate-label:
-
-Generating Expectation Files
-----------------------------
-
-wptrunner provides the tool ``wptupdate`` to generate expectation
-files from the results of a set of baseline test runs. The basic
-syntax for this is::
-
-  wptupdate [options] [logfile]...
-
-Each ``logfile`` is a structured log file from a previous run. These
-can be generated from wptrunner using the ``--log-raw`` option
-e.g. ``--log-raw=structured.log``. The default behaviour is to update
-all the test data for the particular combination of hardware and OS
-used in the run corresponding to the log data, whilst leaving any
-other expectations untouched.
-
-wptupdate takes several useful options:
-
-``--sync``
-  Pull the latest version of web-platform-tests from the
-  upstream specified in the config file. If this is specified in
-  combination with logfiles, it is assumed that the results in the log
-  files apply to the post-update tests.
-
-``--no-check-clean``
-  Don't attempt to check if the working directory is clean before
-  doing the update (assuming that the working directory is a git or
-  mercurial tree).
-
-``--patch``
-  Create a git commit, or an mq patch, with the changes made by wptupdate.
-
-``--ignore-existing``
-  Overwrite all the expectation data for any tests that have a result
-  in the passed log files, not just data for the same platform.
-
-Examples
-~~~~~~~~
-
-Update the local copy of web-platform-tests without changing the
-expectation data and commit (or create a mq patch for) the result::
-
-  wptupdate --patch --sync
-
-Update all the expectations from a set of cross-platform test runs::
-
-  wptupdate --no-check-clean --patch osx.log linux.log windows.log
-
-Add expectation data for some new tests that are expected to be
-platform-independent::
-
-  wptupdate --no-check-clean --patch --ignore-existing tests.log
-
-Manifest Format
----------------
-The format of the manifest files is based on the ini format. Files are
-divided into sections, each (apart from the root section) having a
-heading enclosed in square braces. Within each section are key-value
-pairs. There are several notable differences from standard .ini files,
-however:
-
- * Sections may be hierarchically nested, with significant whitespace
-   indicating nesting depth.
-
- * Only ``:`` is valid as a key/value separator
-
-A simple example of a manifest file is::
-
-  root_key: root_value
-
-  [section]
-    section_key: section_value
-
-    [subsection]
-       subsection_key: subsection_value
-
-  [another_section]
-    another_key: another_value
-
-Conditional Values
-~~~~~~~~~~~~~~~~~~
-
-In order to support values that depend on some external data, the
-right hand side of a key/value pair can take a set of conditionals
-rather than a plain value. These values are placed on a new line
-following the key, with significant indentation. Conditional values
-are prefixed with ``if`` and terminated with a colon, for example::
-
-  key:
-    if cond1: value1
-    if cond2: value2
-    value3
-
-In this example, the value associated with ``key`` is determined by
-first evaluating ``cond1`` against external data. If that is true,
-``key`` is assigned the value ``value1``, otherwise ``cond2`` is
-evaluated in the same way. If both ``cond1`` and ``cond2`` are false,
-the unconditional ``value3`` is used.
-
-Conditions themselves use a Python-like expression syntax. Operands
-can either be variables, corresponding to data passed in, numbers
-(integer or floating point; exponential notation is not supported) or
-quote-delimited strings. Equality is tested using ``==`` and
-inequality by ``!=``. The operators ``and``, ``or`` and ``not`` are
-used in the expected way. Parentheses can also be used for
-grouping. For example::
-
-  key:
-    if (a == 2 or a == 3) and b == "abc": value1
-    if a == 1 or b != "abc": value2
-    value3
-
-Here ``a`` and ``b`` are variables, the value of which will be
-supplied when the manifest is used.
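
The evaluation order amounts to a first-match search over the
conditions, falling back to the unconditional value. Modelling the
example above in plain Python (a sketch that sidesteps wptrunner's
actual expression parser)::

  def evaluate_key(conditions, default, variables):
      # conditions: (predicate, value) pairs in file order.
      for predicate, value in conditions:
          if predicate(variables):
              return value
      return default

  value = evaluate_key(
      [(lambda v: (v["a"] == 2 or v["a"] == 3) and v["b"] == "abc", "value1"),
       (lambda v: v["a"] == 1 or v["b"] != "abc", "value2")],
      "value3",
      {"a": 2, "b": "abc"})
  # value == "value1"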
-
-Expectation Manifests
----------------------
-
-When used for expectation data, manifests have the following format:
-
- * A section per test URL described by the manifest, with the section
-   heading being the part of the test URL following the last ``/`` in
-   the path (this allows multiple tests in a single manifest file with
-   the same path part of the URL, but different query parts).
-
- * A subsection per subtest, with the heading being the title of the
-   subtest.
-
- * A key ``type`` indicating the test type. This takes the values
-   ``testharness`` and ``reftest``.
-
- * For reftests, keys ``reftype`` indicating the reference type
-   (``==`` or ``!=``) and ``refurl`` indicating the URL of the
-   reference.
-
- * A key ``expected`` giving the expectation value of each (sub)test.
-
- * A key ``disabled`` which can be set to any value to indicate that
-   the (sub)test is disabled and should either not be run (for tests)
-   or that its results should be ignored (subtests).
-
- * A key ``restart-after`` which can be set to any value to indicate that
-   the runner should restart the browser after running this test (e.g. to
-   clear out unwanted state).
-
- * Variables ``debug``, ``os``, ``version``, ``processor`` and
-   ``bits`` that describe the configuration of the browser under
-   test. ``debug`` is a boolean indicating whether a build is a debug
-   build. ``os`` is a string indicating the operating system, and
-   ``version`` a string indicating the particular version of that
-   operating system. ``processor`` is a string indicating the
-   processor architecture and ``bits`` an integer indicating the
-   number of bits. This information is typically provided by
-   :py:mod:`mozinfo`.
-
- * Top level keys are taken as defaults for the whole file. So, for
-   example, a top level key with ``expected: FAIL`` would indicate
-   that all tests and subtests in the file are expected to fail,
-   unless they have an ``expected`` key of their own.
-
-A simple example manifest might look like::
-
-  [test.html?variant=basic]
-    type: testharness
-
-    [Test something unsupported]
-       expected: FAIL
-
-  [test.html?variant=broken]
-    expected: ERROR
-
-  [test.html?variant=unstable]
-    disabled: http://test.bugs.example.org/bugs/12345
-
-A more complex manifest with conditional properties might be::
-
-  [canvas_test.html]
-    expected:
-      if os == "osx": FAIL
-      if os == "windows" and version == "XP": FAIL
-      PASS
-
-Note that ``PASS`` in the above works, but is unnecessary; ``PASS``
-(or ``OK``) is always the default expectation for (sub)tests.
deleted file mode 100644
--- a/testing/web-platform/harness/docs/index.rst
+++ /dev/null
@@ -1,24 +0,0 @@
-.. wptrunner documentation master file, created by
-   sphinx-quickstart on Mon May 19 18:14:20 2014.
-   You can adapt this file completely to your liking, but it should at least
-   contain the root `toctree` directive.
-
-Welcome to wptrunner's documentation!
-=====================================
-
-Contents:
-
-.. toctree::
-   :maxdepth: 2
-
-   usage
-   expectation
-   design
-
-Indices and tables
-==================
-
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
-
deleted file mode 100644
--- a/testing/web-platform/harness/docs/make.bat
+++ /dev/null
@@ -1,242 +0,0 @@
-@ECHO OFF
-
-REM Command file for Sphinx documentation
-
-if "%SPHINXBUILD%" == "" (
-	set SPHINXBUILD=sphinx-build
-)
-set BUILDDIR=_build
-set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
-set I18NSPHINXOPTS=%SPHINXOPTS% .
-if NOT "%PAPER%" == "" (
-	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
-	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
-)
-
-if "%1" == "" goto help
-
-if "%1" == "help" (
-	:help
-	echo.Please use `make ^<target^>` where ^<target^> is one of
-	echo.  html       to make standalone HTML files
-	echo.  dirhtml    to make HTML files named index.html in directories
-	echo.  singlehtml to make a single large HTML file
-	echo.  pickle     to make pickle files
-	echo.  json       to make JSON files
-	echo.  htmlhelp   to make HTML files and a HTML help project
-	echo.  qthelp     to make HTML files and a qthelp project
-	echo.  devhelp    to make HTML files and a Devhelp project
-	echo.  epub       to make an epub
-	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
-	echo.  text       to make text files
-	echo.  man        to make manual pages
-	echo.  texinfo    to make Texinfo files
-	echo.  gettext    to make PO message catalogs
-	echo.  changes    to make an overview over all changed/added/deprecated items
-	echo.  xml        to make Docutils-native XML files
-	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
-	echo.  linkcheck  to check all external links for integrity
-	echo.  doctest    to run all doctests embedded in the documentation if enabled
-	goto end
-)
-
-if "%1" == "clean" (
-	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
-	del /q /s %BUILDDIR%\*
-	goto end
-)
-
-
-%SPHINXBUILD% 2> nul
-if errorlevel 9009 (
-	echo.
-	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
-	echo.installed, then set the SPHINXBUILD environment variable to point
-	echo.to the full path of the 'sphinx-build' executable. Alternatively you
-	echo.may add the Sphinx directory to PATH.
-	echo.
-	echo.If you don't have Sphinx installed, grab it from
-	echo.http://sphinx-doc.org/
-	exit /b 1
-)
-
-if "%1" == "html" (
-	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
-	goto end
-)
-
-if "%1" == "dirhtml" (
-	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
-	goto end
-)
-
-if "%1" == "singlehtml" (
-	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
-	goto end
-)
-
-if "%1" == "pickle" (
-	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished; now you can process the pickle files.
-	goto end
-)
-
-if "%1" == "json" (
-	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished; now you can process the JSON files.
-	goto end
-)
-
-if "%1" == "htmlhelp" (
-	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished; now you can run HTML Help Workshop with the ^
-.hhp project file in %BUILDDIR%/htmlhelp.
-	goto end
-)
-
-if "%1" == "qthelp" (
-	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished; now you can run "qcollectiongenerator" with the ^
-.qhcp project file in %BUILDDIR%/qthelp, like this:
-	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\wptrunner.qhcp
-	echo.To view the help file:
-	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\wptrunner.qhc
-	goto end
-)
-
-if "%1" == "devhelp" (
-	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished.
-	goto end
-)
-
-if "%1" == "epub" (
-	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The epub file is in %BUILDDIR%/epub.
-	goto end
-)
-
-if "%1" == "latex" (
-	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
-	goto end
-)
-
-if "%1" == "latexpdf" (
-	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
-	cd %BUILDDIR%/latex
-	make all-pdf
-	cd %BUILDDIR%/..
-	echo.
-	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
-	goto end
-)
-
-if "%1" == "latexpdfja" (
-	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
-	cd %BUILDDIR%/latex
-	make all-pdf-ja
-	cd %BUILDDIR%/..
-	echo.
-	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
-	goto end
-)
-
-if "%1" == "text" (
-	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The text files are in %BUILDDIR%/text.
-	goto end
-)
-
-if "%1" == "man" (
-	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The manual pages are in %BUILDDIR%/man.
-	goto end
-)
-
-if "%1" == "texinfo" (
-	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
-	goto end
-)
-
-if "%1" == "gettext" (
-	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
-	goto end
-)
-
-if "%1" == "changes" (
-	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.The overview file is in %BUILDDIR%/changes.
-	goto end
-)
-
-if "%1" == "linkcheck" (
-	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Link check complete; look for any errors in the above output ^
-or in %BUILDDIR%/linkcheck/output.txt.
-	goto end
-)
-
-if "%1" == "doctest" (
-	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Testing of doctests in the sources finished, look at the ^
-results in %BUILDDIR%/doctest/output.txt.
-	goto end
-)
-
-if "%1" == "xml" (
-	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The XML files are in %BUILDDIR%/xml.
-	goto end
-)
-
-if "%1" == "pseudoxml" (
-	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
-	if errorlevel 1 exit /b 1
-	echo.
-	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
-	goto end
-)
-
-:end
deleted file mode 100644
--- a/testing/web-platform/harness/docs/usage.rst
+++ /dev/null
@@ -1,238 +0,0 @@
-Getting Started
-===============
-
-Installing wptrunner
---------------------
-
-The easiest way to install wptrunner is into a virtualenv, using pip::
-
-  virtualenv wptrunner
-  cd wptrunner
-  source bin/activate
-  pip install wptrunner
-
-This will install the base dependencies for wptrunner, but not any
-extra dependencies required to test against specific browsers. In
-order to do this you must use the extra requirements files in
-``$VIRTUAL_ENV/requirements/requirements_browser.txt``. For example,
-in order to test against Firefox you would have to run::
-
-  pip install -r requirements/requirements_firefox.txt
-
-If you intend to work on the code, the ``-e`` option to pip should be
-used in combination with a source checkout i.e. inside a virtual
-environment created as above::
-
-  git clone https://github.com/w3c/wptrunner.git
-  cd wptrunner
-  pip install -e ./
-
-In addition to the dependencies installed by pip, wptrunner requires
-a copy of the web-platform-tests repository. This can be located
-anywhere on the filesystem, but the easiest option is to put it
-under the same parent directory as the wptrunner checkout::
-
-  git clone https://github.com/w3c/web-platform-tests.git
-
-It is also necessary to generate a web-platform-tests ``MANIFEST.json``
-file. It's recommended to also put that under the same parent directory as
-the wptrunner checkout, in a directory named ``meta``::
-
-  mkdir meta
-  cd web-platform-tests
-  python manifest --path ../meta/MANIFEST.json
-
-The ``MANIFEST.json`` file needs to be regenerated each time the
-web-platform-tests checkout is updated. To aid with the update process
-there is a tool called ``wptupdate``, which is described in
-:ref:`wptupdate-label`.
-
-Running the Tests
------------------
-
-A test run is started using the ``wptrunner`` command.  The command
-takes multiple options, of which the following are most significant:
-
-``--product`` (defaults to `firefox`)
-  The product to test against: `b2g`, `chrome`, `firefox`, or `servo`.
-
-``--binary`` (required if product is `firefox` or `servo`)
-  The path to a binary file for the product (browser) to test against.
-
-``--webdriver-binary`` (required if product is `chrome`)
-  The path to a `*driver` binary; e.g., a `chromedriver` binary.
-
-``--certutil-binary`` (required if product is `firefox` [#]_)
-  The path to a `certutil` binary (for tests that must be run over https).
-
-``--metadata`` (required only when not `using default paths`_)
-  The path to a directory containing test metadata. [#]_
-
-``--tests`` (required only when not `using default paths`_)
-  The path to a directory containing a web-platform-tests checkout.
-
-``--prefs-root`` (required only when testing a Firefox binary)
-  The path to a directory containing Firefox test-harness preferences. [#]_
-
-``--config`` (should default to `wptrunner.default.ini`)
-  The path to the config (ini) file.
-
-.. [#] The ``--certutil-binary`` option is required when the product is
-   ``firefox`` unless ``--ssl-type=none`` is specified.
-
-.. [#] The ``--metadata`` path is to a directory that contains:
-
-  * a ``MANIFEST.json`` file (the web-platform-tests documentation has
-    instructions on generating this file)
-  * (optionally) any expectation files (see :ref:`wptupdate-label`)
-
-.. [#] Example ``--prefs-root`` value: ``~/mozilla-central/testing/profiles``.
-
-There are also a variety of other command-line options available; use
-``--help`` to list them.
-
-The following examples show how to start wptrunner with various options.
-
-------------------
-Starting wptrunner
-------------------
-
-The examples below assume the following directory layout,
-though no specific folder structure is required::
-
-  ~/testtwf/wptrunner          # wptrunner checkout
-  ~/testtwf/web-platform-tests # web-platform-tests checkout
-  ~/testtwf/meta               # metadata
-
-To test a Firefox Nightly build in an OS X environment, you might start
-wptrunner using something similar to the following example::
-
-  wptrunner --metadata=~/testtwf/meta/ --tests=~/testtwf/web-platform-tests/ \
-    --binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/dist/Nightly.app/Contents/MacOS/firefox \
-    --certutil-binary=~/mozilla-central/obj-x86_64-apple-darwin14.3.0/security/nss/cmd/certutil/certutil \
-    --prefs-root=~/mozilla-central/testing/profiles
-
-
-And to test a Chromium build in an OS X environment, you might start
-wptrunner using something similar to the following example::
-
-  wptrunner --metadata=~/testtwf/meta/ --tests=~/testtwf/web-platform-tests/ \
-    --binary=~/chromium/src/out/Release/Chromium.app/Contents/MacOS/Chromium \
-    --webdriver-binary=/usr/local/bin/chromedriver --product=chrome
-
---------------------
-Running test subsets
---------------------
-
-To restrict a test run just to tests in a particular web-platform-tests
-subdirectory, specify the directory name in the positional arguments after
-the options; for example, to run just the tests in the `dom` subdirectory::
-
-  wptrunner --metadata=~/testtwf/meta --tests=~/testtwf/web-platform-tests/ \
-    --binary=/path/to/firefox --certutil-binary=/path/to/certutil \
-    --prefs-root=/path/to/testing/profiles \
-    dom
-
--------------------
-Running in parallel
--------------------
-
-To speed up the testing process, use the ``--processes`` option to have
-wptrunner run multiple browser instances in parallel. For example, to
-have wptrunner attempt to run tests with six browser instances
-in parallel, specify ``--processes=6``. But note that behaviour in this
-mode is necessarily less deterministic than with ``--processes=1`` (the
-default), so there may be more noise in the test results.
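
For example, reusing the paths from the earlier Firefox example::

  wptrunner --metadata=~/testtwf/meta --tests=~/testtwf/web-platform-tests/ \
    --binary=/path/to/firefox --certutil-binary=/path/to/certutil \
    --prefs-root=/path/to/testing/profiles \
    --processes=6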
-
--------------------
-Using default paths
--------------------
-
-The (otherwise-required) ``--tests`` and ``--metadata`` command-line
-options can be omitted if a configuration file is found that
-contains a section specifying the ``tests`` and ``metadata`` keys.
-
-See the `Configuration File`_ section for more information about
-configuration files, including information about their expected
-locations.
-
-The content of the ``wptrunner.default.ini`` default configuration file
-makes wptrunner look for tests (that is, a web-platform-tests checkout)
-as a subdirectory of the current directory named ``tests``, and for
-metadata files in a subdirectory of the current directory named ``meta``.
-
-Output
-------
-
-wptrunner uses the :py:mod:`mozlog` package for output. This
-structures events such as test results or log messages as JSON objects
-that can then be fed to other tools for interpretation. More details
-about the message format are given in the
-:py:mod:`mozlog` documentation.
-
-By default the raw JSON messages are dumped to stdout. This is
-convenient for piping into other tools, but not ideal for humans
-reading the output. :py:mod:`mozlog` comes with several other
-formatters, which are accessible through command line options. The
-general format of these options is ``--log-name=dest``, where ``name``
-is the name of the format and ``dest`` is a path to a destination
-file, or ``-`` for stdout. The raw JSON data is written by the ``raw``
-formatter, so the default setup corresponds to ``--log-raw=-``.
-
-A reasonable output format for humans is provided as ``mach``. To
-output the full raw log to a file and a human-readable summary to
-stdout, one might pass the options::
-
-  --log-raw=output.log --log-mach=-
-
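Because the ``raw`` formatter writes one JSON object per line, the log
can be post-processed without mozlog at all. A small sketch that counts
results by status (field names follow the mozlog message format)::

  import json
  from collections import Counter

  statuses = Counter()
  with open("output.log") as f:
      for line in f:
          data = json.loads(line)
          if data["action"] == "test_end":
              statuses[data["status"]] += 1
  print(statuses)
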
-Configuration File
-------------------
-
-wptrunner uses a ``.ini`` file for some of its configuration. The
-file has three sections: ``[products]``,
-``[manifest:default]`` and ``[web-platform-tests]``.
-
-``[products]`` is used to
-define the set of available products. By default this section is empty,
-which means that all the products distributed with wptrunner are
-enabled (although their dependencies may not be installed). The set
-of enabled products can be limited by using the product name as the
-key. For built-in products the value is left empty. It is also possible
-to provide the path to a script implementing the browser functionality,
-e.g.::
-
-  [products]
-  chrome =
-  netscape4 = path/to/netscape.py
-
-``[manifest:default]`` specifies the default paths for the tests and metadata,
-relative to the config file. For example::
-
-  [manifest:default]
-  tests = ~/testtwf/web-platform-tests
-  metadata = ~/testtwf/meta
-
-
-``[web-platform-tests]`` is used to set the properties of the upstream
-repository when updating the paths. ``remote_url`` specifies the git
-url to pull from; ``branch`` the branch to sync against; and
-``sync_path`` the local path, relative to the configuration file, to
-use when checking out the tests, e.g.::
-
-  [web-platform-tests]
-  remote_url = https://github.com/w3c/web-platform-tests.git
-  branch = master
-  sync_path = sync
-
-A configuration file must contain all the above fields; falling back
-to the default values for unspecified fields is not yet supported.
-
-The ``wptrunner`` and ``wptupdate`` commands will use configuration
-files in the following order:
-
- * Any path supplied with a ``--config`` flag to the command.
-
- * A file called ``wptrunner.ini`` in the current directory
-
- * The default configuration file (``wptrunner.default.ini`` in the
-   source directory)
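
Putting the three sections together, a complete ``wptrunner.ini`` might
look like the following (the paths are illustrative)::

  [products]

  [manifest:default]
  tests = ~/testtwf/web-platform-tests
  metadata = ~/testtwf/meta

  [web-platform-tests]
  remote_url = https://github.com/w3c/web-platform-tests.git
  branch = master
  sync_path = sync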
deleted file mode 100644
--- a/testing/web-platform/harness/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-html5lib >= 0.99
-mozinfo >= 0.7
-mozlog >= 3.3
-mozdebug >= 0.1
deleted file mode 100644
--- a/testing/web-platform/harness/requirements_chrome.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-mozprocess >= 0.19
-selenium >= 2.41.0
deleted file mode 100644
--- a/testing/web-platform/harness/requirements_firefox.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-marionette_driver >= 0.4
-mozprofile >= 0.21
-mozprocess >= 0.19
-mozcrash >= 0.13
-mozrunner >= 6.7
-mozleak >= 0.1
deleted file mode 100644
--- a/testing/web-platform/harness/requirements_servo.txt
+++ /dev/null
@@ -1,1 +0,0 @@
-mozprocess >= 0.19
deleted file mode 100644
--- a/testing/web-platform/harness/setup.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import glob
-import os
-import sys
-import textwrap
-
-from setuptools import setup, find_packages
-
-here = os.path.split(__file__)[0]
-
-PACKAGE_NAME = 'wptrunner'
-PACKAGE_VERSION = '1.14'
-
-# Dependencies
-with open(os.path.join(here, "requirements.txt")) as f:
-    deps = f.read().splitlines()
-
-# Browser-specific requirements
-requirements_files = glob.glob("requirements_*.txt")
-
-profile_dest = None
-dest_exists = False
-
-setup(name=PACKAGE_NAME,
-      version=PACKAGE_VERSION,
-      description="Harness for running the W3C web-platform-tests against various products",
-      author='Mozilla Automation and Testing Team',
-      author_email='tools@lists.mozilla.org',
-      license='MPL 2.0',
-      packages=find_packages(exclude=["tests", "metadata", "prefs"]),
-      entry_points={
-          'console_scripts': [
-              'wptrunner = wptrunner.wptrunner:main',
-              'wptupdate = wptrunner.update:main',
-          ]
-      },
-      zip_safe=False,
-      platforms=['Any'],
-      classifiers=['Development Status :: 4 - Beta',
-                   'Environment :: Console',
-                   'Intended Audience :: Developers',
-                   'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
-                   'Operating System :: OS Independent'],
-      package_data={"wptrunner": ["executors/testharness_marionette.js",
-                                  "executors/testharness_webdriver.js",
-                                  "executors/reftest.js",
-                                  "executors/reftest-wait.js",
-                                  "testharnessreport.js",
-                                  "testharness_runner.html",
-                                  "config.json",
-                                  "wptrunner.default.ini",
-                                  "browsers/server-locations.txt",
-                                  "browsers/b2g_setup/*",
-                                  "prefs/*"]},
-      include_package_data=True,
-      data_files=[("requirements", requirements_files)],
-      install_requires=deps
-     )
-
-if "install" in sys.argv:
-    path = os.path.relpath(os.path.join(sys.prefix, "requirements"), os.curdir)
-    print textwrap.fill("""In order to use with one of the built-in browser
-products, you will need to install the extra dependencies. These are provided
-as requirements_[name].txt in the %s directory and can be installed using
-e.g.""" % path, 80)
-
-    print """
-
-pip install -r %s/requirements_firefox.txt
-""" % path
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/reftest/reftest_and_fail.html.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[reftest_and_fail.html]
-  type: reftest
-  expected: FAIL
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/reftest/reftest_cycle_fail.html.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[reftest_cycle_fail.html]
-  type: reftest
-  expected: FAIL
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/reftest/reftest_match_fail.html.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[reftest_match_fail.html]
-  type: reftest
-  expected: FAIL
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/reftest/reftest_mismatch_fail.html.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[reftest_mismatch_fail.html]
-  type: reftest
-  expected: FAIL
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/reftest/reftest_ref_timeout.html.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[reftest_ref_timeout.html]
-  type: reftest
-  expected: TIMEOUT
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/reftest/reftest_timeout.html.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[reftest_timeout.html]
-  type: reftest
-  expected: TIMEOUT
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/testharness/firefox/__dir__.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-prefs: ["browser.display.foreground_color:#FF0000",
-        "browser.display.background_color:#000000"]
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/testharness/firefox/subdir/test_pref_reset.html.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[test_pref_reset.html]
-  prefs: [@Reset]
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/testharness/firefox/test_pref_set.html.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[test_pref_set.html]
-  prefs: ["browser.display.foreground_color:#00FF00",
-          "browser.display.background_color:#000000"]
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/testharness/subdir/__dir__.ini
+++ /dev/null
@@ -1,1 +0,0 @@
-disabled: true
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/testharness/subdir/testharness_1.html.ini
+++ /dev/null
@@ -1,2 +0,0 @@
-[testharness_1.html]
-  disabled: @False
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/testharness/testharness_0.html.ini
+++ /dev/null
@@ -1,4 +0,0 @@
-[testharness_0.html]
-  type: testharness
-  [Test that should fail]
-    expected: FAIL
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/testharness/testharness_error.html.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[testharness_error.html]
-  type: testharness
-  expected: ERROR
deleted file mode 100644
--- a/testing/web-platform/harness/test/metadata/testharness/testharness_timeout.html.ini
+++ /dev/null
@@ -1,3 +0,0 @@
-[testharness_timeout.html]
-  type: testharness
-  expected: TIMEOUT
deleted file mode 100644
--- a/testing/web-platform/harness/test/test.cfg.example
+++ /dev/null
@@ -1,20 +0,0 @@
-[general]
-tests=/path/to/web-platform-tests/
-metadata=/path/to/web-platform-tests/
-ssl-type=none
-
-# [firefox]
-# binary=/path/to/firefox
-# prefs-root=/path/to/gecko-src/testing/profiles/
-
-# [servo]
-# binary=/path/to/servo-src/target/release/servo
-# exclude=testharness # Because it needs a special testharness.js
-
-# [servodriver]
-# binary=/path/to/servo-src/target/release/servo
-# exclude=testharness # Because it needs a special testharness.js
-
-# [chrome]
-# binary=/path/to/chrome
-# webdriver-binary=/path/to/chromedriver
deleted file mode 100644
--- a/testing/web-platform/harness/test/test.py
+++ /dev/null
@@ -1,166 +0,0 @@
-import ConfigParser
-import argparse
-import json
-import os
-import sys
-import tempfile
-import threading
-import time
-from StringIO import StringIO
-
-from mozlog import structuredlog, reader
-from mozlog.handlers import BaseHandler, StreamHandler, StatusHandler
-from mozlog.formatters import MachFormatter
-from wptrunner import wptcommandline, wptrunner
-
-here = os.path.abspath(os.path.dirname(__file__))
-
-def setup_wptrunner_logging(logger):
-    structuredlog.set_default_logger(logger)
-    wptrunner.logger = logger
-    wptrunner.wptlogging.setup_stdlib_logger()
-
-class ResultHandler(BaseHandler):
-    def __init__(self, verbose=False, logger=None):
-        self.inner = StreamHandler(sys.stdout, MachFormatter())
-        BaseHandler.__init__(self, self.inner)
-        self.product = None
-        self.verbose = verbose
-        self.logger = logger
-
-        self.register_message_handlers("wptrunner-test", {"set-product": self.set_product})
-
-    def set_product(self, product):
-        self.product = product
-
-    def __call__(self, data):
-        if self.product is not None and data["action"] in ["suite_start", "suite_end"]:
-            # Hack: mozlog sets some internal state to prevent multiple suite_start or
-            # suite_end messages. We actually want that here (one from the metaharness
-            # and one from the individual test type harness), so override that internal
-            # state (a better solution might be to not share loggers, but this works well
-            # enough)
-            self.logger._state.suite_started = True
-            return
-
-        if (not self.verbose and
-            (data["action"] == "process_output" or
-             data["action"] == "log" and data["level"] not in ["error", "critical"])):
-            return
-
-        if "test" in data:
-            data = data.copy()
-            data["test"] = "%s: %s" % (self.product, data["test"])
-
-        return self.inner(data)
-
-def test_settings():
-    return {
-        "include": "_test",
-        "manifest-update": "",
-        "no-capture-stdio": ""
-    }
-
-def read_config():
-    parser = ConfigParser.ConfigParser()
-    parser.read("test.cfg")
-
-    rv = {"general":{},
-          "products":{}}
-
-    rv["general"].update(dict(parser.items("general")))
-
-    # This only allows one configuration per product for now
-    for product in parser.sections():
-        if product != "general":
-            dest = rv["products"][product] = {}
-            for key, value in parser.items(product):
-                rv["products"][product][key] = value
-
-    return rv
-
-def run_tests(product, kwargs):
-    kwargs["test_paths"]["/_test/"] = {"tests_path": os.path.join(here, "testdata"),
-                                       "metadata_path": os.path.join(here, "metadata")}
-
-    wptrunner.run_tests(**kwargs)
-
-def settings_to_argv(settings):
-    rv = []
-    for name, value in settings.iteritems():
-        key = "--%s" % name
-        if not value:
-            rv.append(key)
-        elif isinstance(value, list):
-            for item in value:
-                rv.extend([key, item])
-        else:
-            rv.extend([key, value])
-    return rv
-
-def set_from_args(settings, args):
-    if args.test:
-        settings["include"] = args.test
-    if args.tags:
-        settings["tags"] = args.tags
-
-def run(config, args):
-    logger = structuredlog.StructuredLogger("web-platform-tests")
-    logger.add_handler(ResultHandler(logger=logger, verbose=args.verbose))
-    setup_wptrunner_logging(logger)
-
-    parser = wptcommandline.create_parser()
-
-    logger.suite_start(tests=[])
-
-    for product, product_settings in config["products"].iteritems():
-        if args.product and product not in args.product:
-            continue
-
-        settings = test_settings()
-        settings.update(config["general"])
-        settings.update(product_settings)
-        settings["product"] = product
-        set_from_args(settings, args)
-
-        kwargs = vars(parser.parse_args(settings_to_argv(settings)))
-        wptcommandline.check_args(kwargs)
-
-        logger.send_message("wptrunner-test", "set-product", product)
-
-        run_tests(product, kwargs)
-
-    logger.send_message("wptrunner-test", "set-product", None)
-    logger.suite_end()
-
-def get_parser():
-    parser = argparse.ArgumentParser()
-    parser.add_argument("-v", "--verbose", action="store_true", default=False,
-                        help="verbose log output")
-    parser.add_argument("--product", action="append",
-                        help="Specific product to include in test run")
-    parser.add_argument("--pdb", action="store_true",
-                        help="Invoke pdb on uncaught exception")
-    parser.add_argument("--tag", action="append", dest="tags",
-                        help="tags to select tests")
-    parser.add_argument("test", nargs="*",
-                        help="Specific tests to include in test run")
-    return parser
-
-def main():
-    config = read_config()
-
-    args = get_parser().parse_args()
-
-    try:
-        run(config, args)
-    except Exception:
-        if args.pdb:
-            import pdb, traceback
-            print traceback.format_exc()
-            pdb.post_mortem()
-        else:
-            raise
-
-if __name__ == "__main__":
-    main()
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/green-ref.html
+++ /dev/null
@@ -1,4 +0,0 @@
-<link rel=match href=green.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/green.html
+++ /dev/null
@@ -1,3 +0,0 @@
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/red.html
+++ /dev/null
@@ -1,3 +0,0 @@
-<style>
-:root {background-color:red}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest.https.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<link rel=match href=green.html>
-<style>
-:root {background-color:red}
-</style>
-<script>
-if (window.location.protocol === "https:") {
-   document.documentElement.style.backgroundColor = "green";
-}
-</script>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_and_fail.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<title>Reftest chain that should fail</title>
-<link rel=match href=reftest_and_fail_0-ref.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_and_fail_0-ref.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<title>Reftest chain that should fail</title>
-<link rel=match href=red.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_cycle.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<title>Reftest with cycle, all match</title>
-<link rel=match href=reftest_cycle_0-ref.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_cycle_0-ref.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<title>OR match that should pass</title>
-<link rel=match href=reftest_cycle_1-ref.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_cycle_1-ref.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<title>Reftest with cycle, all match</title>
-<link rel=match href=reftest_cycle.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_cycle_fail.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<title>Reftest with cycle, fails</title>
-<link rel=match href=reftest_cycle_fail_0-ref.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_cycle_fail_0-ref.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<title>Reftest with cycle, fails</title>
-<link rel=mismatch href=reftest_cycle_fail.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_match.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<title>rel=match that should pass</title>
-<link rel=match href=green.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_match_fail.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<title>rel=match that should fail</title>
-<link rel=match href=red.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_mismatch.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<title>rel=mismatch that should pass</title>
-<link rel=mismatch href=red.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_mismatch_fail.html
+++ /dev/null
@@ -1,5 +0,0 @@
-<title>rel=mismatch that should fail</title>
-<link rel=mismatch href=green.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_or_0.html
+++ /dev/null
@@ -1,6 +0,0 @@
-<title>OR match that should pass</title>
-<link rel=match href=red.html>
-<link rel=match href=green.html>
-<style>
-:root {background-color:green}
-</style>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_ref_timeout-ref.html
+++ /dev/null
@@ -1,6 +0,0 @@
-<html class="reftest-wait">
-<title>rel=match that should time out in the ref</title>
-<link rel=match href=reftest_ref_timeout-ref.html>
-<style>
-:root {background-color:green}
-</style>
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_ref_timeout.html
+++ /dev/null
@@ -1,6 +0,0 @@
-<html>
-<title>rel=match that should time out in the ref</title>
-<link rel=match href=reftest_ref_timeout-ref.html>
-<style>
-:root {background-color:green}
-</style>
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_timeout.html
+++ /dev/null
@@ -1,6 +0,0 @@
-<html class="reftest-wait">
-<title>rel=match that should time out</title>
-<link rel=match href=green.html>
-<style>
-:root {background-color:green}
-</style>
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/reftest/reftest_wait_0.html
+++ /dev/null
@@ -1,13 +0,0 @@
-<html class="reftest-wait">
-<title>rel=match that should fail</title>
-<link rel=match href=red.html>
-<style>
-:root {background-color:red}
-</style>
-<script>
-setTimeout(function() {
-  document.documentElement.style.backgroundColor = "green";
-  document.documentElement.className = "";
-}, 2000);
-</script>
-</html>
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/testharness/firefox/subdir/test_pref_inherit.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!doctype html>
-<title>Example pref test</title>
-<script src="/resources/testharness.js"></script>
-<script src="/resources/testharnessreport.js"></script>
-<p>Test requires the pref browser.display.foreground_color to be set to #00FF00</p>
-<script>
-test(function() {
-  assert_equals(getComputedStyle(document.body).color, "rgb(255, 0, 0)");
-}, "Test that pref was set");
-</script>
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/testharness/firefox/subdir/test_pref_reset.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!doctype html>
-<title>Example pref test</title>
-<script src="/resources/testharness.js"></script>
-<script src="/resources/testharnessreport.js"></script>
-<p>Test requires the pref browser.display.foreground_color to be set to #00FF00</p>
-<script>
-test(function() {
-  assert_equals(getComputedStyle(document.body).color, "rgb(0, 0, 0)");
-}, "Test that pref was reset");
-</script>
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/testharness/firefox/test_pref_dir.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!doctype html>
-<title>Example pref test</title>
-<script src="/resources/testharness.js"></script>
-<script src="/resources/testharnessreport.js"></script>
-<p>Test requires the pref browser.display.foreground_color to be set to #FF0000</p>
-<script>
-test(function() {
-  assert_equals(getComputedStyle(document.body).color, "rgb(255, 0, 0)");
-}, "Test that pref was set");
-</script>
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/testharness/firefox/test_pref_set.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!doctype html>
-<title>Example pref test</title>
-<script src="/resources/testharness.js"></script>
-<script src="/resources/testharnessreport.js"></script>
-<p>Test requires the pref browser.display.foreground_color to be set to #00FF00</p>
-<script>
-test(function() {
-  assert_equals(getComputedStyle(document.body).color, "rgb(0, 255, 0)");
-}, "Test that pref was set");
-</script>
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/testharness/subdir/testharness_1.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<!doctype html>
-<title>Test should be enabled</title>
-<script src="/resources/testharness.js"></script>
-<script src="/resources/testharnessreport.js"></script>
-<script>
-test(function() {
-  assert_true(true);
-}, "Test that should pass");
-</script>
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/testharness/testharness.https.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!doctype html>
-<title>Example https test</title>
-<script src="/resources/testharness.js"></script>
-<script src="/resources/testharnessreport.js"></script>
-<script>
-test(function() {
-  assert_equals(window.location.protocol, "https:");
-}, "Test that file was loaded with the correct protocol");
-
-</script>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/testharness/testharness_0.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<!doctype html>
-<title>Test should be disabled</title>
-<script src="/resources/testharness.js"></script>
-<script src="/resources/testharnessreport.js"></script>
-<script>
-test(function() {
-  assert_true(false);
-}, "Test that should fail");
-</script>
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/testharness/testharness_error.html
+++ /dev/null
@@ -1,7 +0,0 @@
-<!doctype html>
-<title>testharness.js test that should error</title>
-<script src="/resources/testharness.js"></script>
-<script src="/resources/testharnessreport.js"></script>
-<script>
-undefined_function()
-</script>
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/testharness/testharness_long_timeout.html
+++ /dev/null
@@ -1,9 +0,0 @@
-<!doctype html>
-<title>testharness.js test with long timeout</title>
-<meta name=timeout content=long>
-<script src="/resources/testharness.js"></script>
-<script src="/resources/testharnessreport.js"></script>
-<script>
-var t = async_test("Long timeout test");
-setTimeout(t.step_func_done(function() {assert_true(true)}), 15*1000);
-</script>
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/test/testdata/testharness/testharness_timeout.html
+++ /dev/null
@@ -1,6 +0,0 @@
-<!doctype html>
-<title>Simple testharness.js usage</title>
-<script src="/resources/testharness.js"></script>
-<script src="/resources/testharnessreport.js"></script>
-
-// This file should time out, obviously
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/tox.ini
+++ /dev/null
@@ -1,15 +0,0 @@
-[pytest]
-xfail_strict=true
-
-[tox]
-envlist = {py27,pypy}-{base,b2g,chrome,firefox,servo}
-
-[testenv]
-deps =
-     pytest>=2.9
-     -r{toxinidir}/requirements.txt
-     chrome: -r{toxinidir}/requirements_chrome.txt
-     firefox: -r{toxinidir}/requirements_firefox.txt
-     servo: -r{toxinidir}/requirements_servo.txt
-
-commands = py.test []
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner.default.ini
+++ /dev/null
@@ -1,11 +0,0 @@
-[products]
-
-[web-platform-tests]
-remote_url = https://github.com/w3c/web-platform-tests.git
-branch = master
-sync_path = %(pwd)s/sync
-
-[manifest:default]
-tests = %(pwd)s/tests
-metadata = %(pwd)s/meta
-url_base = /
\ No newline at end of file
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/browsers/__init__.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-"""Subpackage where each product is defined. Each product is created by adding a
-a .py file containing a __wptrunner__ variable in the global scope. This must be
-a dictionary with the fields
-
-"product": Name of the product, assumed to be unique.
-"browser": String indicating the Browser implementation used to launch that
-           product.
-"executor": Dictionary with keys as supported test types and values as the name
-            of the Executor implemantation that will be used to run that test
-            type.
-"browser_kwargs": String naming function that takes product, binary,
-                  prefs_root and the wptrunner.run_tests kwargs dict as arguments
-                  and returns a dictionary of kwargs to use when creating the
-                  Browser class.
-"executor_kwargs": String naming a function that takes http server url and
-                   timeout multiplier and returns kwargs to use when creating
-                   the executor class.
-"env_options": String naming a funtion of no arguments that returns the
-               arguments passed to the TestEnvironment.
-
-All classes and functions named in the above dict must be imported into the
-module global scope.
-"""
-
-product_list = ["chrome",
-                "edge",
-                "firefox",
-                "servo",
-                "servodriver"]
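
A minimal sketch of a product module following the docstring above; the product name and the class/function names here are hypothetical, and the check_args entry mirrors the real product modules later in this patch:

    # mybrowser.py (hypothetical product module, sketch only)
    __wptrunner__ = {"product": "mybrowser",
                     "check_args": "check_args",
                     "browser": "MyBrowser",
                     "executor": {"testharness": "MyTestharnessExecutor"},
                     "browser_kwargs": "browser_kwargs",
                     "executor_kwargs": "executor_kwargs",
                     "env_options": "env_options"}
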
deleted file mode 100644
index f9cbd5300ad650f8369ab02a4adc8866a5f0dea1..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
GIT binary patch
literal 0
Hc$@<O00001
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/browsers/base.py
+++ /dev/null
@@ -1,166 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import os
-import platform
-import socket
-from abc import ABCMeta, abstractmethod
-
-from ..wptcommandline import require_arg
-
-here = os.path.split(__file__)[0]
-
-
-def cmd_arg(name, value=None):
-    prefix = "-" if platform.system() == "Windows" else "--"
-    rv = prefix + name
-    if value is not None:
-        rv += "=" + value
-    return rv
-
-
-def get_free_port(start_port, exclude=None):
-    """Get the first port number after start_port (inclusive) that is
-    not currently bound.
-
-    :param start_port: Integer port number at which to start testing.
-    :param exclude: Set of port numbers to skip"""
-    port = start_port
-    while True:
-        if exclude and port in exclude:
-            port += 1
-            continue
-        s = socket.socket()
-        try:
-            s.bind(("127.0.0.1", port))
-        except socket.error:
-            port += 1
-        else:
-            return port
-        finally:
-            s.close()
-
-
-def browser_command(binary, args, debug_info):
-    if debug_info:
-        if debug_info.requiresEscapedArgs:
-            args = [item.replace("&", "\\&") for item in args]
-        debug_args = [debug_info.path] + debug_info.args
-    else:
-        debug_args = []
-
-    command = [binary] + args
-
-    return debug_args, command
-
-
-class BrowserError(Exception):
-    pass
-
-
-class Browser(object):
-    __metaclass__ = ABCMeta
-
-    process_cls = None
-    init_timeout = 30
-
-    def __init__(self, logger):
-        """Abstract class serving as the basis for Browser implementations.
-
-        The Browser is used in the TestRunnerManager to start and stop the browser
-        process, and to check the state of that process. This class also acts as a
-        context manager, enabling it to do browser-specific setup at the start of
-        the test run and cleanup after the run is complete.
-
-        :param logger: Structured logger to use for output.
-        """
-        self.logger = logger
-
-    def __enter__(self):
-        self.setup()
-        return self
-
-    def __exit__(self, *args, **kwargs):
-        self.cleanup()
-
-    def setup(self):
-        """Used for browser-specific setup that happens at the start of a test run"""
-        pass
-
-    def settings(self, test):
-        return {}
-
-    @abstractmethod
-    def start(self, **kwargs):
-        """Launch the browser object and get it into a state where is is ready to run tests"""
-        pass
-
-    @abstractmethod
-    def stop(self, force=False):
-        """Stop the running browser process."""
-        pass
-
-    @abstractmethod
-    def pid(self):
-        """pid of the browser process or None if there is no pid"""
-        pass
-
-    @abstractmethod
-    def is_alive(self):
-        """Boolean indicating whether the browser process is still running"""
-        pass
-
-    def setup_ssl(self, hosts):
-        """Return a certificate to use for tests requiring ssl that will be trusted by the browser"""
-        raise NotImplementedError("ssl testing not supported")
-
-    def cleanup(self):
-        """Browser-specific cleanup that is run after the testrun is finished"""
-        pass
-
-    def executor_browser(self):
-        """Returns the ExecutorBrowser subclass for this Browser subclass and the keyword arguments
-        with which it should be instantiated"""
-        return ExecutorBrowser, {}
-
-    def log_crash(self, process, test):
-        """Log information about any crash that happened in the browser,
-        delegating to the structured logger by default"""
-        self.logger.crash(process, test)
-
-
-class NullBrowser(Browser):
-    """No-op browser to use in scenarios where the TestRunnerManager shouldn't
-    actually own the browser process (e.g. Servo where we start one browser
-    per test)"""
-
-    def __init__(self, logger, **kwargs):
-        super(NullBrowser, self).__init__(logger)
-
-    def start(self, **kwargs):
-        pass
-
-    def stop(self, force=False):
-        pass
-
-    def pid(self):
-        return None
-
-    def is_alive(self):
-        return True
-
-    def on_output(self, line):
-        raise NotImplementedError
-
-
-class ExecutorBrowser(object):
-    def __init__(self, **kwargs):
-        """View of the Browser used by the Executor object.
-        This is needed because the Executor runs in a child process and
-        we can't ship Browser instances between processes on Windows.
-
-        Typically this will have a few product-specific properties set,
-        but in some cases it may have more elaborate methods for setting
-        up the browser from the runner process.
-        """
-        for k, v in kwargs.iteritems():
-            setattr(self, k, v)
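
Taken together, a Browser is driven as a context manager around a test run. A hedged sketch using NullBrowser, which needs no real process; the logger is assumed to be any mozlog structured logger:

    from mozlog.structuredlog import StructuredLogger

    logger = StructuredLogger("sketch")
    with NullBrowser(logger) as browser:   # __enter__ calls setup()
        browser.start()                    # no-op for NullBrowser
        assert browser.is_alive()
        cls, kwargs = browser.executor_browser()
        view = cls(**kwargs)               # ExecutorBrowser view used in the child process
    # __exit__ calls cleanup()
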
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/browsers/chrome.py
+++ /dev/null
@@ -1,89 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-from .base import Browser, ExecutorBrowser, require_arg
-from ..webdriver_server import ChromeDriverServer
-from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorselenium import (SeleniumTestharnessExecutor,
-                                          SeleniumRefTestExecutor)
-
-
-__wptrunner__ = {"product": "chrome",
-                 "check_args": "check_args",
-                 "browser": "ChromeBrowser",
-                 "executor": {"testharness": "SeleniumTestharnessExecutor",
-                              "reftest": "SeleniumRefTestExecutor"},
-                 "browser_kwargs": "browser_kwargs",
-                 "executor_kwargs": "executor_kwargs",
-                 "env_options": "env_options"}
-
-
-def check_args(**kwargs):
-    require_arg(kwargs, "webdriver_binary")
-
-
-def browser_kwargs(test_type, run_info_data, **kwargs):
-    return {"binary": kwargs["binary"],
-            "webdriver_binary": kwargs["webdriver_binary"]}
-
-
-def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
-                    **kwargs):
-    from selenium.webdriver import DesiredCapabilities
-
-    executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
-    executor_kwargs["close_after_done"] = True
-    capabilities = dict(DesiredCapabilities.CHROME.items())
-    capabilities.setdefault("chromeOptions", {})["prefs"] = {
-        "profile": {
-            "default_content_setting_values": {
-                "popups": 1
-            }
-        }
-    }
-    for (kwarg, capability) in [("binary", "binary"), ("binary_args", "args")]:
-        if kwargs[kwarg] is not None:
-            capabilities["chromeOptions"][capability] = kwargs[kwarg]
-    executor_kwargs["capabilities"] = capabilities
-    return executor_kwargs
-
-
-def env_options():
-    return {"host": "web-platform.test",
-            "bind_hostname": "true"}
-
-
-class ChromeBrowser(Browser):
-    """Chrome is backed by chromedriver, which is supplied through
-    ``wptrunner.webdriver.ChromeDriverServer``.
-    """
-
-    def __init__(self, logger, binary, webdriver_binary="chromedriver"):
-        """Creates a new representation of Chrome.  The `binary` argument gives
-        the browser binary to use for testing."""
-        Browser.__init__(self, logger)
-        self.binary = binary
-        self.server = ChromeDriverServer(self.logger, binary=webdriver_binary)
-
-    def start(self, **kwargs):
-        self.server.start(block=False)
-
-    def stop(self, force=False):
-        self.server.stop(force=force)
-
-    def pid(self):
-        return self.server.pid
-
-    def is_alive(self):
-        # TODO(ato): This only indicates the driver is alive,
-        # and doesn't say anything about whether a browser session
-        # is active.
-        return self.server.is_alive()
-
-    def cleanup(self):
-        self.stop()
-
-    def executor_browser(self):
-        return ExecutorBrowser, {"webdriver_url": self.server.url}
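
For reference, the capabilities dict assembled in executor_kwargs above comes out roughly as follows; DesiredCapabilities.CHROME contributes further defaults that vary by Selenium version, and the binary path is hypothetical:

    capabilities = {
        "browserName": "chrome",
        # ...remaining DesiredCapabilities.CHROME defaults...
        "chromeOptions": {
            "prefs": {"profile": {"default_content_setting_values": {"popups": 1}}},
            "binary": "/path/to/chrome",   # present only when a browser binary is passed
        },
    }
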
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/browsers/edge.py
+++ /dev/null
@@ -1,71 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-from .base import Browser, ExecutorBrowser, require_arg
-from ..webdriver_server import EdgeDriverServer
-from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorselenium import (SeleniumTestharnessExecutor,
-                                          SeleniumRefTestExecutor)
-
-__wptrunner__ = {"product": "edge",
-                 "check_args": "check_args",
-                 "browser": "EdgeBrowser",
-                 "executor": {"testharness": "SeleniumTestharnessExecutor",
-                              "reftest": "SeleniumRefTestExecutor"},
-                 "browser_kwargs": "browser_kwargs",
-                 "executor_kwargs": "executor_kwargs",
-                 "env_options": "env_options"}
-
-
-def check_args(**kwargs):
-    require_arg(kwargs, "webdriver_binary")
-
-
-def browser_kwargs(test_type, run_info_data, **kwargs):
-    return {"webdriver_binary": kwargs["webdriver_binary"]}
-
-
-def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
-                    **kwargs):
-    from selenium.webdriver import DesiredCapabilities
-
-    executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
-    executor_kwargs["close_after_done"] = True
-    executor_kwargs["capabilities"] = dict(DesiredCapabilities.EDGE.items())
-    return executor_kwargs
-
-
-def env_options():
-    return {"host": "web-platform.test",
-            "bind_hostname": "true",
-            "supports_debugger": False}
-
-
-class EdgeBrowser(Browser):
-    used_ports = set()
-
-    def __init__(self, logger, webdriver_binary):
-        Browser.__init__(self, logger)
-        self.server = EdgeDriverServer(self.logger, binary=webdriver_binary)
-        self.webdriver_host = "localhost"
-        self.webdriver_port = self.server.port
-
-    def start(self, **kwargs):
-        self.logger.debug(self.server.url)
-        self.server.start()
-
-    def stop(self, force=False):
-        self.server.stop()
-
-    def pid(self):
-        return self.server.pid
-
-    def is_alive(self):
-        # TODO(ato): This only indicates the server is alive,
-        # and doesn't say anything about whether a browser session
-        # is active.
-        return self.server.is_alive()
-
-    def cleanup(self):
-        self.stop()
-
-    def executor_browser(self):
-        return ExecutorBrowser, {"webdriver_url": self.server.url}
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/browsers/firefox.py
+++ /dev/null
@@ -1,354 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import os
-import platform
-import signal
-import subprocess
-import sys
-
-import mozinfo
-import mozleak
-from mozprocess import ProcessHandler
-from mozprofile import FirefoxProfile, Preferences
-from mozprofile.permissions import ServerLocations
-from mozrunner import FirefoxRunner
-from mozrunner.utils import get_stack_fixer_function
-from mozcrash import mozcrash
-
-from .base import (get_free_port,
-                   Browser,
-                   ExecutorBrowser,
-                   require_arg,
-                   cmd_arg,
-                   browser_command)
-from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executormarionette import (MarionetteTestharnessExecutor,
-                                            MarionetteRefTestExecutor,
-                                            MarionetteWdspecExecutor)
-from ..environment import hostnames
-
-
-here = os.path.join(os.path.split(__file__)[0])
-
-__wptrunner__ = {"product": "firefox",
-                 "check_args": "check_args",
-                 "browser": "FirefoxBrowser",
-                 "executor": {"testharness": "MarionetteTestharnessExecutor",
-                              "reftest": "MarionetteRefTestExecutor",
-                              "wdspec": "MarionetteWdspecExecutor"},
-                 "browser_kwargs": "browser_kwargs",
-                 "executor_kwargs": "executor_kwargs",
-                 "env_options": "env_options",
-                 "run_info_extras": "run_info_extras",
-                 "update_properties": "update_properties"}
-
-
-def get_timeout_multiplier(test_type, run_info_data, **kwargs):
-    if kwargs["timeout_multiplier"] is not None:
-        return kwargs["timeout_multiplier"]
-    if test_type == "reftest":
-        if run_info_data["debug"] or run_info_data.get("asan"):
-            return 4
-        else:
-            return 2
-    elif run_info_data["debug"] or run_info_data.get("asan"):
-        return 3
-    return 1
-
-
-def check_args(**kwargs):
-    require_arg(kwargs, "binary")
-    if kwargs["ssl_type"] != "none":
-        require_arg(kwargs, "certutil_binary")
-
-
-def browser_kwargs(test_type, run_info_data, **kwargs):
-    return {"binary": kwargs["binary"],
-            "prefs_root": kwargs["prefs_root"],
-            "extra_prefs": kwargs["extra_prefs"],
-            "debug_info": kwargs["debug_info"],
-            "symbols_path": kwargs["symbols_path"],
-            "stackwalk_binary": kwargs["stackwalk_binary"],
-            "certutil_binary": kwargs["certutil_binary"],
-            "ca_certificate_path": kwargs["ssl_env"].ca_cert_path(),
-            "e10s": kwargs["gecko_e10s"],
-            "stackfix_dir": kwargs["stackfix_dir"],
-            "binary_args": kwargs["binary_args"],
-            "timeout_multiplier": get_timeout_multiplier(test_type,
-                                                         run_info_data,
-                                                         **kwargs),
-            "leak_check": kwargs["leak_check"]}
-
-
-def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
-                    **kwargs):
-    executor_kwargs = base_executor_kwargs(test_type, server_config,
-                                           cache_manager, **kwargs)
-    executor_kwargs["close_after_done"] = test_type != "reftest"
-    executor_kwargs["timeout_multiplier"] = get_timeout_multiplier(test_type,
-                                                                   run_info_data,
-                                                                   **kwargs)
-    if test_type == "wdspec":
-        executor_kwargs["webdriver_binary"] = kwargs.get("webdriver_binary")
-        fxOptions = {}
-        if kwargs["binary"]:
-            fxOptions["binary"] = kwargs["binary"]
-        if kwargs["binary_args"]:
-            fxOptions["args"] = kwargs["binary_args"]
-        fxOptions["prefs"] = {
-            "network.dns.localDomains": ",".join(hostnames)
-        }
-        capabilities = {"moz:firefoxOptions": fxOptions}
-        executor_kwargs["capabilities"] = capabilities
-    return executor_kwargs
-
-
-def env_options():
-    return {"host": "127.0.0.1",
-            "external_host": "web-platform.test",
-            "bind_hostname": "false",
-            "certificate_domain": "web-platform.test",
-            "supports_debugger": True}
-
-
-def run_info_extras(**kwargs):
-    return {"e10s": kwargs["gecko_e10s"]}
-
-
-def update_properties():
-    return ["debug", "e10s", "os", "version", "processor", "bits"], {"debug", "e10s"}
-
-
-class FirefoxBrowser(Browser):
-    used_ports = set()
-    init_timeout = 60
-    shutdown_timeout = 60
-
-    def __init__(self, logger, binary, prefs_root, extra_prefs=None, debug_info=None,
-                 symbols_path=None, stackwalk_binary=None, certutil_binary=None,
-                 ca_certificate_path=None, e10s=False, stackfix_dir=None,
-                 binary_args=None, timeout_multiplier=None, leak_check=False):
-        Browser.__init__(self, logger)
-        self.binary = binary
-        self.prefs_root = prefs_root
-        self.extra_prefs = extra_prefs
-        self.marionette_port = None
-        self.runner = None
-        self.debug_info = debug_info
-        self.profile = None
-        self.symbols_path = symbols_path
-        self.stackwalk_binary = stackwalk_binary
-        self.ca_certificate_path = ca_certificate_path
-        self.certutil_binary = certutil_binary
-        self.e10s = e10s
-        self.binary_args = binary_args
-        if self.symbols_path and stackfix_dir:
-            self.stack_fixer = get_stack_fixer_function(stackfix_dir,
-                                                        self.symbols_path)
-        else:
-            self.stack_fixer = None
-
-        if timeout_multiplier:
-            self.init_timeout = self.init_timeout * timeout_multiplier
-
-        self.leak_report_file = None
-        self.leak_check = leak_check
-
-    def settings(self, test):
-        return {"check_leaks": self.leak_check and not test.leaks}
-
-    def start(self, **kwargs):
-        if self.marionette_port is None:
-            self.marionette_port = get_free_port(2828, exclude=self.used_ports)
-            self.used_ports.add(self.marionette_port)
-
-        env = os.environ.copy()
-        env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"
-
-        locations = ServerLocations(filename=os.path.join(here, "server-locations.txt"))
-
-        preferences = self.load_prefs()
-
-        self.profile = FirefoxProfile(locations=locations,
-                                      preferences=preferences)
-        self.profile.set_preferences({"marionette.port": self.marionette_port,
-                                      "dom.disable_open_during_load": False,
-                                      "network.dns.localDomains": ",".join(hostnames),
-                                      "network.proxy.type": 0,
-                                      "places.history.enabled": False})
-        if self.e10s:
-            self.profile.set_preferences({"browser.tabs.remote.autostart": True})
-
-        if self.leak_check and kwargs.get("check_leaks", True):
-            self.leak_report_file = os.path.join(self.profile.profile, "runtests_leaks.log")
-            if os.path.exists(self.leak_report_file):
-                os.remove(self.leak_report_file)
-            env["XPCOM_MEM_BLOAT_LOG"] = self.leak_report_file
-        else:
-            self.leak_report_file = None
-
-        # Bug 1262954: winxp + e10s, disable hwaccel
-        if (self.e10s and platform.system() in ("Windows", "Microsoft") and
-            '5.1' in platform.version()):
-            self.profile.set_preferences({"layers.acceleration.disabled": True})
-
-        if self.ca_certificate_path is not None:
-            self.setup_ssl()
-
-        debug_args, cmd = browser_command(self.binary,
-                                          (self.binary_args if self.binary_args else []) +
-                                          [cmd_arg("marionette"), "about:blank"],
-                                          self.debug_info)
-
-        self.runner = FirefoxRunner(profile=self.profile,
-                                    binary=cmd[0],
-                                    cmdargs=cmd[1:],
-                                    env=env,
-                                    process_class=ProcessHandler,
-                                    process_args={"processOutputLine": [self.on_output]})
-
-        self.logger.debug("Starting Firefox")
-
-        self.runner.start(debug_args=debug_args, interactive=self.debug_info and self.debug_info.interactive)
-        self.logger.debug("Firefox Started")
-
-    def load_prefs(self):
-        prefs = Preferences()
-
-        prefs_path = os.path.join(self.prefs_root, "prefs_general.js")
-        if os.path.exists(prefs_path):
-            prefs.add(Preferences.read_prefs(prefs_path))
-        else:
-            self.logger.warning("Failed to find base prefs file in %s" % prefs_path)
-
-        # Add any custom preferences
-        prefs.add(self.extra_prefs, cast=True)
-
-        return prefs()
-
-    def stop(self, force=False):
-        if self.runner is not None and self.runner.is_running():
-            try:
-                # For Firefox we assume that stopping the runner prompts the
-                # browser to shut down. This allows the leak log to be written
-                for clean, stop_f in [(True, lambda: self.runner.wait(self.shutdown_timeout)),
-                                      (False, lambda: self.runner.stop(signal.SIGTERM)),
-                                      (False, lambda: self.runner.stop(signal.SIGKILL))]:
-                    if not force or not clean:
-                        retcode = stop_f()
-                        if retcode is not None:
-                            self.logger.info("Browser exited with return code %s" % retcode)
-                            break
-            except OSError:
-                # This can happen on Windows if the process is already dead
-                pass
-        self.logger.debug("stopped")
-
-    def process_leaks(self):
-        self.logger.debug("PROCESS LEAKS %s" % self.leak_report_file)
-        if self.leak_report_file is None:
-            return
-        mozleak.process_leak_log(
-            self.leak_report_file,
-            leak_thresholds={
-                "default": 0,
-                "tab": 10000,  # See dependencies of bug 1051230.
-                # GMP rarely gets a log, but when it does, it leaks a little.
-                "geckomediaplugin": 20000,
-            },
-            ignore_missing_leaks=["geckomediaplugin"],
-            log=self.logger,
-            stack_fixer=self.stack_fixer
-        )
-
-    def pid(self):
-        if self.runner.process_handler is None:
-            return None
-
-        try:
-            return self.runner.process_handler.pid
-        except AttributeError:
-            return None
-
-    def on_output(self, line):
-        """Write a line of output from the firefox process to the log"""
-        data = line.decode("utf8", "replace")
-        if self.stack_fixer:
-            data = self.stack_fixer(data)
-        self.logger.process_output(self.pid(),
-                                   data,
-                                   command=" ".join(self.runner.command))
-
-    def is_alive(self):
-        if self.runner:
-            return self.runner.is_running()
-        return False
-
-    def cleanup(self):
-        self.stop()
-        self.process_leaks()
-
-    def executor_browser(self):
-        assert self.marionette_port is not None
-        return ExecutorBrowser, {"marionette_port": self.marionette_port}
-
-    def log_crash(self, process, test):
-        dump_dir = os.path.join(self.profile.profile, "minidumps")
-
-        mozcrash.log_crashes(self.logger,
-                             dump_dir,
-                             symbols_path=self.symbols_path,
-                             stackwalk_binary=self.stackwalk_binary,
-                             process=process,
-                             test=test)
-
-    def setup_ssl(self):
-        """Create a certificate database to use in the test profile. This is configured
-        to trust the CA Certificate that has signed the web-platform.test server
-        certificate."""
-
-        self.logger.info("Setting up ssl")
-
-        # Make sure the certutil libraries from the source tree are loaded when using a
-        # local copy of certutil
-        # TODO: Maybe only set this if certutil won't launch?
-        env = os.environ.copy()
-        certutil_dir = os.path.dirname(self.binary)
-        if mozinfo.isMac:
-            env_var = "DYLD_LIBRARY_PATH"
-        elif mozinfo.isUnix:
-            env_var = "LD_LIBRARY_PATH"
-        else:
-            env_var = "PATH"
-
-        env[env_var] = (os.path.pathsep.join([certutil_dir, env[env_var]])
-                        if env_var in env else certutil_dir).encode(
-                                sys.getfilesystemencoding() or 'utf-8', 'replace')
-
-        def certutil(*args):
-            cmd = [self.certutil_binary] + list(args)
-            self.logger.process_output("certutil",
-                                       subprocess.check_output(cmd,
-                                                               env=env,
-                                                               stderr=subprocess.STDOUT),
-                                       " ".join(cmd))
-
-        pw_path = os.path.join(self.profile.profile, ".crtdbpw")
-        with open(pw_path, "w") as f:
-            # Use empty password for certificate db
-            f.write("\n")
-
-        cert_db_path = self.profile.profile
-
-        # Create a new certificate db
-        certutil("-N", "-d", cert_db_path, "-f", pw_path)
-
-        # Add the CA certificate to the database and mark as trusted to issue server certs
-        certutil("-A", "-d", cert_db_path, "-f", pw_path, "-t", "CT,,",
-                 "-n", "web-platform-tests", "-i", self.ca_certificate_path)
-
-        # List all certs in the database
-        certutil("-L", "-d", cert_db_path)
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/browsers/server-locations.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-#
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-# See /build/pgo/server-locations.txt for documentation on the format
-
-http://localhost:8000    primary
-
-http://web-platform.test:8000
-http://www.web-platform.test:8000
-http://www1.web-platform.test:8000
-http://www2.web-platform.test:8000
-http://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8000
-http://xn--lve-6lad.web-platform.test:8000
-
-http://web-platform.test:8001
-http://www.web-platform.test:8001
-http://www1.web-platform.test:8001
-http://www2.web-platform.test:8001
-http://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8001
-http://xn--lve-6lad.web-platform.test:8001
-
-https://web-platform.test:8443
-https://www.web-platform.test:8443
-https://www1.web-platform.test:8443
-https://www2.web-platform.test:8443
-https://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8443
-https://xn--lve-6lad.web-platform.test:8443
-
-# These are actually ws servers, but until mozprofile is
-# fixed we have to pretend that they are http servers
-http://web-platform.test:8888
-http://www.web-platform.test:8888
-http://www1.web-platform.test:8888
-http://www2.web-platform.test:8888
-http://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8888
-http://xn--lve-6lad.web-platform.test:8888
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/browsers/servo.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import os
-
-from .base import NullBrowser, ExecutorBrowser, require_arg
-from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorservo import ServoTestharnessExecutor, ServoRefTestExecutor, ServoWdspecExecutor
-
-here = os.path.join(os.path.split(__file__)[0])
-
-__wptrunner__ = {
-    "product": "servo",
-    "check_args": "check_args",
-    "browser": "ServoBrowser",
-    "executor": {
-        "testharness": "ServoTestharnessExecutor",
-        "reftest": "ServoRefTestExecutor",
-        "wdspec": "ServoWdspecExecutor",
-    },
-    "browser_kwargs": "browser_kwargs",
-    "executor_kwargs": "executor_kwargs",
-    "env_options": "env_options",
-    "update_properties": "update_properties",
-}
-
-
-def check_args(**kwargs):
-    require_arg(kwargs, "binary")
-
-
-def browser_kwargs(test_type, run_info_data, **kwargs):
-    return {
-        "binary": kwargs["binary"],
-        "debug_info": kwargs["debug_info"],
-        "binary_args": kwargs["binary_args"],
-        "user_stylesheets": kwargs.get("user_stylesheets"),
-    }
-
-
-def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
-                    **kwargs):
-    rv = base_executor_kwargs(test_type, server_config,
-                              cache_manager, **kwargs)
-    rv["pause_after_test"] = kwargs["pause_after_test"]
-    return rv
-
-
-def env_options():
-    return {"host": "127.0.0.1",
-            "external_host": "web-platform.test",
-            "bind_hostname": "true",
-            "testharnessreport": "testharnessreport-servo.js",
-            "supports_debugger": True}
-
-
-def update_properties():
-    return ["debug", "os", "version", "processor", "bits"], None
-
-
-class ServoBrowser(NullBrowser):
-    def __init__(self, logger, binary, debug_info=None, binary_args=None,
-                 user_stylesheets=None):
-        NullBrowser.__init__(self, logger)
-        self.binary = binary
-        self.debug_info = debug_info
-        self.binary_args = binary_args or []
-        self.user_stylesheets = user_stylesheets or []
-
-    def executor_browser(self):
-        return ExecutorBrowser, {
-            "binary": self.binary,
-            "debug_info": self.debug_info,
-            "binary_args": self.binary_args,
-            "user_stylesheets": self.user_stylesheets,
-        }
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/browsers/servodriver.py
+++ /dev/null
@@ -1,164 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import os
-import subprocess
-import tempfile
-
-from mozprocess import ProcessHandler
-
-from .base import Browser, require_arg, get_free_port, browser_command, ExecutorBrowser
-from ..executors import executor_kwargs as base_executor_kwargs
-from ..executors.executorservodriver import (ServoWebDriverTestharnessExecutor,
-                                             ServoWebDriverRefTestExecutor)
-
-here = os.path.join(os.path.split(__file__)[0])
-
-__wptrunner__ = {
-    "product": "servodriver",
-    "check_args": "check_args",
-    "browser": "ServoWebDriverBrowser",
-    "executor": {
-        "testharness": "ServoWebDriverTestharnessExecutor",
-        "reftest": "ServoWebDriverRefTestExecutor",
-    },
-    "browser_kwargs": "browser_kwargs",
-    "executor_kwargs": "executor_kwargs",
-    "env_options": "env_options",
-    "update_properties": "update_properties",
-}
-
-hosts_text = """127.0.0.1 web-platform.test
-127.0.0.1 www.web-platform.test
-127.0.0.1 www1.web-platform.test
-127.0.0.1 www2.web-platform.test
-127.0.0.1 xn--n8j6ds53lwwkrqhv28a.web-platform.test
-127.0.0.1 xn--lve-6lad.web-platform.test
-"""
-
-
-def check_args(**kwargs):
-    require_arg(kwargs, "binary")
-
-
-def browser_kwargs(test_type, run_info_data, **kwargs):
-    return {
-        "binary": kwargs["binary"],
-        "debug_info": kwargs["debug_info"],
-        "user_stylesheets": kwargs.get("user_stylesheets"),
-    }
-
-
-def executor_kwargs(test_type, server_config, cache_manager, run_info_data, **kwargs):
-    rv = base_executor_kwargs(test_type, server_config,
-                              cache_manager, **kwargs)
-    return rv
-
-
-def env_options():
-    return {"host": "127.0.0.1",
-            "external_host": "web-platform.test",
-            "bind_hostname": "true",
-            "testharnessreport": "testharnessreport-servodriver.js",
-            "supports_debugger": True}
-
-
-def update_properties():
-    return ["debug", "os", "version", "processor", "bits"], None
-
-
-def make_hosts_file():
-    hosts_fd, hosts_path = tempfile.mkstemp()
-    with os.fdopen(hosts_fd, "w") as f:
-        f.write(hosts_text)
-    return hosts_path
-
-
-class ServoWebDriverBrowser(Browser):
-    used_ports = set()
-
-    def __init__(self, logger, binary, debug_info=None, webdriver_host="127.0.0.1",
-                 user_stylesheets=None):
-        Browser.__init__(self, logger)
-        self.binary = binary
-        self.webdriver_host = webdriver_host
-        self.webdriver_port = None
-        self.proc = None
-        self.debug_info = debug_info
-        self.hosts_path = make_hosts_file()
-        self.command = None
-        self.user_stylesheets = user_stylesheets if user_stylesheets else []
-
-    def start(self, **kwargs):
-        self.webdriver_port = get_free_port(4444, exclude=self.used_ports)
-        self.used_ports.add(self.webdriver_port)
-
-        env = os.environ.copy()
-        env["HOST_FILE"] = self.hosts_path
-        env["RUST_BACKTRACE"] = "1"
-
-        debug_args, command = browser_command(
-            self.binary,
-            [
-                "--hard-fail",
-                "--webdriver", str(self.webdriver_port),
-                "about:blank",
-            ],
-            self.debug_info
-        )
-
-        for stylesheet in self.user_stylesheets:
-            command += ["--user-stylesheet", stylesheet]
-
-        self.command = debug_args + command
-
-        if not self.debug_info or not self.debug_info.interactive:
-            self.proc = ProcessHandler(self.command,
-                                       processOutputLine=[self.on_output],
-                                       env=env,
-                                       storeOutput=False)
-            self.proc.run()
-        else:
-            self.proc = subprocess.Popen(self.command, env=env)
-
-        self.logger.debug("Servo Started")
-
-    def stop(self, force=False):
-        self.logger.debug("Stopping browser")
-        if self.proc is not None:
-            try:
-                self.proc.kill()
-            except OSError:
-                # This can happen on Windows if the process is already dead
-                pass
-
-    def pid(self):
-        if self.proc is None:
-            return None
-
-        try:
-            return self.proc.pid
-        except AttributeError:
-            return None
-
-    def on_output(self, line):
-        """Write a line of output from the process to the log"""
-        self.logger.process_output(self.pid(),
-                                   line.decode("utf8", "replace"),
-                                   command=" ".join(self.command))
-
-    def is_alive(self):
-        if self.proc:
-            return self.proc.poll() is None
-        return False
-
-    def cleanup(self):
-        self.stop()
-
-    def executor_browser(self):
-        assert self.webdriver_port is not None
-        return ExecutorBrowser, {"webdriver_host": self.webdriver_host,
-                                 "webdriver_port": self.webdriver_port}
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/config.py
+++ /dev/null
@@ -1,64 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import ConfigParser
-import os
-import sys
-from collections import OrderedDict
-
-here = os.path.split(__file__)[0]
-
-class ConfigDict(dict):
-    def __init__(self, base_path, *args, **kwargs):
-        self.base_path = base_path
-        dict.__init__(self, *args, **kwargs)
-
-    def get_path(self, key, default=None):
-        if key not in self:
-            return default
-        path = self[key]
-        path = os.path.expanduser(path)
-        return os.path.abspath(os.path.join(self.base_path, path))
-
-def read(config_path):
-    config_path = os.path.abspath(config_path)
-    config_root = os.path.split(config_path)[0]
-    parser = ConfigParser.SafeConfigParser()
-    success = parser.read(config_path)
-    assert config_path in success, success
-
-    subns = {"pwd": os.path.abspath(os.path.curdir)}
-
-    rv = OrderedDict()
-    for section in parser.sections():
-        rv[section] = ConfigDict(config_root)
-        for key in parser.options(section):
-            rv[section][key] = parser.get(section, key, False, subns)
-
-    return rv
-
-def path(argv=None):
-    if argv is None:
-        argv = []
-    path = None
-
-    for i, arg in enumerate(argv):
-        if arg == "--config":
-            if i + 1 < len(argv):
-                path = argv[i + 1]
-        elif arg.startswith("--config="):
-            path = arg.split("=", 1)[1]
-        if path is not None:
-            break
-
-    if path is None:
-        if os.path.exists("wptrunner.ini"):
-            path = os.path.abspath("wptrunner.ini")
-        else:
-            path = os.path.join(here, "..", "wptrunner.default.ini")
-
-    return os.path.abspath(path)
-
-def load():
-    return read(path(sys.argv))
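
A sketch of how the %(pwd)s placeholders from wptrunner.default.ini are resolved: read() hands ConfigParser a substitution namespace, so a raw value such as "%(pwd)s/tests" comes back expanded (the import path below is illustrative):

    import os
    from wptrunner import config  # illustrative import path

    cfg = config.read(config.path([]))  # with no --config this can fall back to wptrunner.default.ini
    tests = cfg["manifest:default"].get_path("tests")
    # with the default ini this resolves to os.path.join(os.getcwd(), "tests")
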
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/environment.py
+++ /dev/null
@@ -1,214 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import json
-import os
-import multiprocessing
-import signal
-import socket
-import sys
-import time
-
-from mozlog import get_default_logger, handlers
-
-from wptlogging import LogLevelRewriter
-
-here = os.path.split(__file__)[0]
-
-serve = None
-sslutils = None
-
-
-hostnames = ["web-platform.test",
-             "www.web-platform.test",
-             "www1.web-platform.test",
-             "www2.web-platform.test",
-             "xn--n8j6ds53lwwkrqhv28a.web-platform.test",
-             "xn--lve-6lad.web-platform.test"]
-
-
-def do_delayed_imports(logger, test_paths):
-    global serve, sslutils
-
-    serve_root = serve_path(test_paths)
-    sys.path.insert(0, serve_root)
-
-    failed = []
-
-    try:
-        from tools.serve import serve
-    except ImportError:
-        try:
-            from wpt_tools.serve import serve
-        except ImportError:
-            failed.append("serve")
-
-    try:
-        import sslutils
-    except ImportError:
-        failed.append("sslutils")
-
-    if failed:
-        logger.critical(
-            "Failed to import %s. Ensure that tests path %s contains web-platform-tests" %
-            (", ".join(failed), serve_root))
-        sys.exit(1)
-
-
-def serve_path(test_paths):
-    return test_paths["/"]["tests_path"]
-
-
-def get_ssl_kwargs(**kwargs):
-    if kwargs["ssl_type"] == "openssl":
-        args = {"openssl_binary": kwargs["openssl_binary"]}
-    elif kwargs["ssl_type"] == "pregenerated":
-        args = {"host_key_path": kwargs["host_key_path"],
-                "host_cert_path": kwargs["host_cert_path"],
-                "ca_cert_path": kwargs["ca_cert_path"]}
-    else:
-        args = {}
-    return args
-
-
-def ssl_env(logger, **kwargs):
-    ssl_env_cls = sslutils.environments[kwargs["ssl_type"]]
-    return ssl_env_cls(logger, **get_ssl_kwargs(**kwargs))
-
-
-class TestEnvironmentError(Exception):
-    pass
-
-
-class TestEnvironment(object):
-    def __init__(self, test_paths, ssl_env, pause_after_test, debug_info, options):
-        """Context manager that owns the test environment i.e. the http and
-        websockets servers"""
-        self.test_paths = test_paths
-        self.ssl_env = ssl_env
-        self.server = None
-        self.config = None
-        self.external_config = None
-        self.pause_after_test = pause_after_test
-        self.test_server_port = options.pop("test_server_port", True)
-        self.debug_info = debug_info
-        self.options = options if options is not None else {}
-
-        self.cache_manager = multiprocessing.Manager()
-        self.stash = serve.stash.StashServer()
-
-    def __enter__(self):
-        self.stash.__enter__()
-        self.ssl_env.__enter__()
-        self.cache_manager.__enter__()
-        self.setup_server_logging()
-        self.config = self.load_config()
-        serve.set_computed_defaults(self.config)
-        self.external_config, self.servers = serve.start(self.config, self.ssl_env,
-                                                         self.get_routes())
-        if self.options.get("supports_debugger") and self.debug_info and self.debug_info.interactive:
-            self.ignore_interrupts()
-        return self
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self.process_interrupts()
-        for scheme, servers in self.servers.iteritems():
-            for port, server in servers:
-                server.kill()
-        self.cache_manager.__exit__(exc_type, exc_val, exc_tb)
-        self.ssl_env.__exit__(exc_type, exc_val, exc_tb)
-        self.stash.__exit__()
-
-    def ignore_interrupts(self):
-        signal.signal(signal.SIGINT, signal.SIG_IGN)
-
-    def process_interrupts(self):
-        signal.signal(signal.SIGINT, signal.SIG_DFL)
-
-    def load_config(self):
-        default_config_path = os.path.join(serve_path(self.test_paths), "config.default.json")
-        local_config_path = os.path.join(here, "config.json")
-
-        with open(default_config_path) as f:
-            default_config = json.load(f)
-
-        with open(local_config_path) as f:
-            data = f.read()
-            local_config = json.loads(data % self.options)
-
-        # TODO: allow non-default configuration for ssl
-
-        local_config["external_host"] = self.options.get("external_host", None)
-        local_config["ssl"]["encrypt_after_connect"] = self.options.get("encrypt_after_connect", False)
-
-        config = serve.merge_json(default_config, local_config)
-        config["doc_root"] = serve_path(self.test_paths)
-
-        if not self.ssl_env.ssl_enabled:
-            config["ports"]["https"] = [None]
-
-        host = self.options.get("certificate_domain", config["host"])
-        hosts = [host]
-        hosts.extend("%s.%s" % (item[0], host) for item in serve.get_subdomains(host).values())
-        key_file, certificate = self.ssl_env.host_cert_path(hosts)
-
-        config["key_file"] = key_file
-        config["certificate"] = certificate
-
-        return config
-
-    def setup_server_logging(self):
-        server_logger = get_default_logger(component="wptserve")
-        assert server_logger is not None
-        log_filter = handlers.LogLevelFilter(lambda x: x, "info")
-        # Downgrade errors to warnings for the server
-        log_filter = LogLevelRewriter(log_filter, ["error"], "warning")
-        server_logger.component_filter = log_filter
-
-        try:
-            # Set as the default logger for wptserve
-            serve.set_logger(server_logger)
-            serve.logger = server_logger
-        except Exception:
-            # This happens if logging has already been set up for wptserve
-            pass
-
-    def get_routes(self):
-        route_builder = serve.RoutesBuilder()
-
-        for path, format_args, content_type, route in [
-                ("testharness_runner.html", {}, "text/html", "/testharness_runner.html"),
-                (self.options.get("testharnessreport", "testharnessreport.js"),
-                 {"output": self.pause_after_test}, "text/javascript",
-                 "/resources/testharnessreport.js")]:
-            path = os.path.normpath(os.path.join(here, path))
-            route_builder.add_static(path, format_args, content_type, route)
-
-        for url_base, paths in self.test_paths.iteritems():
-            if url_base == "/":
-                continue
-            route_builder.add_mount_point(url_base, paths["tests_path"])
-
-        if "/" not in self.test_paths:
-            del route_builder.mountpoint_routes["/"]
-
-        return route_builder.get_routes()
-
-    def ensure_started(self):
-        # Pause for a while to ensure that the server has a chance to start
-        time.sleep(2)
-        for scheme, servers in self.servers.iteritems():
-            for port, server in servers:
-                if self.test_server_port:
-                    s = socket.socket()
-                    try:
-                        s.connect((self.config["host"], port))
-                    except socket.error:
-                        raise EnvironmentError(
-                            "%s server on port %d failed to start" % (scheme, port))
-                    finally:
-                        s.close()
-
-                if not server.is_alive():
-                    raise EnvironmentError("%s server on port %d failed to start" % (scheme, port))
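
A hedged usage sketch of the context manager above; test_paths and ssl_env are assumed to have been built by the caller (for example from the command-line kwargs and ssl_env()):

    with TestEnvironment(test_paths,
                         ssl_env,
                         pause_after_test=False,
                         debug_info=None,
                         options={"host": "web-platform.test"}) as env:
        env.ensure_started()  # probe each server port before starting the tests
        # env.external_config and env.cache_manager are then handed to the executors
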
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/__init__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-from base import (executor_kwargs,
-                  testharness_result_converter,
-                  reftest_result_converter,
-                  TestExecutor)
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/base.py
+++ /dev/null
@@ -1,329 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import hashlib
-import json
-import os
-import traceback
-import urlparse
-from abc import ABCMeta, abstractmethod
-
-from ..testrunner import Stop
-
-here = os.path.split(__file__)[0]
-
-
-def executor_kwargs(test_type, server_config, cache_manager, **kwargs):
-    timeout_multiplier = kwargs["timeout_multiplier"]
-    if timeout_multiplier is None:
-        timeout_multiplier = 1
-
-    executor_kwargs = {"server_config": server_config,
-                       "timeout_multiplier": timeout_multiplier,
-                       "debug_info": kwargs["debug_info"]}
-
-    if test_type == "reftest":
-        executor_kwargs["screenshot_cache"] = cache_manager.dict()
-
-    return executor_kwargs
-
-
-def strip_server(url):
-    """Remove the scheme and netloc from a url, leaving only the path and any query
-    or fragment.
-
-    url - the url to strip
-
-    e.g. http://example.org:8000/tests?id=1#2 becomes /tests?id=1#2"""
-
-    url_parts = list(urlparse.urlsplit(url))
-    url_parts[0] = ""
-    url_parts[1] = ""
-    return urlparse.urlunsplit(url_parts)
-
-
-class TestharnessResultConverter(object):
-    harness_codes = {0: "OK",
-                     1: "ERROR",
-                     2: "TIMEOUT"}
-
-    test_codes = {0: "PASS",
-                  1: "FAIL",
-                  2: "TIMEOUT",
-                  3: "NOTRUN"}
-
-    def __call__(self, test, result):
-        """Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
-        result_url, status, message, stack, subtest_results = result
-        assert result_url == test.url, ("Got results from %s, expected %s" %
-                                        (result_url, test.url))
-        harness_result = test.result_cls(self.harness_codes[status], message)
-        return (harness_result,
-                [test.subtest_result_cls(name, self.test_codes[status], message, stack)
-                 for name, status, message, stack in subtest_results])
-
-
-testharness_result_converter = TestharnessResultConverter()
-
-
-def reftest_result_converter(self, test, result):
-    return (test.result_cls(result["status"], result["message"],
-                            extra=result.get("extra")), [])
-
-
-def pytest_result_converter(self, test, data):
-    harness_data, subtest_data = data
-
-    if subtest_data is None:
-        subtest_data = []
-
-    harness_result = test.result_cls(*harness_data)
-    subtest_results = [test.subtest_result_cls(*item) for item in subtest_data]
-
-    return (harness_result, subtest_results)
-
-
-class ExecutorException(Exception):
-    def __init__(self, status, message):
-        self.status = status
-        self.message = message
-
-
-class TestExecutor(object):
-    __metaclass__ = ABCMeta
-
-    test_type = None
-    convert_result = None
-
-    def __init__(self, browser, server_config, timeout_multiplier=1,
-                 debug_info=None):
-        """Abstract Base class for object that actually executes the tests in a
-        specific browser. Typically there will be a different TestExecutor
-        subclass for each test type and method of executing tests.
-
-        :param browser: ExecutorBrowser instance providing properties of the
-                        browser that will be tested.
-        :param server_config: Dictionary of wptserve server configuration of the
-                              form stored in TestEnvironment.external_config
-        :param timeout_multiplier: Multiplier relative to base timeout to use
-                                   when setting test timeout.
-        """
-        self.runner = None
-        self.browser = browser
-        self.server_config = server_config
-        self.timeout_multiplier = timeout_multiplier
-        self.debug_info = debug_info
-        self.last_environment = {"protocol": "http",
-                                 "prefs": {}}
-        self.protocol = None # This must be set in subclasses
-
-    @property
-    def logger(self):
-        """StructuredLogger for this executor"""
-        if self.runner is not None:
-            return self.runner.logger
-
-    def setup(self, runner):
-        """Run steps needed before tests can be started e.g. connecting to
-        browser instance
-
-        :param runner: TestRunner instance that is going to run the tests"""
-        self.runner = runner
-        if self.protocol is not None:
-            self.protocol.setup(runner)
-
-    def teardown(self):
-        """Run cleanup steps after tests have finished"""
-        if self.protocol is not None:
-            self.protocol.teardown()
-
-    def run_test(self, test):
-        """Run a particular test.
-
-        :param test: The test to run"""
-        if test.environment != self.last_environment:
-            self.on_environment_change(test.environment)
-
-        try:
-            result = self.do_test(test)
-        except Exception as e:
-            result = self.result_from_exception(test, e)
-
-        if result is Stop:
-            return result
-
-        # log result of parent test
-        if result[0].status == "ERROR":
-            self.logger.debug(result[0].message)
-
-        self.last_environment = test.environment
-
-        self.runner.send_message("test_ended", test, result)
-
-    def server_url(self, protocol):
-        return "%s://%s:%s" % (protocol,
-                               self.server_config["host"],
-                               self.server_config["ports"][protocol][0])
-
-    def test_url(self, test):
-        return urlparse.urljoin(self.server_url(test.environment["protocol"]), test.url)
-
-    @abstractmethod
-    def do_test(self, test):
-        """Test-type and protocol specific implementation of running a
-        specific test.
-
-        :param test: The test to run."""
-        pass
-
-    def on_environment_change(self, new_environment):
-        pass
-
-    def result_from_exception(self, test, e):
-        if hasattr(e, "status") and e.status in test.result_cls.statuses:
-            status = e.status
-        else:
-            status = "ERROR"
-        message = unicode(getattr(e, "message", ""))
-        if message:
-            message += "\n"
-        message += traceback.format_exc()
-        return test.result_cls(status, message), []
-
-
-class TestharnessExecutor(TestExecutor):
-    convert_result = testharness_result_converter
-
-
-class RefTestExecutor(TestExecutor):
-    convert_result = reftest_result_converter
-
-    def __init__(self, browser, server_config, timeout_multiplier=1, screenshot_cache=None,
-                 debug_info=None):
-        TestExecutor.__init__(self, browser, server_config,
-                              timeout_multiplier=timeout_multiplier,
-                              debug_info=debug_info)
-
-        self.screenshot_cache = screenshot_cache
-
-
-class RefTestImplementation(object):
-    def __init__(self, executor):
-        self.timeout_multiplier = executor.timeout_multiplier
-        self.executor = executor
-        # Cache of url:(screenshot hash, screenshot). Typically the
-        # screenshot is None, but we set this value if a test fails
-        # and the screenshot was taken from the cache so that we may
-        # retrieve the screenshot from the cache directly in the future
-        self.screenshot_cache = self.executor.screenshot_cache
-        self.message = None
-
-    @property
-    def logger(self):
-        return self.executor.logger
-
-    def get_hash(self, test, viewport_size, dpi):
-        timeout = test.timeout * self.timeout_multiplier
-        key = (test.url, viewport_size, dpi)
-
-        if key not in self.screenshot_cache:
-            success, data = self.executor.screenshot(test, viewport_size, dpi)
-
-            if not success:
-                return False, data
-
-            screenshot = data
-            hash_value = hashlib.sha1(screenshot).hexdigest()
-
-            self.screenshot_cache[key] = (hash_value, None)
-
-            rv = (hash_value, screenshot)
-        else:
-            rv = self.screenshot_cache[key]
-
-        self.message.append("%s %s" % (test.url, rv[0]))
-        return True, rv
-
-    def is_pass(self, lhs_hash, rhs_hash, relation):
-        assert relation in ("==", "!=")
-        self.message.append("Testing %s %s %s" % (lhs_hash, relation, rhs_hash))
-        return ((relation == "==" and lhs_hash == rhs_hash) or
-                (relation == "!=" and lhs_hash != rhs_hash))
-
-    def run_test(self, test):
-        viewport_size = test.viewport_size
-        dpi = test.dpi
-        self.message = []
-
-        # Depth-first search of reference tree, with the goal
-        # of reaching a leaf node with only pass results
-
-        stack = list(((test, item[0]), item[1]) for item in reversed(test.references))
-        while stack:
-            hashes = [None, None]
-            screenshots = [None, None]
-
-            nodes, relation = stack.pop()
-
-            for i, node in enumerate(nodes):
-                success, data = self.get_hash(node, viewport_size, dpi)
-                if success is False:
-                    return {"status": data[0], "message": data[1]}
-
-                hashes[i], screenshots[i] = data
-
-            if self.is_pass(hashes[0], hashes[1], relation):
-                if nodes[1].references:
-                    stack.extend(list(((nodes[1], item[0]), item[1]) for item in reversed(nodes[1].references)))
-                else:
-                    # We passed
-                    return {"status": "PASS", "message": None}
-
-        # We failed, so construct a failure message
-
-        for i, (node, screenshot) in enumerate(zip(nodes, screenshots)):
-            if screenshot is None:
-                success, screenshot = self.retake_screenshot(node, viewport_size, dpi)
-                if success:
-                    screenshots[i] = screenshot
-
-        log_data = [{"url": nodes[0].url, "screenshot": screenshots[0]}, relation,
-                    {"url": nodes[1].url, "screenshot": screenshots[1]}]
-
-        return {"status": "FAIL",
-                "message": "\n".join(self.message),
-                "extra": {"reftest_screenshots": log_data}}
-
-    def retake_screenshot(self, node, viewport_size, dpi):
-        success, data = self.executor.screenshot(node, viewport_size, dpi)
-        if not success:
-            return False, data
-
-        key = (node.url, viewport_size, dpi)
-        hash_val, _ = self.screenshot_cache[key]
-        self.screenshot_cache[key] = hash_val, data
-        return True, data
-
-
-class WdspecExecutor(TestExecutor):
-    convert_result = pytest_result_converter
-
-
-class Protocol(object):
-    def __init__(self, executor, browser):
-        self.executor = executor
-        self.browser = browser
-
-    @property
-    def logger(self):
-        return self.executor.logger
-
-    def setup(self, runner):
-        pass
-
-    def teardown(self):
-        pass
-
-    def wait(self):
-        pass
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/executormarionette.py
+++ /dev/null
@@ -1,621 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import hashlib
-import httplib
-import os
-import socket
-import threading
-import time
-import traceback
-import urlparse
-import uuid
-from collections import defaultdict
-
-from ..wpttest import WdspecResult, WdspecSubtestResult
-
-errors = None
-marionette = None
-pytestrunner = None
-
-here = os.path.dirname(__file__)
-
-from .base import (ExecutorException,
-                   Protocol,
-                   RefTestExecutor,
-                   RefTestImplementation,
-                   TestExecutor,
-                   TestharnessExecutor,
-                   testharness_result_converter,
-                   reftest_result_converter,
-                   strip_server,
-                   WdspecExecutor)
-from ..testrunner import Stop
-from ..webdriver_server import GeckoDriverServer
-
-# Extra timeout to use after internal test timeout at which the harness
-# should force a timeout
-extra_timeout = 5 # seconds
-
-
-def do_delayed_imports():
-    global errors, marionette
-
-    # The Marionette client used to be called marionette; recently it changed
-    # to marionette_driver for unfathomable reasons
-    try:
-        import marionette
-        from marionette import errors
-    except ImportError:
-        from marionette_driver import marionette, errors
-
-
-class MarionetteProtocol(Protocol):
-    def __init__(self, executor, browser, timeout_multiplier=1):
-        do_delayed_imports()
-
-        Protocol.__init__(self, executor, browser)
-        self.marionette = None
-        self.marionette_port = browser.marionette_port
-        self.timeout_multiplier = timeout_multiplier
-        self.timeout = None
-        self.runner_handle = None
-
-    def setup(self, runner):
-        """Connect to browser via Marionette."""
-        Protocol.setup(self, runner)
-
-        self.logger.debug("Connecting to Marionette on port %i" % self.marionette_port)
-        startup_timeout = marionette.Marionette.DEFAULT_STARTUP_TIMEOUT * self.timeout_multiplier
-        self.marionette = marionette.Marionette(host='localhost',
-                                                port=self.marionette_port,
-                                                socket_timeout=None,
-                                                startup_timeout=startup_timeout)
-
-        # XXX Move this timeout somewhere
-        self.logger.debug("Waiting for Marionette connection")
-        while True:
-            success = self.marionette.wait_for_port(60 * self.timeout_multiplier)
-            # When running in a debugger, wait indefinitely for Firefox to start
-            if success or self.executor.debug_info is None:
-                break
-
-        session_started = False
-        if success:
-            try:
-                self.logger.debug("Starting Marionette session")
-                self.marionette.start_session()
-            except Exception as e:
-                self.logger.warning("Starting marionette session failed: %s" % e)
-            else:
-                self.logger.debug("Marionette session started")
-                session_started = True
-
-        if not success or not session_started:
-            self.logger.warning("Failed to connect to Marionette")
-            self.executor.runner.send_message("init_failed")
-        else:
-            try:
-                self.after_connect()
-            except Exception:
-                self.logger.warning("Post-connection steps failed")
-                self.logger.error(traceback.format_exc())
-                self.executor.runner.send_message("init_failed")
-            else:
-                self.executor.runner.send_message("init_succeeded")
-
-    def teardown(self):
-        try:
-            self.marionette._request_in_app_shutdown()
-            self.marionette.delete_session(send_request=False, reset_session_id=True)
-        except Exception:
-            # This is typically because the session never started
-            pass
-        if self.marionette is not None:
-            del self.marionette
-
-    @property
-    def is_alive(self):
-        """Check if the Marionette connection is still active."""
-        try:
-            self.marionette.current_window_handle
-        except Exception:
-            return False
-        return True
-
-    def after_connect(self):
-        self.load_runner(self.executor.last_environment["protocol"])
-
-    def set_timeout(self, timeout):
-        """Set the Marionette script timeout.
-
-        :param timeout: Script timeout in seconds
-
-        """
-        self.marionette.timeout.script = timeout
-        self.timeout = timeout
-
-    def load_runner(self, protocol):
-        # Check if we previously had a test window open, and if we did make sure it's closed
-        self.marionette.execute_script("if (window.wrappedJSObject.win) {window.wrappedJSObject.win.close()}")
-        url = urlparse.urljoin(self.executor.server_url(protocol), "/testharness_runner.html")
-        self.logger.debug("Loading %s" % url)
-        self.runner_handle = self.marionette.current_window_handle
-        try:
-            self.marionette.navigate(url)
-        except Exception as e:
-            self.logger.critical(
-                "Loading initial page %s failed. Ensure that there "
-                "are no other programs bound to this port and that "
-                "your firewall rules or network setup does not "
-                "prevent access.\n%s" % (url, traceback.format_exc()))
-        self.marionette.execute_script(
-            "document.title = '%s'" % threading.current_thread().name.replace("'", '"'))
-
-    def close_old_windows(self, protocol):
-        handles = self.marionette.window_handles
-        runner_handle = None
-        try:
-            handles.remove(self.runner_handle)
-            runner_handle = self.runner_handle
-        except ValueError:
-            # The runner window probably changed id but we can restore it
-            # This isn't supposed to happen, but marionette ids are not yet stable
-            # We assume that the first handle returned corresponds to the runner,
-            # but it hopefully doesn't matter too much if that assumption is
-            # wrong since we reload the runner in that tab anyway.
-            runner_handle = handles.pop(0)
-
-        for handle in handles:
-            self.marionette.switch_to_window(handle)
-            self.marionette.close()
-
-        self.marionette.switch_to_window(runner_handle)
-        if runner_handle != self.runner_handle:
-            self.load_runner(protocol)
-
-    def wait(self):
-        socket_timeout = self.marionette.client.sock.gettimeout()
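-        # Use half the socket timeout as the script timeout so the async
-        # script returns (and we loop) before the socket itself times out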
-        if socket_timeout:
-            self.marionette.timeout.script = socket_timeout / 2
-
-        while True:
-            try:
-                self.marionette.execute_async_script("")
-            except errors.ScriptTimeoutException:
-                self.logger.debug("Script timed out")
-            except (socket.timeout, IOError):
-                self.logger.debug("Socket closed")
-                break
-            except Exception as e:
-                self.logger.error(traceback.format_exc())
-                break
-
-    def on_environment_change(self, old_environment, new_environment):
-        # Unset all the old prefs
-        for name in old_environment.get("prefs", {}).iterkeys():
-            value = self.executor.original_pref_values[name]
-            if value is None:
-                self.clear_user_pref(name)
-            else:
-                self.set_pref(name, value)
-
-        for name, value in new_environment.get("prefs", {}).iteritems():
-            self.executor.original_pref_values[name] = self.get_pref(name)
-            self.set_pref(name, value)
-
-    def set_pref(self, name, value):
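-        # Pref values arrive as strings from the test metadata: "true"/"false"
-        # become JS booleans, digit strings pass through as numbers, and
-        # anything else is quoted as a JS string before substitution below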
-        if value.lower() not in ("true", "false"):
-            try:
-                int(value)
-            except ValueError:
-                value = "'%s'" % value
-        else:
-            value = value.lower()
-
-        self.logger.info("Setting pref %s (%s)" % (name, value))
-
-        script = """
-            let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
-                                          .getService(Components.interfaces.nsIPrefBranch);
-            let pref = '%s';
-            let type = prefInterface.getPrefType(pref);
-            let value = %s;
-            switch(type) {
-                case prefInterface.PREF_STRING:
-                    prefInterface.setCharPref(pref, value);
-                    break;
-                case prefInterface.PREF_BOOL:
-                    prefInterface.setBoolPref(pref, value);
-                    break;
-                case prefInterface.PREF_INT:
-                    prefInterface.setIntPref(pref, value);
-                    break;
-            }
-            """ % (name, value)
-        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
-            self.marionette.execute_script(script)
-
-    def clear_user_pref(self, name):
-        self.logger.info("Clearing pref %s" % (name))
-        script = """
-            let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
-                                          .getService(Components.interfaces.nsIPrefBranch);
-            let pref = '%s';
-            prefInterface.clearUserPref(pref);
-            """ % name
-        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
-            self.marionette.execute_script(script)
-
-    def get_pref(self, name):
-        script = """
-            let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
-                                          .getService(Components.interfaces.nsIPrefBranch);
-            let pref = '%s';
-            let type = prefInterface.getPrefType(pref);
-            switch(type) {
-                case prefInterface.PREF_STRING:
-                    return prefInterface.getCharPref(pref);
-                case prefInterface.PREF_BOOL:
-                    return prefInterface.getBoolPref(pref);
-                case prefInterface.PREF_INT:
-                    return prefInterface.getIntPref(pref);
-                case prefInterface.PREF_INVALID:
-                    return null;
-            }
-            """ % name
-        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
-            return self.marionette.execute_script(script)
-
-    def clear_origin(self, url):
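-        # Drop quota-managed storage for the test origin; invoked before
-        # tests under /storage/ run (see ExecuteAsyncScriptRun.run)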
-        self.logger.info("Clearing origin %s" % (url))
-        script = """
-            let url = '%s';
-            let uri = Components.classes["@mozilla.org/network/io-service;1"]
-                                .getService(Ci.nsIIOService)
-                                .newURI(url);
-            let ssm = Components.classes["@mozilla.org/scriptsecuritymanager;1"]
-                                .getService(Ci.nsIScriptSecurityManager);
-            let principal = ssm.createCodebasePrincipal(uri, {});
-            let qms = Components.classes["@mozilla.org/dom/quota-manager-service;1"]
-                                .getService(Components.interfaces.nsIQuotaManagerService);
-            qms.clearStoragesForPrincipal(principal, "default", true);
-            """ % url
-        with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
-            self.marionette.execute_script(script)
-
-
-class RemoteMarionetteProtocol(Protocol):
-    def __init__(self, executor, browser):
-        do_delayed_imports()
-        Protocol.__init__(self, executor, browser)
-        self.webdriver_binary = executor.webdriver_binary
-        self.capabilities = self.executor.capabilities
-        self.session_config = None
-        self.server = None
-
-    def setup(self, runner):
-        """Connect to browser via the Marionette HTTP server."""
-        try:
-            self.server = GeckoDriverServer(
-                self.logger, binary=self.webdriver_binary)
-            self.server.start(block=False)
-            self.logger.info(
-                "WebDriver HTTP server listening at %s" % self.server.url)
-            self.session_config = {"host": self.server.host,
-                                   "port": self.server.port,
-                                   "capabilities": self.capabilities}
-        except Exception:
-            self.logger.error(traceback.format_exc())
-            self.executor.runner.send_message("init_failed")
-        else:
-            self.executor.runner.send_message("init_succeeded")
-
-    def teardown(self):
-        if self.server is not None and self.server.is_alive:
-            self.server.stop()
-
-    @property
-    def is_alive(self):
-        """Test that the Marionette connection is still alive.
-
-        Because the remote communication happens over HTTP we need to
-        make an explicit request to the remote.  It is allowed for
-        WebDriver spec tests to not have a WebDriver session, since this
-        may be what is tested.
-
-        An HTTP request to an invalid path that results in a 404 is
-        proof enough to us that the server is alive and kicking.
-        """
-        conn = httplib.HTTPConnection(self.server.host, self.server.port)
-        conn.request("HEAD", self.server.base_path + "invalid")
-        res = conn.getresponse()
-        return res.status == 404
-
-
-class ExecuteAsyncScriptRun(object):
-    def __init__(self, logger, func, protocol, url, timeout):
-        self.logger = logger
-        self.result = (None, None)
-        self.protocol = protocol
-        self.marionette = protocol.marionette
-        self.func = func
-        self.url = url
-        self.timeout = timeout
-        self.result_flag = threading.Event()
-
-    def run(self):
-        if "/storage/" in self.url:
-            # Clear storage
-            self.protocol.clear_origin(self.url)
-
-        timeout = self.timeout
-
-        try:
-            if timeout is not None:
-                if timeout + extra_timeout != self.protocol.timeout:
-                    self.protocol.set_timeout(timeout + extra_timeout)
-            else:
-                # We just want it to never time out, really, but marionette doesn't
-                # make that possible. It also seems to time out immediately if the
-                # timeout is set too high. This works at least.
-                self.protocol.set_timeout(2**28 - 1)
-        except IOError:
-            self.logger.error("Lost marionette connection before starting test")
-            return Stop
-
-        executor = threading.Thread(target=self._run)
-        executor.start()
-
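-        # Wait one extra_timeout beyond the Marionette script timeout so a
-        # harness timeout or ScriptTimeoutException is reported first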
-        if timeout is not None:
-            wait_timeout = timeout + 2 * extra_timeout
-        else:
-            wait_timeout = None
-
-        self.result_flag.wait(wait_timeout)
-        if self.result[1] is None:
-            self.logger.debug("Timed out waiting for a result")
-            self.result = False, ("EXTERNAL-TIMEOUT", None)
-        return self.result
-
-    def _run(self):
-        try:
-            self.result = True, self.func(self.marionette, self.url, self.timeout)
-        except errors.ScriptTimeoutException:
-            self.logger.debug("Got a marionette timeout")
-            self.result = False, ("EXTERNAL-TIMEOUT", None)
-        except (socket.timeout, IOError):
-            # This can happen on a crash. We should also check after the test
-            # whether the firefox process is still running and, if it is not,
-            # ignore any other result and report a crash.
-            self.result = False, ("CRASH", None)
-        except Exception as e:
-            message = getattr(e, "message", "")
-            if message:
-                message += "\n"
-            message += traceback.format_exc()
-            self.result = False, ("ERROR", message)
-
-        finally:
-            self.result_flag.set()
-
-
-class MarionetteTestharnessExecutor(TestharnessExecutor):
-    def __init__(self, browser, server_config, timeout_multiplier=1,
-                 close_after_done=True, debug_info=None, **kwargs):
-        """Marionette-based executor for testharness.js tests"""
-        TestharnessExecutor.__init__(self, browser, server_config,
-                                     timeout_multiplier=timeout_multiplier,
-                                     debug_info=debug_info)
-
-        self.protocol = MarionetteProtocol(self, browser, timeout_multiplier)
-        self.script = open(os.path.join(here, "testharness_marionette.js")).read()
-        self.close_after_done = close_after_done
-        self.window_id = str(uuid.uuid4())
-
-        self.original_pref_values = {}
-
-        if marionette is None:
-            do_delayed_imports()
-
-    def is_alive(self):
-        return self.protocol.is_alive
-
-    def on_environment_change(self, new_environment):
-        self.protocol.on_environment_change(self.last_environment, new_environment)
-
-        if new_environment["protocol"] != self.last_environment["protocol"]:
-            self.protocol.load_runner(new_environment["protocol"])
-
-    def do_test(self, test):
-        timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
-                   else None)
-
-        success, data = ExecuteAsyncScriptRun(self.logger,
-                                              self.do_testharness,
-                                              self.protocol,
-                                              self.test_url(test),
-                                              timeout).run()
-        if success:
-            return self.convert_result(test, data)
-
-        return (test.result_cls(*data), [])
-
-    def do_testharness(self, marionette, url, timeout):
-        if self.close_after_done:
-            marionette.execute_script("if (window.wrappedJSObject.win) {window.wrappedJSObject.win.close()}")
-            self.protocol.close_old_windows(self.last_environment["protocol"])
-
-        if timeout is not None:
-            timeout_ms = str(timeout * 1000)
-        else:
-            timeout_ms = "null"
-
-        script = self.script % {"abs_url": url,
-                                "url": strip_server(url),
-                                "window_id": self.window_id,
-                                "timeout_multiplier": self.timeout_multiplier,
-                                "timeout": timeout_ms,
-                                "explicit_timeout": timeout is None}
-
-        rv = marionette.execute_async_script(script, new_sandbox=False)
-        return rv
-
-
-class MarionetteRefTestExecutor(RefTestExecutor):
-    def __init__(self, browser, server_config, timeout_multiplier=1,
-                 screenshot_cache=None, close_after_done=True,
-                 debug_info=None, **kwargs):
-        """Marionette-based executor for reftests"""
-        RefTestExecutor.__init__(self,
-                                 browser,
-                                 server_config,
-                                 screenshot_cache=screenshot_cache,
-                                 timeout_multiplier=timeout_multiplier,
-                                 debug_info=debug_info)
-        self.protocol = MarionetteProtocol(self, browser)
-        self.implementation = RefTestImplementation(self)
-        self.close_after_done = close_after_done
-        self.has_window = False
-        self.original_pref_values = {}
-
-        with open(os.path.join(here, "reftest.js")) as f:
-            self.script = f.read()
-        with open(os.path.join(here, "reftest-wait.js")) as f:
-            self.wait_script = f.read()
-
-    def is_alive(self):
-        return self.protocol.is_alive
-
-    def on_environment_change(self, new_environment):
-        self.protocol.on_environment_change(self.last_environment, new_environment)
-
-    def do_test(self, test):
-        if self.close_after_done and self.has_window:
-            self.protocol.marionette.close()
-            self.protocol.marionette.switch_to_window(
-                self.protocol.marionette.window_handles[-1])
-            self.has_window = False
-
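-        # The injected reftest script is expected to open the test window;
-        # we then switch to the most recently opened handle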
-        if not self.has_window:
-            self.protocol.marionette.execute_script(self.script)
-            self.protocol.marionette.switch_to_window(self.protocol.marionette.window_handles[-1])
-            self.has_window = True
-
-        result = self.implementation.run_test(test)
-        return self.convert_result(test, result)
-
-    def screenshot(self, test, viewport_size, dpi):
-        # https://github.com/w3c/wptrunner/issues/166
-        assert viewport_size is None
-        assert dpi is None
-
-        timeout = self.timeout_multiplier * test.timeout if self.debug_info is None else None
-
-        test_url = self.test_url(test)
-
-        return ExecuteAsyncScriptRun(self.logger,
-                                     self._screenshot,
-                                     self.protocol,
-                                     test_url,
-                                     timeout).run()
-
-    def _screenshot(self, marionette, url, timeout):
-        marionette.navigate(url)
-
-        marionette.execute_async_script(self.wait_script)
-
-        screenshot = marionette.screenshot(full=False)
-        # Strip off the "data:image/png;base64," prefix
-        if screenshot.startswith("data:image/png;base64,"):
-            screenshot = screenshot.split(",", 1)[1]
-
-        return screenshot
-
-
-class WdspecRun(object):
-    def __init__(self, func, session, path, timeout):
-        self.func = func
-        self.result = (None, None)
-        self.session = session
-        self.path = path
-        self.timeout = timeout
-        self.result_flag = threading.Event()
-
-    def run(self):
-        """Runs function in a thread and interrupts it if it exceeds the
-        given timeout.  Returns (True, (Result, [SubtestResult ...])) in
-        case of success, or (False, (status, extra information)) in the
-        event of failure.
-        """
-
-        executor = threading.Thread(target=self._run)
-        executor.start()
-
-        self.result_flag.wait(self.timeout)
-        if self.result[1] is None:
-            self.result = False, ("EXTERNAL-TIMEOUT", None)
-
-        return self.result
-
-    def _run(self):
-        try:
-            self.result = True, self.func(self.session, self.path, self.timeout)
-        except (socket.timeout, IOError):
-            self.result = False, ("CRASH", None)
-        except Exception as e:
-            message = getattr(e, "message")
-            if message:
-                message += "\n"
-            message += traceback.format_exc(e)
-            self.result = False, ("ERROR", message)
-        finally:
-            self.result_flag.set()
-
-
-class MarionetteWdspecExecutor(WdspecExecutor):
-    def __init__(self, browser, server_config, webdriver_binary,
-                 timeout_multiplier=1, close_after_done=True, debug_info=None,
-                 capabilities=None):
-        self.do_delayed_imports()
-        WdspecExecutor.__init__(self, browser, server_config,
-                                timeout_multiplier=timeout_multiplier,
-                                debug_info=debug_info)
-        self.webdriver_binary = webdriver_binary
-        self.capabilities = capabilities
-        self.protocol = RemoteMarionetteProtocol(self, browser)
-
-    def is_alive(self):
-        return self.protocol.is_alive
-
-    def on_environment_change(self, new_environment):
-        pass
-
-    def do_test(self, test):
-        timeout = test.timeout * self.timeout_multiplier + extra_timeout
-
-        success, data = WdspecRun(self.do_wdspec,
-                                  self.protocol.session_config,
-                                  test.abs_path,
-                                  timeout).run()
-
-        if success:
-            return self.convert_result(test, data)
-
-        return (test.result_cls(*data), [])
-
-    def do_wdspec(self, session_config, path, timeout):
-        harness_result = ("OK", None)
-        subtest_results = pytestrunner.run(path,
-                                           self.server_config,
-                                           session_config,
-                                           timeout=timeout)
-        return (harness_result, subtest_results)
-
-    def do_delayed_imports(self):
-        global pytestrunner
-        from . import pytestrunner
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/executorselenium.py
+++ /dev/null
@@ -1,264 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import os
-import socket
-import sys
-import threading
-import time
-import traceback
-import urlparse
-import uuid
-
-from .base import (ExecutorException,
-                   Protocol,
-                   RefTestExecutor,
-                   RefTestImplementation,
-                   TestExecutor,
-                   TestharnessExecutor,
-                   testharness_result_converter,
-                   reftest_result_converter,
-                   strip_server)
-from ..testrunner import Stop
-
-here = os.path.dirname(__file__)
-
-webdriver = None
-exceptions = None
-RemoteConnection = None
-
-extra_timeout = 5
-
-def do_delayed_imports():
-    global webdriver
-    global exceptions
-    global RemoteConnection
-    from selenium import webdriver
-    from selenium.common import exceptions
-    from selenium.webdriver.remote.remote_connection import RemoteConnection
-
-class SeleniumProtocol(Protocol):
-    def __init__(self, executor, browser, capabilities, **kwargs):
-        do_delayed_imports()
-
-        Protocol.__init__(self, executor, browser)
-        self.capabilities = capabilities
-        self.url = browser.webdriver_url
-        self.webdriver = None
-
-    def setup(self, runner):
-        """Connect to browser via Selenium's WebDriver implementation."""
-        self.runner = runner
-        self.logger.debug("Connecting to Selenium on URL: %s" % self.url)
-
-        session_started = False
-        try:
-            self.webdriver = webdriver.Remote(command_executor=RemoteConnection(self.url.strip("/"),
-                                                                                resolve_ip=False),
-                                              desired_capabilities=self.capabilities)
-        except Exception:
-            self.logger.warning(
-                "Connecting to Selenium failed:\n%s" % traceback.format_exc())
-        else:
-            self.logger.debug("Selenium session started")
-            session_started = True
-
-        if not session_started:
-            self.logger.warning("Failed to connect to Selenium")
-            self.executor.runner.send_message("init_failed")
-        else:
-            try:
-                self.after_connect()
-            except Exception:
-                print >> sys.stderr, traceback.format_exc()
-                self.logger.warning(
-                    "Failed to navigate to initial page")
-                self.executor.runner.send_message("init_failed")
-            else:
-                self.executor.runner.send_message("init_succeeded")
-
-    def teardown(self):
-        self.logger.debug("Hanging up on Selenium session")
-        try:
-            self.webdriver.quit()
-        except Exception:
-            pass
-        del self.webdriver
-
-    def is_alive(self):
-        try:
-            # Get a simple property over the connection
-            self.webdriver.current_window_handle
-        # TODO what exception?
-        except (socket.timeout, exceptions.ErrorInResponseException):
-            return False
-        return True
-
-    def after_connect(self):
-        self.load_runner("http")
-
-    def load_runner(self, protocol):
-        url = urlparse.urljoin(self.executor.server_url(protocol),
-                               "/testharness_runner.html")
-        self.logger.debug("Loading %s" % url)
-        self.webdriver.get(url)
-        self.webdriver.execute_script("document.title = '%s'" %
-                                      threading.current_thread().name.replace("'", '"'))
-
-    def wait(self):
-        while True:
-            try:
-                self.webdriver.execute_async_script("")
-            except exceptions.TimeoutException:
-                pass
-            except (socket.timeout, exceptions.NoSuchWindowException,
-                    exceptions.ErrorInResponseException, IOError):
-                break
-            except Exception as e:
-                self.logger.error(traceback.format_exc())
-                break
-
-
-class SeleniumRun(object):
-    def __init__(self, logger, func, webdriver, url, timeout):
-        self.logger = logger
-        self.func = func
-        self.result = None
-        self.webdriver = webdriver
-        self.url = url
-        self.timeout = timeout
-        self.result_flag = threading.Event()
-
-    def run(self):
-        timeout = self.timeout
-
-        try:
-            # Note: Selenium's set_script_timeout takes seconds, not milliseconds
-            self.webdriver.set_script_timeout(timeout + extra_timeout)
-        except exceptions.ErrorInResponseException:
-            self.logger.error("Lost WebDriver connection")
-            return Stop
-
-        executor = threading.Thread(target=self._run)
-        executor.start()
-
-        flag = self.result_flag.wait(timeout + 2 * extra_timeout)
-        if self.result is None:
-            assert not flag
-            self.result = False, ("EXTERNAL-TIMEOUT", None)
-
-        return self.result
-
-    def _run(self):
-        try:
-            self.result = True, self.func(self.webdriver, self.url, self.timeout)
-        except exceptions.TimeoutException:
-            self.result = False, ("EXTERNAL-TIMEOUT", None)
-        except (socket.timeout, exceptions.ErrorInResponseException):
-            self.result = False, ("CRASH", None)
-        except Exception as e:
-            message = getattr(e, "message", "")
-            if message:
-                message += "\n"
-            message += traceback.format_exc()
-            self.result = False, ("ERROR", message)
-        finally:
-            self.result_flag.set()
-
-
-class SeleniumTestharnessExecutor(TestharnessExecutor):
-    def __init__(self, browser, server_config, timeout_multiplier=1,
-                 close_after_done=True, capabilities=None, debug_info=None):
-        """Selenium-based executor for testharness.js tests"""
-        TestharnessExecutor.__init__(self, browser, server_config,
-                                     timeout_multiplier=timeout_multiplier,
-                                     debug_info=debug_info)
-        self.protocol = SeleniumProtocol(self, browser, capabilities)
-        with open(os.path.join(here, "testharness_webdriver.js")) as f:
-            self.script = f.read()
-        self.close_after_done = close_after_done
-        self.window_id = str(uuid.uuid4())
-
-    def is_alive(self):
-        return self.protocol.is_alive()
-
-    def on_environment_change(self, new_environment):
-        if new_environment["protocol"] != self.last_environment["protocol"]:
-            self.protocol.load_runner(new_environment["protocol"])
-
-    def do_test(self, test):
-        url = self.test_url(test)
-
-        success, data = SeleniumRun(self.logger,
-                                    self.do_testharness,
-                                    self.protocol.webdriver,
-                                    url,
-                                    test.timeout * self.timeout_multiplier).run()
-
-        if success:
-            return self.convert_result(test, data)
-
-        return (test.result_cls(*data), [])
-
-    def do_testharness(self, webdriver, url, timeout):
-        return webdriver.execute_async_script(
-            self.script % {"abs_url": url,
-                           "url": strip_server(url),
-                           "window_id": self.window_id,
-                           "timeout_multiplier": self.timeout_multiplier,
-                           "timeout": timeout * 1000})
-
-class SeleniumRefTestExecutor(RefTestExecutor):
-    def __init__(self, browser, server_config, timeout_multiplier=1,
-                 screenshot_cache=None, close_after_done=True,
-                 debug_info=None, capabilities=None):
-        """Selenium WebDriver-based executor for reftests"""
-        RefTestExecutor.__init__(self,
-                                 browser,
-                                 server_config,
-                                 screenshot_cache=screenshot_cache,
-                                 timeout_multiplier=timeout_multiplier,
-                                 debug_info=debug_info)
-        self.protocol = SeleniumProtocol(self, browser,
-                                         capabilities=capabilities)
-        self.implementation = RefTestImplementation(self)
-        self.close_after_done = close_after_done
-        self.has_window = False
-
-        with open(os.path.join(here, "reftest.js")) as f:
-            self.script = f.read()
-        with open(os.path.join(here, "reftest-wait_webdriver.js")) as f:
-            self.wait_script = f.read()
-
-    def is_alive(self):
-        return self.protocol.is_alive()
-
-    def do_test(self, test):
-        self.logger.info("Test requires OS-level window focus")
-
-        self.protocol.webdriver.set_window_size(600, 600)
-
-        result = self.implementation.run_test(test)
-
-        return self.convert_result(test, result)
-
-    def screenshot(self, test, viewport_size, dpi):
-        # https://github.com/w3c/wptrunner/issues/166
-        assert viewport_size is None
-        assert dpi is None
-
-        return SeleniumRun(self.logger,
-                           self._screenshot,
-                           self.protocol.webdriver,
-                           self.test_url(test),
-                           test.timeout).run()
-
-    def _screenshot(self, webdriver, url, timeout):
-        webdriver.get(url)
-
-        webdriver.execute_async_script(self.wait_script)
-
-        screenshot = webdriver.get_screenshot_as_base64()
-
-        # Strip off the "data:image/png;base64," prefix
-        if screenshot.startswith("data:image/png;base64,"):
-            screenshot = screenshot.split(",", 1)[1]
-
-        return screenshot
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/executorservo.py
+++ /dev/null
@@ -1,367 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import base64
-import hashlib
-import httplib
-import json
-import os
-import subprocess
-import tempfile
-import threading
-import traceback
-import urlparse
-import uuid
-from collections import defaultdict
-
-from mozprocess import ProcessHandler
-
-from .base import (ExecutorException,
-                   Protocol,
-                   RefTestImplementation,
-                   testharness_result_converter,
-                   reftest_result_converter,
-                   WdspecExecutor)
-from .process import ProcessTestExecutor
-from ..browsers.base import browser_command
-from ..wpttest import WdspecResult, WdspecSubtestResult
-from ..webdriver_server import ServoDriverServer
-from .executormarionette import WdspecRun
-
-pytestrunner = None
-webdriver = None
-
-extra_timeout = 5 # seconds
-
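-# Hosts file handed to Servo via the HOST_FILE environment variable so the
-# web-platform.test domains resolve to 127.0.0.1, where the test server runs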
-hosts_text = """127.0.0.1 web-platform.test
-127.0.0.1 www.web-platform.test
-127.0.0.1 www1.web-platform.test
-127.0.0.1 www2.web-platform.test
-127.0.0.1 xn--n8j6ds53lwwkrqhv28a.web-platform.test
-127.0.0.1 xn--lve-6lad.web-platform.test
-"""
-
-def make_hosts_file():
-    hosts_fd, hosts_path = tempfile.mkstemp()
-    with os.fdopen(hosts_fd, "w") as f:
-        f.write(hosts_text)
-    return hosts_path
-
-
-class ServoTestharnessExecutor(ProcessTestExecutor):
-    convert_result = testharness_result_converter
-
-    def __init__(self, browser, server_config, timeout_multiplier=1, debug_info=None,
-                 pause_after_test=False):
-        do_delayed_imports()
-        ProcessTestExecutor.__init__(self, browser, server_config,
-                                     timeout_multiplier=timeout_multiplier,
-                                     debug_info=debug_info)
-        self.pause_after_test = pause_after_test
-        self.result_data = None
-        self.result_flag = None
-        self.protocol = Protocol(self, browser)
-        self.hosts_path = make_hosts_file()
-
-    def teardown(self):
-        try:
-            os.unlink(self.hosts_path)
-        except OSError:
-            pass
-        ProcessTestExecutor.teardown(self)
-
-    def do_test(self, test):
-        self.result_data = None
-        self.result_flag = threading.Event()
-
-        args = [
-            "--hard-fail", "-u", "Servo/wptrunner",
-            "-Z", "replace-surrogates", "-z", self.test_url(test),
-        ]
-        for stylesheet in self.browser.user_stylesheets:
-            args += ["--user-stylesheet", stylesheet]
-        for pref, value in test.environment.get('prefs', {}).iteritems():
-            args += ["--pref", "%s=%s" % (pref, value)]
-        args += self.browser.binary_args
-        debug_args, command = browser_command(self.binary, args, self.debug_info)
-
-        self.command = command
-
-        if self.pause_after_test:
-            self.command.remove("-z")
-
-        self.command = debug_args + self.command
-
-        env = os.environ.copy()
-        env["HOST_FILE"] = self.hosts_path
-        env["RUST_BACKTRACE"] = "1"
-
-        if not self.interactive:
-            self.proc = ProcessHandler(self.command,
-                                       processOutputLine=[self.on_output],
-                                       onFinish=self.on_finish,
-                                       env=env,
-                                       storeOutput=False)
-            self.proc.run()
-        else:
-            self.proc = subprocess.Popen(self.command, env=env)
-
-        try:
-            timeout = test.timeout * self.timeout_multiplier
-
-            # Now wait to get the output we expect, or until we reach the timeout
-            if not self.interactive and not self.pause_after_test:
-                wait_timeout = timeout + extra_timeout
-                self.result_flag.wait(wait_timeout)
-            else:
-                wait_timeout = None
-                self.proc.wait()
-
-            proc_is_running = True
-
-            if self.result_flag.is_set():
-                if self.result_data is not None:
-                    result = self.convert_result(test, self.result_data)
-                else:
-                    self.proc.wait()
-                    result = (test.result_cls("CRASH", None), [])
-                    proc_is_running = False
-            else:
-                result = (test.result_cls("TIMEOUT", None), [])
-
-            if proc_is_running:
-                if self.pause_after_test:
-                    self.logger.info("Pausing until the browser exits")
-                    self.proc.wait()
-                else:
-                    self.proc.kill()
-        except KeyboardInterrupt:
-            self.proc.kill()
-            raise
-
-        return result
-
-    def on_output(self, line):
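-        # Servo reports the harness result as a JSON payload behind this
-        # alert prefix on stdout; anything else is ordinary process output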
-        prefix = "ALERT: RESULT: "
-        line = line.decode("utf8", "replace")
-        if line.startswith(prefix):
-            self.result_data = json.loads(line[len(prefix):])
-            self.result_flag.set()
-        else:
-            if self.interactive:
-                print line
-            else:
-                self.logger.process_output(self.proc.pid,
-                                           line,
-                                           " ".join(self.command))
-
-    def on_finish(self):
-        self.result_flag.set()
-
-
-class TempFilename(object):
-    def __init__(self, directory):
-        self.directory = directory
-        self.path = None
-
-    def __enter__(self):
-        self.path = os.path.join(self.directory, str(uuid.uuid4()))
-        return self.path
-
-    def __exit__(self, *args, **kwargs):
-        try:
-            os.unlink(self.path)
-        except OSError:
-            pass
-
-
-class ServoRefTestExecutor(ProcessTestExecutor):
-    convert_result = reftest_result_converter
-
-    def __init__(self, browser, server_config, binary=None, timeout_multiplier=1,
-                 screenshot_cache=None, debug_info=None, pause_after_test=False):
-        do_delayed_imports()
-        ProcessTestExecutor.__init__(self,
-                                     browser,
-                                     server_config,
-                                     timeout_multiplier=timeout_multiplier,
-                                     debug_info=debug_info)
-
-        self.protocol = Protocol(self, browser)
-        self.screenshot_cache = screenshot_cache
-        self.implementation = RefTestImplementation(self)
-        self.tempdir = tempfile.mkdtemp()
-        self.hosts_path = make_hosts_file()
-
-    def teardown(self):
-        try:
-            os.unlink(self.hosts_path)
-        except OSError:
-            pass
-        os.rmdir(self.tempdir)
-        ProcessTestExecutor.teardown(self)
-
-    def screenshot(self, test, viewport_size, dpi):
-        full_url = self.test_url(test)
-
-        with TempFilename(self.tempdir) as output_path:
-            debug_args, command = browser_command(
-                self.binary,
-                [
-                    "--hard-fail", "--exit",
-                    "-u", "Servo/wptrunner",
-                    "-Z", "disable-text-aa,load-webfonts-synchronously,replace-surrogates",
-                    "--output=%s" % output_path, full_url
-                ] + self.browser.binary_args,
-                self.debug_info)
-
-            for stylesheet in self.browser.user_stylesheets:
-                command += ["--user-stylesheet", stylesheet]
-
-            for pref, value in test.environment.get('prefs', {}).iteritems():
-                command += ["--pref", "%s=%s" % (pref, value)]
-
-            command += ["--resolution", viewport_size or "800x600"]
-
-            if dpi:
-                command += ["--device-pixel-ratio", dpi]
-
-            # Run ref tests in headless mode
-            command += ["-z"]
-
-            self.command = debug_args + command
-
-            env = os.environ.copy()
-            env["HOST_FILE"] = self.hosts_path
-            env["RUST_BACKTRACE"] = "1"
-
-            if not self.interactive:
-                self.proc = ProcessHandler(self.command,
-                                           processOutputLine=[self.on_output],
-                                           env=env)
-
-                try:
-                    self.proc.run()
-                    timeout = test.timeout * self.timeout_multiplier + extra_timeout
-                    rv = self.proc.wait(timeout=timeout)
-                except KeyboardInterrupt:
-                    self.proc.kill()
-                    raise
-            else:
-                self.proc = subprocess.Popen(self.command,
-                                             env=env)
-                try:
-                    rv = self.proc.wait()
-                except KeyboardInterrupt:
-                    self.proc.kill()
-                    raise
-
-            if rv is None:
-                self.proc.kill()
-                return False, ("EXTERNAL-TIMEOUT", None)
-
-            if rv != 0 or not os.path.exists(output_path):
-                return False, ("CRASH", None)
-
-            with open(output_path) as f:
-                # Might need to strip variable headers or something here
-                data = f.read()
-                return True, base64.b64encode(data)
-
-    def do_test(self, test):
-        result = self.implementation.run_test(test)
-
-        return self.convert_result(test, result)
-
-    def on_output(self, line):
-        line = line.decode("utf8", "replace")
-        if self.interactive:
-            print line
-        else:
-            self.logger.process_output(self.proc.pid,
-                                       line,
-                                       " ".join(self.command))
-
-class ServoWdspecProtocol(Protocol):
-    def __init__(self, executor, browser):
-        self.do_delayed_imports()
-        Protocol.__init__(self, executor, browser)
-        self.session = None
-        self.server = None
-
-    def setup(self, runner):
-        try:
-            self.server = ServoDriverServer(self.logger,
-                                            binary=self.browser.binary,
-                                            binary_args=self.browser.binary_args)
-            self.server.start(block=False)
-            self.logger.info(
-                "WebDriver HTTP server listening at %s" % self.server.url)
-
-            self.logger.info(
-                "Establishing new WebDriver session with %s" % self.server.url)
-            self.session = webdriver.Session(
-                self.server.host, self.server.port, self.server.base_path)
-        except Exception:
-            self.logger.error(traceback.format_exc())
-            self.executor.runner.send_message("init_failed")
-        else:
-            self.executor.runner.send_message("init_succeeded")
-
-    def teardown(self):
-        if self.server is not None:
-            try:
-                if self.session.session_id is not None:
-                    self.session.end()
-            except Exception:
-                pass
-            if self.server.is_alive:
-                self.server.stop()
-
-    @property
-    def is_alive(self):
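-        # Same probe as RemoteMarionetteProtocol.is_alive: a 404 for an
-        # invalid path is proof enough that the server is responsive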
-        conn = httplib.HTTPConnection(self.server.host, self.server.port)
-        conn.request("HEAD", self.server.base_path + "invalid")
-        res = conn.getresponse()
-        return res.status == 404
-
-    def do_delayed_imports(self):
-        global pytestrunner, webdriver
-        from . import pytestrunner
-        import webdriver
-
-
-class ServoWdspecExecutor(WdspecExecutor):
-    def __init__(self, browser, server_config,
-                 timeout_multiplier=1, close_after_done=True, debug_info=None,
-                 **kwargs):
-        WdspecExecutor.__init__(self, browser, server_config,
-                                timeout_multiplier=timeout_multiplier,
-                                debug_info=debug_info)
-        self.protocol = ServoWdspecProtocol(self, browser)
-
-    def is_alive(self):
-        return self.protocol.is_alive
-
-    def on_environment_change(self, new_environment):
-        pass
-
-    def do_test(self, test):
-        timeout = test.timeout * self.timeout_multiplier + extra_timeout
-
-        success, data = WdspecRun(self.do_wdspec,
-                                  self.protocol.session,
-                                  test.path,
-                                  timeout).run()
-
-        if success:
-            return self.convert_result(test, data)
-
-        return (test.result_cls(*data), [])
-
-    def do_wdspec(self, session, path, timeout):
-        harness_result = ("OK", None)
-        subtest_results = pytestrunner.run(path, session, timeout=timeout)
-        return (harness_result, subtest_results)
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/executorservodriver.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import json
-import os
-import socket
-import threading
-import time
-import traceback
-
-from .base import (Protocol,
-                   RefTestExecutor,
-                   RefTestImplementation,
-                   TestharnessExecutor,
-                   strip_server)
-from ..testrunner import Stop
-
-webdriver = None
-
-here = os.path.dirname(__file__)
-
-extra_timeout = 5
-
-
-def do_delayed_imports():
-    global webdriver
-    import webdriver
-
-
-class ServoWebDriverProtocol(Protocol):
-    def __init__(self, executor, browser, capabilities, **kwargs):
-        do_delayed_imports()
-        Protocol.__init__(self, executor, browser)
-        self.capabilities = capabilities
-        self.host = browser.webdriver_host
-        self.port = browser.webdriver_port
-        self.session = None
-
-    def setup(self, runner):
-        """Connect to browser via WebDriver."""
-        self.runner = runner
-
-        url = "http://%s:%d" % (self.host, self.port)
-        session_started = False
-        try:
-            self.session = webdriver.Session(
-                self.host, self.port,
-                extension=webdriver.servo.ServoCommandExtensions)
-            self.session.start()
-        except Exception:
-            self.logger.warning(
-                "Connecting with WebDriver failed:\n%s" % traceback.format_exc())
-        else:
-            self.logger.debug("session started")
-            session_started = True
-
-        if not session_started:
-            self.logger.warning("Failed to connect via WebDriver")
-            self.executor.runner.send_message("init_failed")
-        else:
-            self.executor.runner.send_message("init_succeeded")
-
-    def teardown(self):
-        self.logger.debug("Hanging up on WebDriver session")
-        try:
-            self.session.end()
-        except Exception:
-            pass
-
-    def is_alive(self):
-        try:
-            # Get a simple property over the connection
-            self.session.window_handle
-        # TODO what exception?
-        except Exception:
-            return False
-        return True
-
-    def after_connect(self):
-        pass
-
-    def wait(self):
-        while True:
-            try:
-                self.session.execute_async_script("")
-            except webdriver.TimeoutException:
-                pass
-            except (socket.timeout, IOError):
-                break
-            except Exception as e:
-                self.logger.error(traceback.format_exc())
-                break
-
-    def on_environment_change(self, old_environment, new_environment):
-        # Unset all the old prefs
-        self.session.extension.reset_prefs(*old_environment.get("prefs", {}).keys())
-        self.session.extension.set_prefs(new_environment.get("prefs", {}))
-
-
-class ServoWebDriverRun(object):
-    def __init__(self, func, session, url, timeout):
-        self.func = func
-        self.result = None
-        self.session = session
-        self.url = url
-        self.timeout = timeout
-        self.result_flag = threading.Event()
-
-    def run(self):
-        executor = threading.Thread(target=self._run)
-        executor.start()
-
-        flag = self.result_flag.wait(self.timeout + extra_timeout)
-        if self.result is None:
-            assert not flag
-            self.result = False, ("EXTERNAL-TIMEOUT", None)
-
-        return self.result
-
-    def _run(self):
-        try:
-            self.result = True, self.func(self.session, self.url, self.timeout)
-        except webdriver.TimeoutException:
-            self.result = False, ("EXTERNAL-TIMEOUT", None)
-        except (socket.timeout, IOError):
-            self.result = False, ("CRASH", None)
-        except Exception as e:
-            message = getattr(e, "message", "")
-            if message:
-                message += "\n"
-            message += traceback.format_exc()
-            self.result = False, ("ERROR", message)
-        finally:
-            self.result_flag.set()
-
-
-def timeout_func(timeout):
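-    # Return a predicate reporting whether the timeout (plus the extra grace
-    # period) has elapsed; with no timeout it never expires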
-    if timeout:
-        t0 = time.time()
-        return lambda: time.time() - t0 > timeout + extra_timeout
-    else:
-        return lambda: False
-
-
-class ServoWebDriverTestharnessExecutor(TestharnessExecutor):
-    def __init__(self, browser, server_config, timeout_multiplier=1,
-                 close_after_done=True, capabilities=None, debug_info=None):
-        TestharnessExecutor.__init__(self, browser, server_config,
-                                     timeout_multiplier=timeout_multiplier,
-                                     debug_info=debug_info)
-        self.protocol = ServoWebDriverProtocol(self, browser, capabilities=capabilities)
-        with open(os.path.join(here, "testharness_servodriver.js")) as f:
-            self.script = f.read()
-        self.timeout = None
-
-    def on_protocol_change(self, new_protocol):
-        pass
-
-    def is_alive(self):
-        return self.protocol.is_alive()
-
-    def do_test(self, test):
-        url = self.test_url(test)
-
-        timeout = test.timeout * self.timeout_multiplier + extra_timeout
-
-        if timeout != self.timeout:
-            try:
-                self.protocol.session.timeouts.script = timeout
-                self.timeout = timeout
-            except IOError:
-                self.logger.error("Lost webdriver connection")
-                return Stop
-
-        success, data = ServoWebDriverRun(self.do_testharness,
-                                          self.protocol.session,
-                                          url,
-                                          timeout).run()
-
-        if success:
-            return self.convert_result(test, data)
-
-        return (test.result_cls(*data), [])
-
-    def do_testharness(self, session, url, timeout):
-        session.url = url
-        result = json.loads(
-            session.execute_async_script(
-                self.script % {"abs_url": url,
-                               "url": strip_server(url),
-                               "timeout_multiplier": self.timeout_multiplier,
-                               "timeout": timeout * 1000}))
-        # Prevent leaking every page in history until Servo develops a more sane
-        # page cache
-        session.back()
-        return result
-
-
-class TimeoutError(Exception):
-    pass
-
-
-class ServoWebDriverRefTestExecutor(RefTestExecutor):
-    def __init__(self, browser, server_config, timeout_multiplier=1,
-                 screenshot_cache=None, capabilities=None, debug_info=None):
-        """Selenium WebDriver-based executor for reftests"""
-        RefTestExecutor.__init__(self,
-                                 browser,
-                                 server_config,
-                                 screenshot_cache=screenshot_cache,
-                                 timeout_multiplier=timeout_multiplier,
-                                 debug_info=debug_info)
-        self.protocol = ServoWebDriverProtocol(self, browser,
-                                               capabilities=capabilities)
-        self.implementation = RefTestImplementation(self)
-        self.timeout = None
-        with open(os.path.join(here, "reftest-wait_servodriver.js")) as f:
-            self.wait_script = f.read()
-
-    def is_alive(self):
-        return self.protocol.is_alive()
-
-    def do_test(self, test):
-        try:
-            result = self.implementation.run_test(test)
-            return self.convert_result(test, result)
-        except IOError:
-            return test.result_cls("CRASH", None), []
-        except TimeoutError:
-            return test.result_cls("TIMEOUT", None), []
-        except Exception as e:
-            message = getattr(e, "message", "")
-            if message:
-                message += "\n"
-            message += traceback.format_exc()
-            return test.result_cls("ERROR", message), []
-
-    def screenshot(self, test, viewport_size, dpi):
-        # https://github.com/w3c/wptrunner/issues/166
-        assert viewport_size is None
-        assert dpi is None
-
-        timeout = (test.timeout * self.timeout_multiplier + extra_timeout
-                   if self.debug_info is None else None)
-
-        if self.timeout != timeout:
-            try:
-                self.protocol.session.timeouts.script = timeout
-                self.timeout = timeout
-            except IOError:
-                self.logger.error("Lost webdriver connection")
-                return Stop
-
-        return ServoWebDriverRun(self._screenshot,
-                                 self.protocol.session,
-                                 self.test_url(test),
-                                 timeout).run()
-
-    def _screenshot(self, session, url, timeout):
-        session.url = url
-        session.execute_async_script(self.wait_script)
-        return session.screenshot()
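
Both Servo executors above derive the per-test script timeout the same way
and only push a new value to the WebDriver session when it changes. A
minimal sketch of that arithmetic, assuming the module-level extra_timeout
of 5 seconds defined alongside the executors::

    extra_timeout = 5           # slack on top of the harness timeout, in seconds
    test_timeout = 10           # from the test's metadata
    timeout_multiplier = 2      # e.g. increased for debug builds
    timeout = test_timeout * timeout_multiplier + extra_timeout
    assert timeout == 25        # only sent to the session when it differs
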
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/process.py
+++ /dev/null
@@ -1,24 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-from .base import TestExecutor
-
-
-class ProcessTestExecutor(TestExecutor):
-    def __init__(self, *args, **kwargs):
-        TestExecutor.__init__(self, *args, **kwargs)
-        self.binary = self.browser.binary
-        self.interactive = (False if self.debug_info is None
-                            else self.debug_info.interactive)
-
-    def setup(self, runner):
-        self.runner = runner
-        self.runner.send_message("init_succeeded")
-        return True
-
-    def is_alive(self):
-        return True
-
-    def do_test(self, test):
-        raise NotImplementedError
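
ProcessTestExecutor is the base for executors that launch a browser process
per test; a concrete subclass only has to supply do_test. A hypothetical
minimal subclass (the class name and body are invented for illustration)::

    class EchoTestExecutor(ProcessTestExecutor):
        def do_test(self, test):
            # A real implementation would launch self.binary against the
            # test URL and report a (result, subtest_results) pair
            print "would run %s with %s" % (test.url, self.binary)
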
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/pytestrunner/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-from . import fixtures
-from .runner import run
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/pytestrunner/fixtures.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import pytest
-import webdriver
-
-import contextlib
-import httplib
-import urlparse
-
-
-"""pytest fixtures for use in Python-based WPT tests.
-
-The purpose of test fixtures is to provide a fixed baseline upon which
-tests can reliably and repeatedly execute.
-"""
-
-
-class Session(object):
-    """Fixture to allow access to wptrunner's existing WebDriver session
-    in tests.
-
-    The session is not created by default to enable testing of session
-    creation.  However, a session will be implicitly created at the first
-    call to a WebDriver command.  This means attributes such as
-    `session.send_command` and `session.session_id` can be used before a
-    session exists.
-
-    To illustrate implicit session creation::
-
-        def test_session_scope(session):
-            # at this point there is no session
-            assert session.session_id is None
-
-            # window_id is a WebDriver command,
-            # and implicitly creates the session for us
-            assert session.window_id is not None
-
-            # we now have a session
-            assert session.session_id is not None
-
-    You can also access the session in custom fixtures defined in the
-    tests, such as a setup function::
-
-        @pytest.fixture(scope="function")
-        def setup(request, session):
-            session.url = "https://example.org"
-
-        def test_something(setup, session):
-            assert session.url == "https://example.org"
-
-    When the fixture goes out of scope, any remaining user prompts
-    and opened windows are closed, and the current browsing context is
-    switched back to the top-level browsing context.
-    """
-
-    def __init__(self, client):
-        self.client = client
-
-    @pytest.fixture(scope="module")
-    def session(self, request):
-        # finalisers are popped off a stack, so they run in the
-        # reverse order of registration
-        request.addfinalizer(self.switch_to_top_level_browsing_context)
-        request.addfinalizer(self.restore_windows)
-        request.addfinalizer(self.dismiss_user_prompts)
-
-        return self.client
-
-    def dismiss_user_prompts(self):
-        """Dismisses any open user prompts in windows."""
-        current_window = self.client.window_handle
-
-        for window in self.windows():
-            self.client.window_handle = window
-            try:
-                self.client.alert.dismiss()
-            except webdriver.NoSuchAlertException:
-                pass
-
-        self.client.window_handle = current_window
-
-    def restore_windows(self):
-        """Closes superfluous windows opened by the test without ending
-        the session implicitly by closing the last window.
-        """
-        current_window = self.client.window_handle
-
-        for window in self.windows(exclude=[current_window]):
-            self.client.window_handle = window
-            if len(self.client.window_handles) > 1:
-                self.client.close()
-
-        self.client.window_handle = current_window
-
-    def switch_to_top_level_browsing_context(self):
-        """If the current browsing context selected by WebDriver is a
-        `<frame>` or an `<iframe>`, switch it back to the top-level
-        browsing context.
-        """
-        self.client.switch_frame(None)
-
-    def windows(self, exclude=None):
-        """Set of window handles, filtered by an `exclude` list if
-        provided.
-        """
-        if exclude is None:
-            exclude = []
-        wins = [w for w in self.client.handles if w not in exclude]
-        return set(wins)
-
-
-class HTTPRequest(object):
-    def __init__(self, host, port):
-        self.host = host
-        self.port = port
-
-    def head(self, path):
-        return self._request("HEAD", path)
-
-    def get(self, path):
-        return self._request("GET", path)
-
-    @contextlib.contextmanager
-    def _request(self, method, path):
-        conn = httplib.HTTPConnection(self.host, self.port)
-        try:
-            conn.request(method, path)
-            yield conn.getresponse()
-        finally:
-            conn.close()
-
-class Server(object):
-    """Fixture to allow access to wptrunner's base server url.
-
-    :param url_getter: Function to get server url from test environment, given
-        a protocol.
-    """
-    def __init__(self, url_getter):
-        self.server_url = url_getter
-
-    def where_is(self, uri, protocol="http"):
-        return urlparse.urljoin(self.server_url(protocol), uri)
-
-    @pytest.fixture
-    def server(self, request):
-        return self
-
-@pytest.fixture(scope="module")
-def http(session):
-    return HTTPRequest(session.transport.host, session.transport.port)
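
Since the HTTPRequest methods return context managers, the http fixture
lets a test make raw requests against the server under test. A sketch (the
test name and path are illustrative)::

    def test_manifest_is_served(http):
        # http wraps httplib.HTTPConnection against the session transport
        with http.get("/MANIFEST.json") as response:
            assert response.status == 200
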
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/pytestrunner/runner.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-"""Provides interface to deal with pytest.
-
-Usage::
-
-    session = webdriver.client.Session("127.0.0.1", "4444", "/")
-    harness_result = ("OK", None)
-    subtest_results = pytestrunner.run("/path/to/test", session.url)
-    return (harness_result, subtest_results)
-"""
-
-import errno
-import json
-import os
-import shutil
-import tempfile
-
-from . import fixtures
-
-
-pytest = None
-
-
-def do_delayed_imports():
-    global pytest
-    import pytest
-
-
-def run(path, server_config, session_config, timeout=0):
-    """Run Python test at ``path`` in pytest.  The provided ``session``
-    is exposed as a fixture available in the scope of the test functions.
-
-    :param path: Path to the test file.
-    :param session_config: dictionary of host, port,capabilities parameters
-    to pass through to the webdriver session
-    :param timeout: Duration before interrupting potentially hanging
-        tests.  If 0, there is no timeout.
-
-    :returns: List of subtest results, which are tuples of (test id,
-        status, message, stacktrace).
-    """
-
-    if pytest is None:
-        do_delayed_imports()
-
-    recorder = SubtestResultRecorder()
-
-    os.environ["WD_HOST"] = session_config["host"]
-    os.environ["WD_PORT"] = str(session_config["port"])
-    os.environ["WD_CAPABILITIES"] = json.dumps(session_config["capabilities"])
-    os.environ["WD_SERVER_CONFIG"] = json.dumps(server_config)
-
-    plugins = [recorder]
-
-    # TODO(ato): Deal with timeouts
-
-    with TemporaryDirectory() as cache:
-        pytest.main(["--strict",  # turn warnings into errors
-                     "--verbose",  # show each individual subtest
-                     "--capture", "no",  # enable stdout/stderr from tests
-                     "--basetemp", cache,  # temporary directory
-                     path],
-                    plugins=plugins)
-
-    return recorder.results
-
-
-class SubtestResultRecorder(object):
-    def __init__(self):
-        self.results = []
-
-    def pytest_runtest_logreport(self, report):
-        if report.passed and report.when == "call":
-            self.record_pass(report)
-        elif report.failed:
-            if report.when != "call":
-                self.record_error(report)
-            else:
-                self.record_fail(report)
-        elif report.skipped:
-            self.record_skip(report)
-
-    def record_pass(self, report):
-        self.record(report.nodeid, "PASS")
-
-    def record_fail(self, report):
-        self.record(report.nodeid, "FAIL", stack=report.longrepr)
-
-    def record_error(self, report):
-        # error in setup/teardown; report.when is "setup" or "teardown"
-        # here (see pytest_runtest_logreport above)
-        message = "%s error" % report.when
-        self.record(report.nodeid, "ERROR", message, report.longrepr)
-
-    def record_skip(self, report):
-        self.record(report.nodeid, "ERROR",
-                    "In-test skip decorators are disallowed, "
-                    "please use WPT metadata to ignore tests.")
-
-    def record(self, test, status, message=None, stack=None):
-        if stack is not None:
-            stack = str(stack)
-        new_result = (test, status, message, stack)
-        self.results.append(new_result)
-
-
-class TemporaryDirectory(object):
-    def __enter__(self):
-        self.path = tempfile.mkdtemp(prefix="pytest-")
-        return self.path
-
-    def __exit__(self, *args):
-        try:
-            shutil.rmtree(self.path)
-        except OSError as e:
-            # no such file or directory
-            if e.errno != errno.ENOENT:
-                raise
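
Putting the pieces together: the recorder is handed to pytest as a plugin
and afterwards holds one (test id, status, message, stacktrace) tuple per
subtest. A sketch with an illustrative test path::

    recorder = SubtestResultRecorder()
    pytest.main(["--strict", "--verbose", "/path/to/test_example.py"],
                plugins=[recorder])
    for test_id, status, message, stack in recorder.results:
        print test_id, status
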
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/reftest-wait.js
+++ /dev/null
@@ -1,22 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-function test(x) {
-  log("classList: " + root.classList);
-  if (!root.classList.contains("reftest-wait")) {
-    observer.disconnect();
-    marionetteScriptFinished();
-  }
-}
-
-var root = document.documentElement;
-var observer = new MutationObserver(test);
-
-observer.observe(root, {attributes: true});
-
-if (document.readyState != "complete") {
-  onload = test;
-} else {
-  test();
-}
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/reftest-wait_servodriver.js
+++ /dev/null
@@ -1,20 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/.
- */
-
-callback = arguments[arguments.length - 1];
-
-function check_done() {
-    if (!document.documentElement.classList.contains('reftest-wait')) {
-        callback();
-    } else {
-        setTimeout(check_done, 50);
-    }
-}
-
-if (document.readyState === 'complete') {
-    check_done();
-} else {
-    addEventListener("load", check_done);
-}
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/reftest-wait_webdriver.js
+++ /dev/null
@@ -1,23 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-var callback = arguments[arguments.length - 1];
-
-function test(x) {
-  if (!root.classList.contains("reftest-wait")) {
-    observer.disconnect();
-    callback();
-  }
-}
-
-var root = document.documentElement;
-var observer = new MutationObserver(test);
-
-observer.observe(root, {attributes: true});
-
-if (document.readyState != "complete") {
-    onload = test;
-} else {
-    test();
-}
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/reftest.js
+++ /dev/null
@@ -1,5 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-var win = window.open("about:blank", "test", "width=600,height=600");
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/testharness_marionette.js
+++ /dev/null
@@ -1,36 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-window.wrappedJSObject.timeout_multiplier = %(timeout_multiplier)d;
-window.wrappedJSObject.explicit_timeout = %(explicit_timeout)d;
-
-window.wrappedJSObject.addEventListener("message", function listener(event) {
-    if (event.data.type != "complete") {
-        return;
-    }
-    window.wrappedJSObject.removeEventListener("message", listener);
-    clearTimeout(timer);
-    var tests = event.data.tests;
-    var status = event.data.status;
-
-    var subtest_results = tests.map(function (x) {
-        return [x.name, x.status, x.message, x.stack];
-    });
-
-    marionetteScriptFinished(["%(url)s",
-                              status.status,
-                              status.message,
-                              status.stack,
-                              subtest_results]);
-}, false);
-
-window.wrappedJSObject.win = window.open("%(abs_url)s", "%(window_id)s");
-
-var timer = null;
-if (%(timeout)s) {
-    timer = setTimeout(function() {
-        log("Timeout fired");
-        window.wrappedJSObject.win.timeout();
-    }, %(timeout)s);
-}
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/testharness_servodriver.js
+++ /dev/null
@@ -1,6 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-window.__wd_results_callback__ = arguments[arguments.length - 1];
-window.__wd_results_timer__ = setTimeout(timeout, %(timeout)s);
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/executors/testharness_webdriver.js
+++ /dev/null
@@ -1,33 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-var callback = arguments[arguments.length - 1];
-window.timeout_multiplier = %(timeout_multiplier)d;
-
-window.addEventListener("message", function f(event) {
-  if (event.data.type != "complete") {
-    return;
-  }
-  window.removeEventListener("message", f);
-
-  var tests = event.data.tests;
-  var status = event.data.status;
-
-  var subtest_results = tests.map(function(x) {
-    return [x.name, x.status, x.message, x.stack];
-  });
-  clearTimeout(timer);
-  callback(["%(url)s",
-            status.status,
-            status.message,
-            status.stack,
-            subtest_results]);
-}, false);
-
-window.win = window.open("%(abs_url)s", "%(window_id)s");
-
-var timer = setTimeout(function() {
-  window.win.timeout();
-  window.win.close();
-}, %(timeout)s);
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/expected.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import os
-
-
-def expected_path(metadata_path, test_path):
-    """Path to the expectation data file for a given test path.
-
-    This is defined as metadata_path + relative_test_path + .ini
-
-    :param metadata_path: Path to the root of the metadata directory
-    :param test_path: Relative path to the test file from the test root
-    """
-    args = list(test_path.split("/"))
-    args[-1] += ".ini"
-    return os.path.join(metadata_path, *args)
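
For example, on a POSIX system the mapping is plain path concatenation plus
an ".ini" suffix::

    >>> expected_path("/meta", "dom/historical.html")
    '/meta/dom/historical.html.ini'
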
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/hosts.py
+++ /dev/null
@@ -1,104 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-from __future__ import unicode_literals
-
-
-class HostsLine(object):
-    def __init__(self, ip_address, canonical_hostname, aliases=None, comment=None):
-        self.ip_address = ip_address
-        self.canonical_hostname = canonical_hostname
-        self.aliases = aliases if aliases is not None else []
-        self.comment = comment
-        if self.ip_address is None:
-            assert self.canonical_hostname is None
-            assert not self.aliases
-            assert self.comment is not None
-
-    @classmethod
-    def from_string(cls, line):
-        line = line.strip()
-        if not line:
-            return
-
-        ip_address = None
-        canonical_hostname = None
-        aliases = []
-        comment = None
-
-        comment_parts = line.split("#", 1)
-        if len(comment_parts) > 1:
-            comment = comment_parts[1]
-
-        data = comment_parts[0].strip()
-
-        if data:
-            fields = data.split()
-            if len(fields) < 2:
-                raise ValueError("Invalid hosts line")
-
-            ip_address = fields[0]
-            canonical_hostname = fields[1]
-            aliases = fields[2:]
-
-        return cls(ip_address, canonical_hostname, aliases, comment)
-
-
-class HostsFile(object):
-    def __init__(self):
-        self.data = []
-        self.by_hostname = {}
-
-    def set_host(self, host):
-        if host.canonical_hostname is None:
-            self.data.append(host)
-        elif host.canonical_hostname in self.by_hostname:
-            old_host = self.by_hostname[host.canonical_hostname]
-            old_host.ip_address = host.ip_address
-            old_host.aliases = host.aliases
-            old_host.comment = host.comment
-        else:
-            self.data.append(host)
-            self.by_hostname[host.canonical_hostname] = host
-
-    @classmethod
-    def from_file(cls, f):
-        rv = cls()
-        for line in f:
-            host = HostsLine.from_string(line)
-            if host is not None:
-                rv.set_host(host)
-        return rv
-
-    def to_string(self):
-        field_widths = [0, 0]
-        for line in self.data:
-            if line.ip_address is not None:
-                field_widths[0] = max(field_widths[0], len(line.ip_address))
-                field_widths[1] = max(field_widths[1], len(line.canonical_hostname))
-
-        lines = []
-
-        for host in self.data:
-            line = ""
-            if host.ip_address is not None:
-                ip_string = host.ip_address.ljust(field_widths[0])
-                hostname_str = host.canonical_hostname
-                if host.aliases:
-                    hostname_str = "%s %s" % (hostname_str.ljust(field_widths[1]),
-                                              " ".join(host.aliases))
-                line = "%s %s" % (ip_string, hostname_str)
-            if host.comment:
-                if line:
-                    line += " "
-                line += "#%s" % host.comment
-            lines.append(line)
-
-        lines.append("")
-
-        return "\n".join(lines)
-
-    def to_file(self, f):
-        f.write(self.to_string().encode("utf8"))
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/manifestexpected.py
+++ /dev/null
@@ -1,266 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import os
-import urlparse
-
-from wptmanifest.backends import static
-from wptmanifest.backends.static import ManifestItem
-
-import expected
-
-"""Manifest structure used to store expected results of a test.
-
-Each manifest file is represented by an ExpectedManifest that
-has one or more TestNode children, one per test in the manifest.
-Each TestNode has zero or more SubtestNode children, one for each
-known subtest of the test.
-"""
-
-def data_cls_getter(output_node, visited_node):
-    # visited_node is intentionally unused
-    if output_node is None:
-        return ExpectedManifest
-    if isinstance(output_node, ExpectedManifest):
-        return TestNode
-    if isinstance(output_node, TestNode):
-        return SubtestNode
-    raise ValueError
-
-
-def bool_prop(name, node):
-    """Boolean property"""
-    try:
-        return node.get(name)
-    except KeyError:
-        return None
-
-
-def tags(node):
-    """Set of tags that have been applied to the test"""
-    try:
-        value = node.get("tags")
-        if isinstance(value, (str, unicode)):
-            return {value}
-        return set(value)
-    except KeyError:
-        return set()
-
-
-def prefs(node):
-    def value(ini_value):
-        if isinstance(ini_value, (str, unicode)):
-            return tuple(ini_value.split(":", 1))
-        else:
-            return (ini_value, None)
-
-    try:
-        node_prefs = node.get("prefs")
-        if isinstance(node_prefs, (str, unicode)):
-            # A single string is a single pref, not a list of prefs
-            node_prefs = [node_prefs]
-        rv = dict(value(item) for item in node_prefs)
-    except KeyError:
-        rv = {}
-    return rv
-
-
-class ExpectedManifest(ManifestItem):
-    def __init__(self, name, test_path, url_base):
-        """Object representing all the tests in a particular manifest
-
-        :param name: Name of the AST Node associated with this object.
-                     Should always be None since this should always be associated with
-                     the root node of the AST.
-        :param test_path: Path of the test file associated with this manifest.
-        :param url_base: Base url for serving the tests in this manifest
-        """
-        if name is not None:
-            raise ValueError("ExpectedManifest should represent the root node")
-        if test_path is None:
-            raise ValueError("ExpectedManifest requires a test path")
-        if url_base is None:
-            raise ValueError("ExpectedManifest requires a base url")
-        ManifestItem.__init__(self, name)
-        self.child_map = {}
-        self.test_path = test_path
-        self.url_base = url_base
-
-    def append(self, child):
-        """Add a test to the manifest"""
-        ManifestItem.append(self, child)
-        self.child_map[child.id] = child
-
-    def _remove_child(self, child):
-        del self.child_map[child.id]
-        ManifestItem.remove_child(self, child)
-        assert len(self.child_map) == len(self.children)
-
-    def get_test(self, test_id):
-        """Get a test from the manifest by ID
-
-        :param test_id: ID of the test to return."""
-        return self.child_map.get(test_id)
-
-    @property
-    def url(self):
-        return urlparse.urljoin(self.url_base,
-                                "/".join(self.test_path.split(os.path.sep)))
-
-    @property
-    def disabled(self):
-        return bool_prop("disabled", self)
-
-    @property
-    def restart_after(self):
-        return bool_prop("restart-after", self)
-
-    @property
-    def leaks(self):
-        return bool_prop("leaks", self)
-
-    @property
-    def tags(self):
-        return tags(self)
-
-    @property
-    def prefs(self):
-        return prefs(self)
-
-
-class DirectoryManifest(ManifestItem):
-    @property
-    def disabled(self):
-        return bool_prop("disabled", self)
-
-    @property
-    def restart_after(self):
-        return bool_prop("restart-after", self)
-
-    @property
-    def leaks(self):
-        return bool_prop("leaks", self)
-
-    @property
-    def tags(self):
-        return tags(self)
-
-    @property
-    def prefs(self):
-        return prefs(self)
-
-
-class TestNode(ManifestItem):
-    def __init__(self, name):
-        """Tree node associated with a particular test in a manifest
-
-        :param name: name of the test"""
-        assert name is not None
-        ManifestItem.__init__(self, name)
-        self.updated_expected = []
-        self.new_expected = []
-        self.subtests = {}
-        self.default_status = None
-        self._from_file = True
-
-    @property
-    def is_empty(self):
-        required_keys = set(["type"])
-        if set(self._data.keys()) != required_keys:
-            return False
-        return all(child.is_empty for child in self.children)
-
-    @property
-    def test_type(self):
-        return self.get("type")
-
-    @property
-    def id(self):
-        return urlparse.urljoin(self.parent.url, self.name)
-
-    @property
-    def disabled(self):
-        return bool_prop("disabled", self)
-
-    @property
-    def restart_after(self):
-        return bool_prop("restart-after", self)
-
-    @property
-    def leaks(self):
-        return bool_prop("leaks", self)
-
-    @property
-    def tags(self):
-        return tags(self)
-
-    @property
-    def prefs(self):
-        return prefs(self)
-
-    def append(self, node):
-        """Add a subtest to the current test
-
-        :param node: AST Node associated with the subtest"""
-        child = ManifestItem.append(self, node)
-        self.subtests[child.name] = child
-
-    def get_subtest(self, name):
-        """Get the SubtestNode corresponding to a particular subtest, by name
-
-        :param name: Name of the node to return"""
-        if name in self.subtests:
-            return self.subtests[name]
-        return None
-
-
-class SubtestNode(TestNode):
-    def __init__(self, name):
-        """Tree node associated with a particular subtest in a manifest
-
-        :param name: name of the subtest"""
-        TestNode.__init__(self, name)
-
-    @property
-    def is_empty(self):
-        if self._data:
-            return False
-        return True
-
-
-def get_manifest(metadata_root, test_path, url_base, run_info):
-    """Get the ExpectedManifest for a particular test path, or None if there is no
-    metadata stored for that test path.
-
-    :param metadata_root: Absolute path to the root of the metadata directory
-    :param test_path: Path to the test(s) relative to the test root
-    :param url_base: Base url for serving the tests in this manifest
-    :param run_info: Dictionary of properties of the test run for which the expectation
-                     values should be computed.
-    """
-    manifest_path = expected.expected_path(metadata_root, test_path)
-    try:
-        with open(manifest_path) as f:
-            return static.compile(f,
-                                  run_info,
-                                  data_cls_getter=data_cls_getter,
-                                  test_path=test_path,
-                                  url_base=url_base)
-    except IOError:
-        return None
-
-def get_dir_manifest(path, run_info):
-    """Get the ExpectedManifest for a particular test path, or None if there is no
-    metadata stored for that test path.
-
-    :param path: Full path to the ini file
-    :param run_info: Dictionary of properties of the test run for which the expectation
-                     values should be computed.
-    """
-    try:
-        with open(path) as f:
-            return static.compile(f,
-                                  run_info,
-                                  data_cls_getter=lambda x,y: DirectoryManifest)
-    except IOError:
-        return None
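
To illustrate, a metadata file such as the following (the path and statuses
are illustrative)::

    [historical.html]
      type: testharness
      [window.webkitStorageInfo]
        expected: FAIL

can be compiled against a particular set of run properties and queried by
test id::

    expected = get_manifest("/meta", "dom/historical.html", "/",
                            {"os": "linux", "debug": False})
    test = expected.get_test("/dom/historical.html")
    status = test.get_subtest("window.webkitStorageInfo").get("expected")
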
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/manifestinclude.py
+++ /dev/null
@@ -1,144 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-"""Manifest structure used to store paths that should be included in a test run.
-
-The manifest is represented by a tree of IncludeManifest objects, the root
-representing the file and each subnode representing a subdirectory that should
-be included or excluded.
-"""
-import glob
-import os
-import urlparse
-
-from wptmanifest.node import DataNode
-from wptmanifest.backends import conditional
-from wptmanifest.backends.conditional import ManifestItem
-
-
-class IncludeManifest(ManifestItem):
-    def __init__(self, node):
-        """Node in a tree structure representing the paths
-        that should be included or excluded from the test run.
-
-        :param node: AST Node corresponding to this Node.
-        """
-        ManifestItem.__init__(self, node)
-        self.child_map = {}
-
-    @classmethod
-    def create(cls):
-        """Create an empty IncludeManifest tree"""
-        node = DataNode(None)
-        return cls(node)
-
-    def append(self, child):
-        ManifestItem.append(self, child)
-        self.child_map[child.name] = child
-        assert len(self.child_map) == len(self.children)
-
-    def include(self, test):
-        """Return a boolean indicating whether a particular test should be
-        included in a test run, based on the IncludeManifest tree rooted on
-        this object.
-
-        :param test: The test object"""
-        path_components = self._get_components(test.url)
-        return self._include(test, path_components)
-
-    def _include(self, test, path_components):
-        if path_components:
-            next_path_part = path_components.pop()
-            if next_path_part in self.child_map:
-                return self.child_map[next_path_part]._include(test, path_components)
-
-        node = self
-        while node:
-            try:
-                skip_value = self.get("skip", {"test_type": test.item_type}).lower()
-                assert skip_value in ("true", "false")
-                return skip_value != "true"
-            except KeyError:
-                if node.parent is not None:
-                    node = node.parent
-                else:
-                    # Include by default
-                    return True
-
-    def _get_components(self, url):
-        rv = []
-        url_parts = urlparse.urlsplit(url)
-        variant = ""
-        if url_parts.query:
-            variant += "?" + url_parts.query
-        if url_parts.fragment:
-            variant += "#" + url_parts.fragment
-        if variant:
-            rv.append(variant)
-        rv.extend([item for item in reversed(url_parts.path.split("/")) if item])
-        return rv
-
-    def _add_rule(self, test_manifests, url, direction):
-        maybe_path = os.path.join(os.path.abspath(os.curdir), url)
-        rest, last = os.path.split(maybe_path)
-        variant = ""
-        if "#" in last:
-            last, fragment = last.rsplit("#", 1)
-            variant += "#" + fragment
-        if "?" in last:
-            last, query = last.rsplit("?", 1)
-            variant += "?" + query
-
-        maybe_path = os.path.join(rest, last)
-        paths = glob.glob(maybe_path)
-
-        if paths:
-            urls = []
-            for path in paths:
-                for manifest, data in test_manifests.iteritems():
-                    rel_path = os.path.relpath(path, data["tests_path"])
-                    if ".." not in rel_path.split(os.sep):
-                        urls.append(data["url_base"] + rel_path.replace(os.path.sep, "/") + variant)
-                        break
-        else:
-            urls = [url]
-
-        assert direction in ("include", "exclude")
-
-        for url in urls:
-            components = self._get_components(url)
-
-            node = self
-            while components:
-                component = components.pop()
-                if component not in node.child_map:
-                    new_node = IncludeManifest(DataNode(component))
-                    node.append(new_node)
-                    new_node.set("skip", node.get("skip", {}))
-
-                node = node.child_map[component]
-
-            skip = False if direction == "include" else True
-            node.set("skip", str(skip))
-
-    def add_include(self, test_manifests, url_prefix):
-        """Add a rule indicating that tests under a url path
-        should be included in test runs
-
-        :param url_prefix: The url prefix to include
-        """
-        return self._add_rule(test_manifests, url_prefix, "include")
-
-    def add_exclude(self, test_manifests, url_prefix):
-        """Add a rule indicating that tests under a url path
-        should be excluded from test runs
-
-        :param url_prefix: The url prefix to exclude
-        """
-        return self._add_rule(test_manifests, url_prefix, "exclude")
-
-
-def get_manifest(manifest_path):
-    with open(manifest_path) as f:
-        return conditional.compile(f, data_cls_getter=lambda x, y: IncludeManifest)
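
A sketch of building an include manifest programmatically, where
test_manifests is the dict of loaded test manifests that _add_rule expects
and the URLs are illustrative::

    include = IncludeManifest.create()
    include.add_include(test_manifests, "/dom/")
    include.add_exclude(test_manifests, "/dom/historical.html")
    # include.include(test) is now True for tests under /dom/,
    # except for /dom/historical.html
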
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/manifestupdate.py
+++ /dev/null
@@ -1,464 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import os
-import urlparse
-from collections import namedtuple, defaultdict
-
-from wptmanifest.node import (DataNode, ConditionalNode, BinaryExpressionNode,
-                              BinaryOperatorNode, VariableNode, StringNode, NumberNode,
-                              UnaryExpressionNode, UnaryOperatorNode, KeyValueNode)
-from wptmanifest.backends import conditional
-from wptmanifest.backends.conditional import ManifestItem
-
-import expected
-
-"""Manifest structure used to update the expected results of a test
-
-Each manifest file is represented by an ExpectedManifest that has one
-or more TestNode children, one per test in the manifest.  Each
-TestNode has zero or more SubtestNode children, one for each known
-subtest of the test.
-
-In these representations, conditionals expressions in the manifest are
-not evaluated upfront but stored as python functions to be evaluated
-at runtime.
-
-When a result for a test is to be updated, set_result is called on the
-[Sub]TestNode to store the new result, alongside the existing
-conditional that the result's run info matched, if any. Once all
-new results are known, coalesce_expected is called to compute the new
-set of results and conditionals. The AST of the underlying parsed manifest
-is updated with the changes, and the result is serialised to a file.
-"""
-
-class ConditionError(Exception):
-    pass
-
-Result = namedtuple("Result", ["run_info", "status"])
-
-
-def data_cls_getter(output_node, visited_node):
-    # visited_node is intentionally unused
-    if output_node is None:
-        return ExpectedManifest
-    elif isinstance(output_node, ExpectedManifest):
-        return TestNode
-    elif isinstance(output_node, TestNode):
-        return SubtestNode
-    else:
-        raise ValueError
-
-
-class ExpectedManifest(ManifestItem):
-    def __init__(self, node, test_path=None, url_base=None, property_order=None,
-                 boolean_properties=None):
-        """Object representing all the tests in a particular manifest
-
-        :param node: AST Node associated with this object. If this is None,
-                     a new AST is created to associate with this manifest.
-        :param test_path: Path of the test file associated with this manifest.
-        :param url_base: Base url for serving the tests in this manifest.
-        :param property_order: List of properties to use in expectation metadata
-                               from most to least significant.
-        :param boolean_properties: Set of properties in property_order that should
-                                   be treated as boolean.
-        """
-        if node is None:
-            node = DataNode(None)
-        ManifestItem.__init__(self, node)
-        self.child_map = {}
-        self.test_path = test_path
-        self.url_base = url_base
-        assert self.url_base is not None
-        self.modified = False
-        self.boolean_properties = boolean_properties
-        self.property_order = property_order
-
-    def append(self, child):
-        ManifestItem.append(self, child)
-        if child.id in self.child_map:
-            print "Warning: Duplicate heading %s" % child.id
-        self.child_map[child.id] = child
-
-    def _remove_child(self, child):
-        del self.child_map[child.id]
-        ManifestItem._remove_child(self, child)
-
-    def get_test(self, test_id):
-        """Return a TestNode by test id, or None if no test matches
-
-        :param test_id: The id of the test to look up"""
-
-        return self.child_map[test_id]
-
-    def has_test(self, test_id):
-        """Boolean indicating whether the current test has a known child test
-        with id test id
-
-        :param test_id: The id of the test to look up"""
-
-        return test_id in self.child_map
-
-    @property
-    def url(self):
-        return urlparse.urljoin(self.url_base,
-                                "/".join(self.test_path.split(os.path.sep)))
-
-class TestNode(ManifestItem):
-    def __init__(self, node):
-        """Tree node associated with a particular test in a manifest
-
-        :param node: AST node associated with the test"""
-
-        ManifestItem.__init__(self, node)
-        self.updated_expected = []
-        self.new_expected = []
-        self.subtests = {}
-        self.default_status = None
-        self._from_file = True
-
-    @classmethod
-    def create(cls, test_type, test_id):
-        """Create a TestNode corresponding to a given test
-
-        :param test_type: The type of the test
-        :param test_id: The id of the test"""
-
-        url = test_id
-        name = url.split("/")[-1]
-        node = DataNode(name)
-        self = cls(node)
-
-        self.set("type", test_type)
-        self._from_file = False
-        return self
-
-    @property
-    def is_empty(self):
-        required_keys = set(["type"])
-        if set(self._data.keys()) != required_keys:
-            return False
-        return all(child.is_empty for child in self.children)
-
-    @property
-    def test_type(self):
-        """The type of the test represented by this TestNode"""
-
-        return self.get("type", None)
-
-    @property
-    def id(self):
-        """The id of the test represented by this TestNode"""
-        return urlparse.urljoin(self.parent.url, self.name)
-
-    def disabled(self, run_info):
-        """Boolean indicating whether this test is disabled when run in an
-        environment with the given run_info
-
-        :param run_info: Dictionary of run_info parameters"""
-
-        return self.get("disabled", run_info) is not None
-
-    def set_result(self, run_info, result):
-        """Set the result of the test in a particular run
-
-        :param run_info: Dictionary of run_info parameters corresponding
-                         to this run
-        :param result: Status of the test in this run"""
-
-        if self.default_status is not None:
-            assert self.default_status == result.default_expected
-        else:
-            self.default_status = result.default_expected
-
-        # Add this result to the list of results satisfying
-        # any condition in the list of updated results it matches
-        for (cond, values) in self.updated_expected:
-            if cond(run_info):
-                values.append(Result(run_info, result.status))
-                if result.status != cond.value:
-                    self.root.modified = True
-                break
-        else:
-            # We didn't find a previous value for this
-            self.new_expected.append(Result(run_info, result.status))
-            self.root.modified = True
-
-    def coalesce_expected(self):
-        """Update the underlying manifest AST for this test based on all the
-        added results.
-
-        This will update existing conditionals if they got the same result in
-        all matching runs in the updated results, will delete existing conditionals
-        that get more than one different result in the updated run, and add new
-        conditionals for anything that doesn't match an existing conditional.
-
-        Conditionals not matched by any added result are not changed."""
-
-        final_conditionals = []
-
-        try:
-            unconditional_status = self.get("expected")
-        except KeyError:
-            unconditional_status = self.default_status
-
-        for conditional_value, results in self.updated_expected:
-            if not results:
-                # The conditional didn't match anything in these runs so leave it alone
-                final_conditionals.append(conditional_value)
-            elif all(results[0].status == result.status for result in results):
-                # All the new values for this conditional matched, so update the node
-                result = results[0]
-                if (result.status == unconditional_status and
-                    conditional_value.condition_node is not None):
-                    if "expected" in self:
-                        self.remove_value("expected", conditional_value)
-                else:
-                    conditional_value.value = result.status
-                    final_conditionals.append(conditional_value)
-            elif conditional_value.condition_node is not None:
-                # Blow away the existing condition and rebuild from scratch.
-                # This isn't guaranteed to work if a later conditional also
-                # matches these values, but we can hope, verify that we get
-                # the results we expect, and if not let a human sort it out
-                self.remove_value("expected", conditional_value)
-                self.new_expected.extend(results)
-            elif conditional_value.condition_node is None:
-                self.new_expected.extend(result for result in results
-                                         if result.status != unconditional_status)
-
-        # It is an invariant that nothing in new_expected matches an existing
-        # condition except for the default condition
-
-        if self.new_expected:
-            if all(self.new_expected[0].status == result.status
-                   for result in self.new_expected) and not self.updated_expected:
-                status = self.new_expected[0].status
-                if status != self.default_status:
-                    self.set("expected", status, condition=None)
-                    final_conditionals.append(self._data["expected"][-1])
-            else:
-                try:
-                    conditionals = group_conditionals(
-                        self.new_expected,
-                        property_order=self.root.property_order,
-                        boolean_properties=self.root.boolean_properties)
-                except ConditionError:
-                    print "Conflicting test results for %s, cannot update" % self.root.test_path
-                    return
-                for conditional_node, status in conditionals:
-                    if status != unconditional_status:
-                        self.set("expected", status, condition=conditional_node.children[0])
-                        final_conditionals.append(self._data["expected"][-1])
-
-        if ("expected" in self._data and
-            len(self._data["expected"]) > 0 and
-            self._data["expected"][-1].condition_node is None and
-            self._data["expected"][-1].value == self.default_status):
-
-            self.remove_value("expected", self._data["expected"][-1])
-
-        if ("expected" in self._data and
-            len(self._data["expected"]) == 0):
-            for child in self.node.children:
-                if (isinstance(child, KeyValueNode) and
-                    child.data == "expected"):
-                    child.remove()
-                    break
-
-    def _add_key_value(self, node, values):
-        ManifestItem._add_key_value(self, node, values)
-        if node.data == "expected":
-            self.updated_expected = []
-            for value in values:
-                self.updated_expected.append((value, []))
-
-    def clear_expected(self):
-        """Clear all the expected data for this test and all of its subtests"""
-
-        self.updated_expected = []
-        if "expected" in self._data:
-            for child in self.node.children:
-                if (isinstance(child, KeyValueNode) and
-                    child.data == "expected"):
-                    child.remove()
-                    del self._data["expected"]
-                    break
-
-        for subtest in self.subtests.itervalues():
-            subtest.clear_expected()
-
-    def append(self, node):
-        child = ManifestItem.append(self, node)
-        self.subtests[child.name] = child
-
-    def get_subtest(self, name):
-        """Return a SubtestNode corresponding to a particular subtest of
-        the current test, creating a new one if no subtest with that name
-        already exists.
-
-        :param name: Name of the subtest"""
-
-        if name in self.subtests:
-            return self.subtests[name]
-        else:
-            subtest = SubtestNode.create(name)
-            self.append(subtest)
-            return subtest
-
-
-class SubtestNode(TestNode):
-    def __init__(self, node):
-        assert isinstance(node, DataNode)
-        TestNode.__init__(self, node)
-
-    @classmethod
-    def create(cls, name):
-        node = DataNode(name)
-        self = cls(node)
-        return self
-
-    @property
-    def is_empty(self):
-        if self._data:
-            return False
-        return True
-
-
-def group_conditionals(values, property_order=None, boolean_properties=None):
-    """Given a list of Result objects, return a list of
-    (conditional_node, status) pairs representing the conditional
-    expressions that are required to match each status
-
-    :param values: List of Results
-    :param property_order: List of properties to use in expectation metadata
-                           from most to least significant.
-    :param boolean_properties: Set of properties in property_order that should
-                               be treated as boolean."""
-
-    by_property = defaultdict(set)
-    for run_info, status in values:
-        for prop_name, prop_value in run_info.iteritems():
-            by_property[(prop_name, prop_value)].add(status)
-
-    if property_order is None:
-        property_order = ["debug", "os", "version", "processor", "bits"]
-
-    if boolean_properties is None:
-        boolean_properties = set(["debug"])
-    else:
-        boolean_properties = set(boolean_properties)
-
-    # If we have more than one value, remove any properties that are common
-    # for all the values
-    if len(values) > 1:
-        for key, statuses in by_property.copy().iteritems():
-            if len(statuses) == len(values):
-                del by_property[key]
-        if not by_property:
-            raise ConditionError
-
-    properties = set(item[0] for item in by_property.iterkeys())
-    include_props = []
-
-    for prop in property_order:
-        if prop in properties:
-            include_props.append(prop)
-
-    conditions = {}
-
-    for run_info, status in values:
-        prop_set = tuple((prop, run_info[prop]) for prop in include_props)
-        if prop_set in conditions:
-            continue
-
-        expr = make_expr(prop_set, status, boolean_properties=boolean_properties)
-        conditions[prop_set] = (expr, status)
-
-    return conditions.values()
-
-
-def make_expr(prop_set, status, boolean_properties=None):
-    """Create an AST that returns the value ``status`` given all the
-    properties in prop_set match.
-
-    :param prop_set: tuple of (property name, value) pairs for each
-                     property in this expression and the value it must match
-    :param status: Status on RHS when all the given properties match
-    :param boolean_properties: Set of properties in property_order that should
-                               be treated as boolean.
-    """
-    root = ConditionalNode()
-
-    assert len(prop_set) > 0
-
-    expressions = []
-    for prop, value in prop_set:
-        number_types = (int, float, long)
-        value_cls = (NumberNode
-                     if type(value) in number_types
-                     else StringNode)
-        if prop not in boolean_properties:
-            expressions.append(
-                BinaryExpressionNode(
-                    BinaryOperatorNode("=="),
-                    VariableNode(prop),
-                    value_cls(unicode(value))
-                ))
-        else:
-            if value:
-                expressions.append(VariableNode(prop))
-            else:
-                expressions.append(
-                    UnaryExpressionNode(
-                        UnaryOperatorNode("not"),
-                        VariableNode(prop)
-                    ))
-    if len(expressions) > 1:
-        prev = expressions[-1]
-        for curr in reversed(expressions[:-1]):
-            node = BinaryExpressionNode(
-                BinaryOperatorNode("and"),
-                curr,
-                prev)
-            prev = node
-    else:
-        node = expressions[0]
-
-    root.append(node)
-    root.append(StringNode(status))
-
-    return root
-
-
-def get_manifest(metadata_root, test_path, url_base, property_order=None,
-                 boolean_properties=None):
-    """Get the ExpectedManifest for a particular test path, or None if there is no
-    metadata stored for that test path.
-
-    :param metadata_root: Absolute path to the root of the metadata directory
-    :param test_path: Path to the test(s) relative to the test root
-    :param url_base: Base url for serving the tests in this manifest
-    :param property_order: List of properties to use in expectation metadata
-                           from most to least significant.
-    :param boolean_properties: Set of properties in property_order that should
-                               be treated as boolean."""
-    manifest_path = expected.expected_path(metadata_root, test_path)
-    try:
-        with open(manifest_path) as f:
-            return compile(f, test_path, url_base, property_order=property_order,
-                           boolean_properties=boolean_properties)
-    except IOError:
-        return None
-
-
-def compile(manifest_file, test_path, url_base, property_order=None,
-            boolean_properties=None):
-    return conditional.compile(manifest_file,
-                               data_cls_getter=data_cls_getter,
-                               test_path=test_path,
-                               url_base=url_base,
-                               property_order=property_order,
-                               boolean_properties=boolean_properties)
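
For instance, given results that differ only by platform,
group_conditionals drops the properties shared by every run and emits one
conditional per remaining combination (the run_info values are
illustrative)::

    results = [Result({"os": "linux", "debug": False}, "FAIL"),
               Result({"os": "win", "debug": False}, "PASS")]
    for condition_node, status in group_conditionals(results):
        # debug is common to both runs, so each condition reduces to a
        # single os == ... check paired with its status
        print status
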
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/metadata.py
+++ /dev/null
@@ -1,355 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this
-# file, You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import os
-import shutil
-import sys
-import tempfile
-import types
-import uuid
-from collections import defaultdict
-
-from mozlog import reader
-from mozlog import structuredlog
-
-import expected
-import manifestupdate
-import testloader
-import wptmanifest
-import wpttest
-from vcs import git
-manifest = None  # Module that will be imported relative to test_root
-manifestitem = None
-
-logger = structuredlog.StructuredLogger("web-platform-tests")
-
-
-def load_test_manifests(serve_root, test_paths):
-    do_delayed_imports(serve_root)
-    manifest_loader = testloader.ManifestLoader(test_paths, False)
-    return manifest_loader.load()
-
-
-def update_expected(test_paths, serve_root, log_file_names,
-                    rev_old=None, rev_new="HEAD", ignore_existing=False,
-                    sync_root=None, property_order=None, boolean_properties=None):
-    """Update the metadata files for web-platform-tests based on
-    the results obtained in a previous run"""
-
-    manifests = load_test_manifests(serve_root, test_paths)
-
-    change_data = {}
-
-    if sync_root is not None:
-        if rev_old is not None:
-            rev_old = git("rev-parse", rev_old, repo=sync_root).strip()
-        rev_new = git("rev-parse", rev_new, repo=sync_root).strip()
-
-        if rev_old is not None:
-            change_data = load_change_data(rev_old, rev_new, repo=sync_root)
-
-
-    expected_map_by_manifest = update_from_logs(manifests,
-                                                *log_file_names,
-                                                ignore_existing=ignore_existing,
-                                                property_order=property_order,
-                                                boolean_properties=boolean_properties)
-
-    results_changed = []
-    for test_manifest, expected_map in expected_map_by_manifest.iteritems():
-        url_base = manifests[test_manifest]["url_base"]
-        metadata_path = test_paths[url_base]["metadata_path"]
-        write_changes(metadata_path, expected_map)
-        # Collect changed tests from every manifest, not just the last one
-        results_changed.extend(item.test_path
-                               for item in expected_map.itervalues()
-                               if item.modified)
-
-    return unexpected_changes(manifests, change_data, results_changed)
-
-
-def do_delayed_imports(serve_root):
-    global manifest, manifestitem
-    from manifest import manifest, item as manifestitem
-
-
-def files_in_repo(repo_root):
-    return git("ls-tree", "-r", "--name-only", "HEAD").split("\n")
-
-
-def rev_range(rev_old, rev_new, symmetric=False):
-    joiner = ".." if not symmetric else "..."
-    return "".join([rev_old, joiner, rev_new])
-
-
-def paths_changed(rev_old, rev_new, repo):
-    data = git("diff", "--name-status", rev_range(rev_old, rev_new), repo=repo)
-    lines = [tuple(item.strip() for item in line.strip().split("\t", 1))
-             for line in data.split("\n") if line.strip()]
-    output = set(lines)
-    return output
-
-
-def load_change_data(rev_old, rev_new, repo):
-    changes = paths_changed(rev_old, rev_new, repo)
-    rv = {}
-    status_keys = {"M": "modified",
-                   "A": "new",
-                   "D": "deleted"}
-    # TODO: deal with renames
-    for item in changes:
-        rv[item[1]] = status_keys[item[0]]
-    return rv
-
-
-def unexpected_changes(manifests, change_data, files_changed):
-    files_changed = set(files_changed)
-
-    root_manifest = None
-    for manifest, paths in manifests.iteritems():
-        if paths["url_base"] == "/":
-            root_manifest = manifest
-            break
-    else:
-        return []
-
-    return [fn for _, fn, _ in root_manifest
-            if fn in files_changed and change_data.get(fn) != "modified"]
-
-# For each testrun
-# Load all files and scan for the suite_start entry
-# Build a hash of filename: properties
-# For each different set of properties, gather all chunks
-# For each chunk in the set of chunks, go through all tests
-# for each test, make a map of {conditionals: [(platform, new_value)]}
-# Repeat for each platform
-# For each test in the list of tests:
-#   for each conditional:
-#      If all the new values match (or there aren't any) retain that conditional
-#      If any new values mismatch mark the test as needing human attention
-#   Check if all the RHS values are the same; if so collapse the conditionals
-
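
The collapse step at the end of the outline above can be shown in isolation. A minimal sketch, assuming a plain dict of condition -> expected value (the real code operates on wptmanifest condition nodes, not dicts):

    def collapse_conditions(conditions):
        # If every conditional maps to the same expected value, the
        # conditions are redundant and one unconditional value suffices.
        values = set(conditions.values())
        if len(values) == 1:
            return values.pop()
        return conditions

    collapse_conditions({'os == "linux"': "FAIL", 'os == "win"': "FAIL"})  # -> "FAIL"
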
-
-def update_from_logs(manifests, *log_filenames, **kwargs):
-    ignore_existing = kwargs.get("ignore_existing", False)
-    property_order = kwargs.get("property_order")
-    boolean_properties = kwargs.get("boolean_properties")
-
-    expected_map = {}
-    id_test_map = {}
-
-    for test_manifest, paths in manifests.iteritems():
-        expected_map_manifest, id_path_map_manifest = create_test_tree(
-            paths["metadata_path"],
-            test_manifest,
-            property_order=property_order,
-            boolean_properties=boolean_properties)
-        expected_map[test_manifest] = expected_map_manifest
-        id_test_map.update(id_path_map_manifest)
-
-    updater = ExpectedUpdater(manifests, expected_map, id_test_map,
-                              ignore_existing=ignore_existing)
-    for log_filename in log_filenames:
-        with open(log_filename) as f:
-            updater.update_from_log(f)
-
-    for manifest_expected in expected_map.itervalues():
-        for tree in manifest_expected.itervalues():
-            for test in tree.iterchildren():
-                for subtest in test.iterchildren():
-                    subtest.coalesce_expected()
-                test.coalesce_expected()
-
-    return expected_map
-
-
-def directory_manifests(metadata_path):
-    rv = []
-    for dirpath, dirnames, filenames in os.walk(metadata_path):
-        if "__dir__.ini" in filenames:
-            rel_path = os.path.relpath(dirpath, metadata_path)
-            rv.append(os.path.join(rel_path, "__dir__.ini"))
-    return rv
-
-
-def write_changes(metadata_path, expected_map):
-    # First write the new manifest files to a temporary directory
-    temp_path = tempfile.mkdtemp(dir=os.path.split(metadata_path)[0])
-    write_new_expected(temp_path, expected_map)
-
-    # Keep all __dir__.ini files (these are not in expected_map because they
-    # aren't associated with a specific test)
-    keep_files = directory_manifests(metadata_path)
-
-    # Copy all files in the root to the temporary location since
-    # these cannot be ini files
-    keep_files.extend(item for item in os.listdir(metadata_path) if
-                      not os.path.isdir(os.path.join(metadata_path, item)))
-
-    for item in keep_files:
-        dest_dir = os.path.dirname(os.path.join(temp_path, item))
-        if not os.path.exists(dest_dir):
-            os.makedirs(dest_dir)
-        shutil.copyfile(os.path.join(metadata_path, item),
-                        os.path.join(temp_path, item))
-
-    # Then move the old manifest files to a new location
-    temp_path_2 = metadata_path + str(uuid.uuid4())
-    os.rename(metadata_path, temp_path_2)
-    # Move the new files to the destination location and remove the old files
-    os.rename(temp_path, metadata_path)
-    shutil.rmtree(temp_path_2)
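
write_changes builds the replacement tree off to the side and swaps it in with two renames, so a partially written tree is never left at the destination. The same pattern as a standalone sketch (the function and its parameters are illustrative):

    import os
    import shutil
    import tempfile
    import uuid

    def replace_dir(build_new, dest):
        # Build the replacement next to the destination so the final
        # rename does not cross filesystems.
        tmp_new = tempfile.mkdtemp(dir=os.path.dirname(dest))
        build_new(tmp_new)
        tmp_old = dest + str(uuid.uuid4())
        os.rename(dest, tmp_old)   # move the old tree out of the way
        os.rename(tmp_new, dest)   # move the new tree into place
        shutil.rmtree(tmp_old)     # only now discard the old data
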
-
-
-def write_new_expected(metadata_path, expected_map):
-    # Serialize the data back to a file
-    for tree in expected_map.itervalues():
-        if not tree.is_empty:
-            manifest_str = wptmanifest.serialize(tree.node, skip_empty_data=True)
-            assert manifest_str != ""
-            path = expected.expected_path(metadata_path, tree.test_path)
-            dir_name = os.path.split(path)[0]
-            if not os.path.exists(dir_name):
-                os.makedirs(dir_name)
-            with open(path, "w") as f:
-                f.write(manifest_str)
-
-
-class ExpectedUpdater(object):
-    def __init__(self, test_manifests, expected_tree, id_path_map, ignore_existing=False):
-        self.test_manifests = test_manifests
-        self.expected_tree = expected_tree
-        self.id_path_map = id_path_map
-        self.ignore_existing = ignore_existing
-        self.run_info = None
-        self.action_map = {"suite_start": self.suite_start,
-                           "test_start": self.test_start,
-                           "test_status": self.test_status,
-                           "test_end": self.test_end}
-        self.tests_visited = {}
-
-        self.test_cache = {}
-
-    def update_from_log(self, log_file):
-        self.run_info = None
-        log_reader = reader.read(log_file)
-        reader.each_log(log_reader, self.action_map)
-
-    def suite_start(self, data):
-        self.run_info = data["run_info"]
-
-    def test_id(self, id):
-        if isinstance(id, types.StringTypes):
-            return id
-        return tuple(id)
-
-    def test_start(self, data):
-        test_id = self.test_id(data["test"])
-        try:
-            test_manifest, test = self.id_path_map[test_id]
-            expected_node = self.expected_tree[test_manifest][test].get_test(test_id)
-        except KeyError:
-            print "Test not found %s, skipping" % test_id
-            return
-        self.test_cache[test_id] = expected_node
-
-        if test_id not in self.tests_visited:
-            if self.ignore_existing:
-                expected_node.clear_expected()
-            self.tests_visited[test_id] = set()
-
-    def test_status(self, data):
-        test_id = self.test_id(data["test"])
-        test = self.test_cache.get(test_id)
-        if test is None:
-            return
-        test_cls = wpttest.manifest_test_cls[test.test_type]
-
-        subtest = test.get_subtest(data["subtest"])
-
-        self.tests_visited[test_id].add(data["subtest"])
-
-        result = test_cls.subtest_result_cls(
-            data["subtest"],
-            data["status"],
-            data.get("message"))
-
-        subtest.set_result(self.run_info, result)
-
-    def test_end(self, data):
-        test_id = self.test_id(data["test"])
-        test = self.test_cache.get(test_id)
-        if test is None:
-            return
-        test_cls = wpttest.manifest_test_cls[test.test_type]
-
-        if data["status"] == "SKIP":
-            return
-
-        result = test_cls.result_cls(
-            data["status"],
-            data.get("message"))
-
-        test.set_result(self.run_info, result)
-        del self.test_cache[test_id]
-
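
ExpectedUpdater does not parse logs itself; mozlog's reader dispatches each entry to the handler registered in action_map. A dependency-free sketch of that dispatch, assuming mozlog's raw JSON-lines format:

    import json

    def each_log(log_file, action_map):
        # Route each structured-log entry to the handler named by its
        # "action" key; entries without a handler are ignored.
        for line in log_file:
            data = json.loads(line)
            handler = action_map.get(data.get("action"))
            if handler is not None:
                handler(data)
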
-
-def create_test_tree(metadata_path, test_manifest, property_order=None,
-                     boolean_properties=None):
-    expected_map = {}
-    id_test_map = {}
-    exclude_types = frozenset(["stub", "helper", "manual", "support", "conformancechecker"])
-    all_types = [item.item_type for item in manifestitem.__dict__.itervalues()
-                 if isinstance(item, type) and
-                 issubclass(item, manifestitem.ManifestItem) and
-                 item.item_type is not None]
-    include_types = set(all_types) - exclude_types
-    for _, test_path, tests in test_manifest.itertypes(*include_types):
-        expected_data = load_expected(test_manifest, metadata_path, test_path, tests,
-                                      property_order=property_order,
-                                      boolean_properties=boolean_properties)
-        if expected_data is None:
-            expected_data = create_expected(test_manifest,
-                                            test_path,
-                                            tests,
-                                            property_order=property_order,
-                                            boolean_properties=boolean_properties)
-
-        for test in tests:
-            id_test_map[test.id] = (test_manifest, test)
-            expected_map[test] = expected_data
-
-    return expected_map, id_test_map
-
-
-def create_expected(test_manifest, test_path, tests, property_order=None,
-                    boolean_properties=None):
-    expected = manifestupdate.ExpectedManifest(None, test_path, test_manifest.url_base,
-                                               property_order=property_order,
-                                               boolean_properties=boolean_properties)
-    for test in tests:
-        expected.append(manifestupdate.TestNode.create(test.item_type, test.id))
-    return expected
-
-
-def load_expected(test_manifest, metadata_path, test_path, tests, property_order=None,
-                  boolean_properties=None):
-    expected_manifest = manifestupdate.get_manifest(metadata_path,
-                                                    test_path,
-                                                    test_manifest.url_base,
-                                                    property_order=property_order,
-                                                    boolean_properties=boolean_properties)
-    if expected_manifest is None:
-        return
-
-    tests_by_id = {item.id: item for item in tests}
-
-    # Remove expected data for tests that no longer exist
-    for test in expected_manifest.iterchildren():
-        if test.id not in tests_by_id:
-            test.remove()
-
-    # Add tests that don't have expected data
-    for test in tests:
-        if not expected_manifest.has_test(test.id):
-            expected_manifest.append(manifestupdate.TestNode.create(test.item_type, test.id))
-
-    return expected_manifest
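
load_expected reconciles the stored expectation manifest against the current set of tests: nodes for deleted tests are removed, nodes for new tests appended. The same reconciliation reduced to plain sets (the two callables are placeholders for TestNode removal and creation):

    def reconcile(existing_ids, current_ids, remove_node, add_node):
        for test_id in existing_ids - current_ids:
            remove_node(test_id)    # expectation data for a deleted test
        for test_id in current_ids - existing_ids:
            add_node(test_id)       # test with no expectation data yet
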
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/products.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import os
-import importlib
-import imp
-
-from .browsers import product_list
-
-def products_enabled(config):
-    names = config.get("products", {}).keys()
-    if not names:
-        return product_list
-    else:
-        return names
-
-def product_module(config, product):
-    if product not in products_enabled(config):
-        raise ValueError("Unknown product %s" % product)
-
-    path = config.get("products", {}).get(product, None)
-    if path:
-        module = imp.load_source('wptrunner.browsers.' + product, path)
-    else:
-        module = importlib.import_module("wptrunner.browsers." + product)
-
-    if not hasattr(module, "__wptrunner__"):
-        raise ValueError("Product module does not define __wptrunner__ variable")
-
-    return module
-
-
-def load_product(config, product):
-    module = product_module(config, product)
-    data = module.__wptrunner__
-
-    check_args = getattr(module, data["check_args"])
-    browser_cls = getattr(module, data["browser"])
-    browser_kwargs = getattr(module, data["browser_kwargs"])
-    executor_kwargs = getattr(module, data["executor_kwargs"])
-    env_options = getattr(module, data["env_options"])()
-    run_info_extras = (getattr(module, data["run_info_extras"])
-                       if "run_info_extras" in data else lambda **kwargs: {})
-
-    executor_classes = {}
-    for test_type, cls_name in data["executor"].iteritems():
-        cls = getattr(module, cls_name)
-        executor_classes[test_type] = cls
-
-    return (check_args,
-            browser_cls, browser_kwargs,
-            executor_classes, executor_kwargs,
-            env_options, run_info_extras)
-
-
-def load_product_update(config, product):
-    """Return tuple of (property_order, boolean_properties) indicating the
-    run_info properties to use when constructing the expectation data for
-    this product. None for either key indicates that the default keys
-    appropriate for distinguishing based on platform will be used."""
-
-    module = product_module(config, product)
-    data = module.__wptrunner__
-
-    update_properties = (getattr(module, data["update_properties"])()
-                         if "update_properties" in data else (None, None))
-
-    return update_properties
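
load_product resolves every entry of a module's __wptrunner__ dict with getattr, so a product is simply a module that names its own hooks. A skeletal, hypothetical module satisfying that contract (all identifiers below are invented; only the dict keys are fixed by the code above):

    __wptrunner__ = {"product": "dummy",
                     "check_args": "check_args",
                     "browser": "DummyBrowser",
                     "browser_kwargs": "browser_kwargs",
                     "executor_kwargs": "executor_kwargs",
                     "env_options": "env_options",
                     "executor": {"testharness": "DummyExecutor"}}

    def check_args(**kwargs):
        pass

    def browser_kwargs(**kwargs):
        return {}

    def executor_kwargs(**kwargs):
        return {}

    def env_options():
        return {}

    class DummyBrowser(object):
        pass

    class DummyExecutor(object):
        pass
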
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/reduce.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# This Source Code Form is subject to the terms of the Mozilla Public
-# License, v. 2.0. If a copy of the MPL was not distributed with this file,
-# You can obtain one at http://mozilla.org/MPL/2.0/.
-
-import sys
-import tempfile
-from cStringIO import StringIO
-from collections import defaultdict
-
-import wptrunner
-import wpttest
-
-from mozlog import commandline, reader
-
-logger = None
-
-
-def setup_logging(args, defaults):
-    global logger
-    logger = commandline.setup_logging("web-platform-tests-unstable", args, defaults)
-    wptrunner.setup_stdlib_logger()
-
-    for name in args.keys():
-        if name.startswith("log_"):
-            args.pop(name)
-
-    return logger
-
-
-def group(items, size):
-    rv = []
-    i = 0
-    while i < len(items):
-        rv.append(items[i:i + size])
-        i += size
-
-    return rv
-
-
-def next_power_of_two(num):
-    rv = 1
-    while rv < num:
-        rv = rv << 1
-    return rv
-
-
-class Reducer(object):
-    def __init__(self, target, **kwargs):
-        self.target = target
-
-        self.test_type = kwargs["test_types"][0]
-        run_info = wpttest.get_run_info(kwargs["metadata_root"],
-                                        kwargs["product"],
-                                        debug=False)
-        test_filter = wptrunner.TestFilter(include=kwargs["include"])
-        self.test_loader = wptrunner.TestLoader(kwargs["tests_root"],
-                                                kwargs["metadata_root"],
-                                                [self.test_type],
-                                                run_info,
-                                                manifest_filters=[test_filter])
-        if kwargs["repeat"] == 1:
-            logger.critical("Need to specify --repeat with more than one repetition")
-            sys.exit(1)
-        self.kwargs = kwargs
-
-    def run(self):
-        all_tests = self.get_initial_tests()
-
-        tests = all_tests[:-1]
-        target_test = [all_tests[-1]]
-
-        if self.unstable(target_test):
-            return target_test
-
-        if not self.unstable(all_tests):
-            return []
-
-        chunk_size = next_power_of_two(int(len(tests) / 2))
-        logger.debug("Using chunk size %i" % chunk_size)
-
-        while chunk_size >= 1:
-            logger.debug("%i tests remain" % len(tests))
-            chunks = group(tests, chunk_size)
-            chunk_results = [None] * len(chunks)
-
-            for i, chunk in enumerate(chunks):
-                logger.debug("Running chunk %i/%i of size %i" % (i + 1, len(chunks), chunk_size))
-                trial_tests = []
-                chunk_str = ""
-                for j, inc_chunk in enumerate(chunks):
-                    if i != j and chunk_results[j] in (None, False):
-                        chunk_str += "+"
-                        trial_tests.extend(inc_chunk)
-                    else:
-                        chunk_str += "-"
-                logger.debug("Using chunks %s" % chunk_str)
-                trial_tests.extend(target_test)
-
-                chunk_results[i] = self.unstable(trial_tests)
-
-                # Dangerous(?) optimisation: if removing each of chunks 0..N-1
-                # individually left the run stable, the instability must come
-                # from the Nth chunk, so it could be marked unstable unrun:
-                # if i == len(chunks) - 2 and all(item is False for item in chunk_results[:-1]):
-                #     chunk_results[i+1] = True
-                #     continue
-
-            new_tests = []
-            keep_str = ""
-            for result, chunk in zip(chunk_results, chunks):
-                if not result:
-                    keep_str += "+"
-                    new_tests.extend(chunk)
-                else:
-                    keep_str += "-"
-
-            logger.debug("Keeping chunks %s" % keep_str)
-
-            tests = new_tests
-
-            chunk_size = int(chunk_size / 2)
-
-        return tests + target_test
-
-    def unstable(self, tests):
-        logger.debug("Running with %i tests" % len(tests))
-
-        self.test_loader.tests = {self.test_type: tests}
-
-        stdout, stderr = sys.stdout, sys.stderr
-        sys.stdout = StringIO()
-        sys.stderr = StringIO()
-
-        with tempfile.NamedTemporaryFile() as f:
-            args = self.kwargs.copy()
-            args["log_raw"] = [f]
-            args["capture_stdio"] = False
-            wptrunner.setup_logging(args, {})
-            wptrunner.run_tests(test_loader=self.test_loader, **args)
-            wptrunner.logger.remove_handler(wptrunner.logger.handlers[0])
-            is_unstable = self.log_is_unstable(f)
-
-            sys.stdout, sys.stderr = stdout, stderr
-
-        logger.debug("Result was unstable with chunk removed"
-                     if is_unstable else "stable")
-
-        return is_unstable
-
-    def log_is_unstable(self, log_f):
-        log_f.seek(0)
-
-        statuses = defaultdict(set)
-
-        def handle_status(item):
-            if item["test"] == self.target:
-                statuses[item["subtest"]].add(item["status"])
-
-        def handle_end(item):
-            if item["test"] == self.target:
-                statuses[None].add(item["status"])
-
-        reader.each_log(reader.read(log_f),
-                        {"test_status": handle_status,
-                         "test_end": handle_end})
-
-        logger.debug(str(statuses))
-
-        if not statuses:
-            logger.error("Didn't get any useful output from wptrunner")
-            log_f.seek(0)
-            for item in reader.read(log_f):
-                logger.debug(item)
-            return None
-
-        return any(len(item) > 1 for item in statuses.itervalues())
-
-    def get_initial_tests(self):
-        # TODO: this should take its inputs as arguments
-
-        all_tests = self.test_loader.tests[self.test_type]
-        tests = []
-        for item in all_tests:
-            tests.append(item)
-            if item.url == self.target:
-                break
-
-        logger.debug("Starting with tests: %s" % ("\n".join(item.id for item in tests)))
-
-        return tests
-
-
-def do_reduce(**kwargs):
-    target = kwargs.pop("target")
-    reducer = Reducer(target, **kwargs)
-
-    unstable_set = reducer.run()
-    return unstable_set
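
Reducer.run is in essence a bisection over power-of-two-sized chunks: a chunk is permanently dropped only if the target test stays unstable without it. A self-contained, deliberately simplified version of that loop (one trial at a time, rather than the batched chunk_results bookkeeping above):

    def next_power_of_two(num):
        rv = 1
        while rv < num:
            rv <<= 1
        return rv

    def reduce_tests(tests, target, is_unstable):
        if is_unstable([target]):
            return [target]               # unstable on its own
        if not is_unstable(tests + [target]):
            return []                     # not reproducible at all
        chunk_size = next_power_of_two(len(tests) // 2)
        while chunk_size >= 1:
            i = 0
            while i < len(tests):
                trial = tests[:i] + tests[i + chunk_size:]
                if is_unstable(trial + [target]):
                    tests = trial         # chunk irrelevant: drop it
                else:
                    i += chunk_size       # chunk needed: keep it
            chunk_size //= 2
        return tests + [target]

    # reduce_tests(["a", "b", "c"], "t", lambda ts: "b" in ts) -> ["b", "t"]
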
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/testharnessreport-servo.js
+++ /dev/null
@@ -1,21 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-var props = {output:%(output)d};
-var start_loc = document.createElement('a');
-start_loc.href = location.href;
-setup(props);
-
-add_completion_callback(function (tests, harness_status) {
-    var id = start_loc.pathname + start_loc.search + start_loc.hash;
-    console.log("ALERT: RESULT: " + JSON.stringify([
-        id,
-        harness_status.status,
-        harness_status.message,
-        harness_status.stack,
-        tests.map(function(t) {
-            return [t.name, t.status, t.message, t.stack];
-        }),
-    ]));
-});
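
The %(output)d above is not JavaScript: these report scripts are stored as Python %-format templates that the harness fills in before injecting them into the page. Rendering one looks roughly like:

    import os

    here = os.path.dirname(__file__)
    with open(os.path.join(here, "testharnessreport-servo.js")) as f:
        script = f.read() % {"output": 1}  # the template's only key
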
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/testharnessreport-servodriver.js
+++ /dev/null
@@ -1,27 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-setup({output:%(output)d});
-
-add_completion_callback(function() {
-    add_completion_callback(function (tests, status) {
-        var subtest_results = tests.map(function(x) {
-            return [x.name, x.status, x.message, x.stack];
-        });
-        var id = location.pathname + location.search + location.hash;
-        var results = JSON.stringify([id,
-                                      status.status,
-                                      status.message,
-                                      status.stack,
-                                      subtest_results]);
-        (function done() {
-            if (window.__wd_results_callback__) {
-                clearTimeout(__wd_results_timer__);
-                __wd_results_callback__(results);
-            } else {
-                setTimeout(done, 20);
-            }
-        })();
-    });
-});
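
The polling loop above spins until the driver installs __wd_results_callback__. A hypothetical driver-side counterpart, assuming a Selenium-style execute_async_script (the double-underscore names match the script above; the timer exists so the page can cancel the timeout fallback with clearTimeout):

    SETUP = '''
    var callback = arguments[arguments.length - 1];
    window.__wd_results_timer__ = setTimeout(function() {
        callback("TIMEOUT");
    }, %d);
    window.__wd_results_callback__ = function(results) {
        window.__wd_results_callback__ = null;
        callback(results);
    };
    '''

    def collect_results(session, timeout_ms):
        # Blocks until the injected completion handler calls back or the
        # fallback timer fires.
        return session.execute_async_script(SETUP % timeout_ms)
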
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/testharnessreport.js
+++ /dev/null
@@ -1,17 +0,0 @@
-/* This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-var props = {output:%(output)d,
-             explicit_timeout: true,
-             message_events: ["completion"]};
-
-if (window.opener && "timeout_multiplier" in window.opener) {
-    props["timeout_multiplier"] = window.opener.timeout_multiplier;
-}
-
-if (window.opener && window.opener.explicit_timeout) {
-    props["explicit_timeout"] = window.opener.explicit_timeout;
-}
-
-setup(props);
deleted file mode 100644
--- a/testing/web-platform/harness/wptrunner/testloader.py
+++ /dev/null
@@ -1,637 +0,0 @@
-import hashlib
-import json
-import os
-import urlparse
-from abc import ABCMeta, abstractmethod
-from Queue import Empty
-from collections import defaultdict, OrderedDict, deque
-from multiprocessing import Queue
-
-import manifestinclude
-import manifestexpected
-import wpttest
-from mozlog import structured
-
-manifest = None
-manifest_update = None
-
-def do_delayed_imports():
-    # This relies on an already loaded module having set the sys.path correctly :(
-    global manifest, manifest_update
-    from manifest import manifest
-    from manifest import update as manifest_update
-
-class TestChunker(object):
-    def __init__(self, total_chunks, chunk_number):
-        self.total_chunks = total_chunks
-        self.chunk_number = chunk_number
-        assert 1 <= self.chunk_number <= self.total_chunks
-        self.logger = structured.get_default_logger()
-        assert self.logger
-
-    def __call__(self, manifest):
-        raise NotImplementedError
-
-
-class Unchunked(TestChunker):
-    def __init__(self, *args, **kwargs):
-        TestChunker.__init__(self, *args, **kwargs)
-        assert self.total_chunks == 1
-
-    def __call__(self, manifest):
-        for item in manifest:
-            yield item
-
-
-class HashChunker(TestChunker):
-    def __call__(self, manifest):
-        chunk_index = self.chunk_number - 1
-        for test_type, test_path, tests in manifest:
-            h = int(hashlib.md5(test_path).hexdigest(), 16)
-            if h % self.total_chunks == chunk_index:
-                yield test_type, test_path, tests
-
-
-class DirectoryHashChunker(TestChunker):
-    """Like HashChunker except the directory is hashed.
-
-    This ensures that all tests in the same directory end up in the same
-    chunk.
-    """
-    def __call__(self, manifest):
-        chunk_index = self.chunk_number - 1
-        for test_type, test_path, tests in manifest:
-            h = int(hashlib.md5(os.path.dirname(test_path)).hexdigest(), 16)
-            if h % self.total_chunks == chunk_index:
-                yield test_type, test_path, tests
-
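
Both hash chunkers apply the same stable rule: bucket by the md5 of the path (or of its directory) mod the chunk count, so a test's chunk never depends on ordering or on what the other chunks contain. The rule in isolation:

    import hashlib

    def chunk_index(path, total_chunks):
        # Same bucketing as HashChunker above; hashing keeps assignment
        # stable across runs and machines.
        return int(hashlib.md5(path.encode("utf-8")).hexdigest(), 16) % total_chunks
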
-
-class EqualTimeChunker(TestChunker):
-    def _group_by_directory(self, manifest_items):
-        """Split the list of manifest items into a ordered dict that groups tests in
-        so that anything in the same subdirectory beyond a depth of 3 is in the same
-        group. So all tests in a/b/c, a/b/c/d and a/b/c/e will be grouped together
-        and separate to tests in a/b/f
-
-        Returns: tuple (ordered dict of {test_dir: PathData}, total estimated runtime)
-        """
-
-        class PathData(object):
-            def __init__(self, path):
-                self.path = path
-                self.time = 0
-                self.tests = []
-
-        by_dir = OrderedDict()
-        total_time = 0
-
-        for i, (test_type, test_path, tests) in enumerate(manifest_items):
-            test_dir = tuple(os.path.split(test_path)[0].split(os.path.sep)[:3])
-
-            if test_dir not in by_dir:
-                by_dir[test_dir] = PathData(test_dir)
-
-            data = by_dir[test_dir]
-            time = sum(test.default_timeout if test.timeout !=
-                       "long" else test.long_timeout for test in tests)
-            data.time += time
-            total_time += time
-            data.tests.append((test_type, test_path, tests))
-
-        return by_dir, total_time
-
-    def _maybe_remove(self, chunks, i, direction):
-        """Trial removing a chunk from one chunk to an adjacent one.
-
-        :param chunks: - the list of all chunks
-        :param i: - the chunk index in the list of chunks to try removing from
-        :param direction: either "next" if we are going to move from the end to
-                          the subsequent chunk, or "prev" if we are going to move
-                          from the start into the previous chunk.
-
-        :returns bool: Did a chunk get moved?"""
-        source_chunk = chunks[i]
-        if direction == "next":
-            target_chunk = chunks[i+1]
-            path_index = -1
-            move_func = lambda: target_chunk.appendleft(source_chunk.pop())
-        elif direction == "prev":
-            target_chunk = chunks[i-1]
-            path_index = 0
-            move_func = lambda: target_chunk.append(source_chunk.popleft())
-        else:
-            raise ValueError("Unexpected move direction %s" % direction)
-
-        return self._maybe_move(source_chunk, target_chunk, path_index, move_func)
-
-    def _maybe_add(self, chunks, i, direction):
-        """Trial adding a chunk from one chunk to an adjacent one.
-
-        :param chunks: - the list of all chunks
-        :param i: - the chunk index in the list of chunks to try adding to
-        :param direction: either "next" if we are going to remove from the
-                          the subsequent chunk, or "prev" if we are going to remove
-                          from the the previous chunk.
-
-        :returns bool: Did a chunk get moved?"""
-        target_chunk = chunks[i]
-        if direction == "next":
-            source_chunk = chunks[i+1]
-            path_index = 0
-            move_func = lambda: target_chunk.append(source_chunk.popleft())
-        elif direction == "prev":
-            source_chunk = chunks[i-1]
-            path_index = -1
-            move_func = lambda: target_chunk.appendleft(source_chunk.pop())
-        else:
-            raise ValueError("Unexpected move direction %s" % direction)
-
-        return self._maybe_move(source_chunk, target_chunk, path_index, move_func)
-
-    def _maybe_move(self, source_chunk, target_chunk, path_index, move_func):
-        """Move from one chunk to another, assess the change in badness,
-        and keep the move iff it decreases the badness score.
-
-        :param source_chunk: chunk to move from
-        :param target_chunk: chunk to move to
-        :param path_index: 0 if we are moving from the start or -1 if we are moving from the
-                           end
-        :param move_func: Function that actually moves between chunks"""
-        if len(source_chunk.paths) <= 1:
-            return False
-
-        move_time = source_chunk.paths[path_index].time
-
-        new_source_badness = self._badness(source_chunk.time - move_time)
-        new_target_badness = self._badness(target_chunk.time + move_time)
-
-        delta_badness = ((new_source_badness + new_target_badness) -
-                         (source_chunk.badness + target_chunk.badness))
-        if delta_badness < 0:
-            move_func()
-            return True
-
-        return False
-
-    def _badness(self, time):
-        """Metric of badness for a specific chunk
-
-        :param time: the time for a specific chunk"""
-        return (time - self.expected_time)**2
-
-    def _get_chunk(self, manifest_items):
-        by_dir, total_time = self._group_by_directory(manifest_items)
-
-        if len(by_dir) < self.total_chunks:
-            raise ValueError("Tried to split into %i chunks, but only %i subdirectories included" % (
-                self.total_chunks, len(by_dir)))
-
-        self.expected_time = float(total_time) / self.total_chunks
-
-        chunks = self._create_initial_chunks(by_dir)
-
-        while True:
-            # Move a test from one chunk to the next until doing so no longer
-            # reduces the badness
-            got_improvement = self._update_chunks(chunks)
-            if not got_improvement:
-                break
-
-        self.logger.debug(self.expected_time)
-        for i, chunk in chunks.iteritems():
-            self.logger.debug("%i: %i, %i" % (i + 1, chunk.time, chunk.badness))
-
-        assert self._all_tests(by_dir) == self._chunked_tests(chunks)
-
-        return self._get_tests(chunks)
-
-    @staticmethod
-    def _all_tests(by_dir):
-        """Return a set of all tests in the manifest from a grouping by directory"""
-        return set(x[0] for item in by_dir.itervalues()
-                   for x in item.tests)
-
-    @staticmethod
-    def _chunked_tests(chunks):
-        """Return a set of all tests in the manifest from the chunk list"""
-        return set(x[0] for chunk in chunks.itervalues()
-                   for path in chunk.paths
-                   for x in path.tests)
-
-    def _create_initial_chunks(self, by_dir):
-        """Create an initial unbalanced list of chunks.
-
-        :param by_dir: All tests in the manifest grouped by subdirectory
-        :returns list: A list of Chunk objects"""
-
-        class Chunk(object):
-            def __init__(self, paths, index):
-                """List of PathData objects that together form a single chunk of
-                tests"""
-                self.paths = deque(paths)
-                self.time = sum(item.time for item in paths)
-                self.index = index
-
-            def appendleft(self, path):
-                """Add a PathData object to the start of the chunk"""
-                self.paths.appendleft(path)
-                self.time += path.time
-
-            def append(self, path):
-                """Add a PathData object to the end of the chunk"""
-                self.paths.append(path)
-                self.time += path.time
-
-            def pop(self):
-                """Remove PathData object from the end of the chunk"""
-                assert len(self.paths) > 1
-                self.time -= self.paths[-1].time
-                return self.paths.pop()
-
-            def popleft(self):
-                """Remove PathData object from the start of the chunk"""
-                assert len(self.paths) > 1
-                self.time -= self.paths[0].time
-                return self.paths.popleft()
-
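-            # Note: the parameter below is named self_ so that `self` still
-            # refers to the enclosing EqualTimeChunker, whose _badness
-            # method supplies the metric.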
-            @property
-            def badness(self_):
-                """Badness metric for this chunk"""
-                return self._badness(self_.time)
-
-        initial_size = len(by_dir) / self.total_chunks
-        chunk_boundaries = [initial_size * i
-                            for i in xrange(self.total_chunks)] + [len(by_dir)]
-
-        chunks = OrderedDict()
-        for i, lower in enumerate(chunk_boundaries[:-1]):
-            upper = chunk_boundaries[i + 1]
-            paths = by_dir.values()[lower:upper]
-            chunks[i] = Chunk(paths, i)
-
-        assert self._all_tests(by_dir) == self._chunked_tests(chunks)
-
-        return chunks
-
-    def _update_chunks(self, chunks):
-        """Run a single iteration of the chunk update algorithm.
-
-        :param chunks: - List of chunks
-        """
-        # TODO: consider replacing this with a heap
-        sorted_chunks = sorted(chunks.values(), key=lambda x: -x.badness)
-        got_improvement = False
-        for chunk in sorted_chunks:
-            if chunk.time < self.expected_time:
-                f = self._maybe_add
-            else:
-                f = self._maybe_remove
-
-            if chunk.index == 0:
-                order = ["next"]
-            elif chunk.index == self.total_chunks - 1:
-                order = ["prev"]
-            else:
-                if chunk.time < self.expected_time:
-                    # First try to add a test from the neighboring chunk with the
-                    # greatest total time
-                    if chunks[chunk.index + 1].time > chunks[chunk.index - 1].time:
-                        order = ["next", "prev"]
-                    else:
-                        order = ["prev", "next"]
-                else:
-                    # First try to remove a test and add to the neighboring chunk with the
-                    # lowest total time
-                    if chunks[chunk.index + 1].time > chunks[chunk.index - 1].time:
-                        order = ["prev", "next"]
-                    else:
-                        order = ["next", "prev"]
-
-            for direction in order:
-                if f(chunks, chunk.index, direction):
-                    got_improvement = True
-                    break
-
-            if got_improvement:
-                break
-
-        return got_improvement
-
-    def _get_tests(self, chunks):
-        """Return the list of tests corresponding to the chunk number we are running.
-
-        :param chunks: List of chunks"""
-        tests = []
-        for path in chunks[self.chunk_number - 1].paths:
-            tests.extend(path.tests)
-
-        return tests
-
-    def __call__(self, manifest_iter):
-        manifest = list(manifest_iter)
-        tests = self._get_chunk(manifest)
-        for item in tests:
-            yield item
-
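
The moves above greedily lower the total badness, i.e. the summed squared deviation of each chunk's runtime from the mean. The objective written out on plain numbers:

    def total_badness(chunk_times):
        expected = float(sum(chunk_times)) / len(chunk_times)
        return sum((t - expected) ** 2 for t in chunk_times)

    # total_badness([10, 10, 10]) == 0.0
    # total_badness([5, 10, 15]) == 50.0
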
-
-class TestFilter(object):
-    def __init__(self, test_manifests, include=None, exclude=None, manifest_path=None):
-        if manifest_path is not None and include is None:
-            self.manifest = manifestinclude.get_manifest(manifest_path)
-        else:
-            self.manifest = manifestinclude.IncludeManifest.create()
-
-        if include:
-            self.manifest.set("skip", "true")
-            for item in include:
-                self.manifest.add_include(test_manifests, item)
-
-        if exclude:
-            for item in exclude:
-                self.manifest.add_exclude(test_manifests, item)
-
-    def __call__(self, manifest_iter):
-        for test_type, test_path, tests in manifest_iter:
-            include_tests = set()
-            for test in tests:
-                if self.manifest.include(test):
-                    include_tests.add(test)
-
-            if include_tests:
-                yield test_type, test_path, include_tests
-
-
-class TagFilter(object):
-    def __init__(self, tags):
-        self.tags = set(tags)