Bug 945222 - Initial import of web-platform-tests testsuite 3/4: wptrunner test harness, r=ahal,Ms2ger,ato
authorJames Graham <james@hoppipolla.co.uk>
Thu, 04 Sep 2014 12:51:51 +0100
changeset 226859 8471b93cfa597dd649985b193e2032813407ee04
parent 226858 f6a75f9ad10fd45d41ffdcfc7dfedceb9f28e665
child 226860 c3773aeab2f8791dcfd6213226b983eae6b276cb
push id4187
push userbhearsum@mozilla.com
push dateFri, 28 Nov 2014 15:29:12 +0000
treeherdermozilla-beta@f23cc6a30c11 [default view] [failures only]
perfherder[talos] [build metrics] [platform microbench] (compared to previous push)
reviewersahal, Ms2ger, ato
bugs945222
milestone35.0a1
first release with
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
last release without
nightly linux32
nightly linux64
nightly mac
nightly win32
nightly win64
Bug 945222 - Initial import of web-platform-tests testsuite 3/4: wptrunner test harness, r=ahal,Ms2ger,ato
testing/web-platform/harness/.gitignore
testing/web-platform/harness/MANIFEST.in
testing/web-platform/harness/README.rst
testing/web-platform/harness/docs/Makefile
testing/web-platform/harness/docs/architecture.svg
testing/web-platform/harness/docs/conf.py
testing/web-platform/harness/docs/design.rst
testing/web-platform/harness/docs/expectation.rst
testing/web-platform/harness/docs/index.rst
testing/web-platform/harness/docs/make.bat
testing/web-platform/harness/docs/usage.rst
testing/web-platform/harness/requirements.txt
testing/web-platform/harness/requirements_b2g.txt
testing/web-platform/harness/requirements_chrome.txt
testing/web-platform/harness/requirements_firefox.txt
testing/web-platform/harness/requirements_servo.txt
testing/web-platform/harness/setup.py
testing/web-platform/harness/wptrunner.default.ini
testing/web-platform/harness/wptrunner/__init__.py
testing/web-platform/harness/wptrunner/browsers/__init__.py
testing/web-platform/harness/wptrunner/browsers/b2g.py
testing/web-platform/harness/wptrunner/browsers/b2g_setup/certtest_app.zip
testing/web-platform/harness/wptrunner/browsers/base.py
testing/web-platform/harness/wptrunner/browsers/chrome.py
testing/web-platform/harness/wptrunner/browsers/firefox.py
testing/web-platform/harness/wptrunner/browsers/server-locations.txt
testing/web-platform/harness/wptrunner/browsers/servo.py
testing/web-platform/harness/wptrunner/config.json
testing/web-platform/harness/wptrunner/config.py
testing/web-platform/harness/wptrunner/executors/__init__.py
testing/web-platform/harness/wptrunner/executors/base.py
testing/web-platform/harness/wptrunner/executors/executormarionette.py
testing/web-platform/harness/wptrunner/executors/executorselenium.py
testing/web-platform/harness/wptrunner/executors/executorservo.py
testing/web-platform/harness/wptrunner/executors/process.py
testing/web-platform/harness/wptrunner/executors/reftest-wait.js
testing/web-platform/harness/wptrunner/executors/reftest.js
testing/web-platform/harness/wptrunner/executors/testharness_marionette.js
testing/web-platform/harness/wptrunner/executors/testharness_webdriver.js
testing/web-platform/harness/wptrunner/expected.py
testing/web-platform/harness/wptrunner/hosts.py
testing/web-platform/harness/wptrunner/manifestexpected.py
testing/web-platform/harness/wptrunner/manifestinclude.py
testing/web-platform/harness/wptrunner/manifestupdate.py
testing/web-platform/harness/wptrunner/metadata.py
testing/web-platform/harness/wptrunner/products.py
testing/web-platform/harness/wptrunner/reduce.py
testing/web-platform/harness/wptrunner/testharness_runner.html
testing/web-platform/harness/wptrunner/testharnessreport.js
testing/web-platform/harness/wptrunner/testrunner.py
testing/web-platform/harness/wptrunner/tests/__init__.py
testing/web-platform/harness/wptrunner/tests/test_chunker.py
testing/web-platform/harness/wptrunner/tests/test_hosts.py
testing/web-platform/harness/wptrunner/tests/test_update.py
testing/web-platform/harness/wptrunner/update.py
testing/web-platform/harness/wptrunner/vcs.py
testing/web-platform/harness/wptrunner/wptcommandline.py
testing/web-platform/harness/wptrunner/wptmanifest/__init__.py
testing/web-platform/harness/wptrunner/wptmanifest/backends/__init__.py
testing/web-platform/harness/wptrunner/wptmanifest/backends/conditional.py
testing/web-platform/harness/wptrunner/wptmanifest/backends/static.py
testing/web-platform/harness/wptrunner/wptmanifest/node.py
testing/web-platform/harness/wptrunner/wptmanifest/parser.py
testing/web-platform/harness/wptrunner/wptmanifest/serializer.py
testing/web-platform/harness/wptrunner/wptmanifest/tests/__init__.py
testing/web-platform/harness/wptrunner/wptmanifest/tests/test_conditional.py
testing/web-platform/harness/wptrunner/wptmanifest/tests/test_parser.py
testing/web-platform/harness/wptrunner/wptmanifest/tests/test_serializer.py
testing/web-platform/harness/wptrunner/wptmanifest/tests/test_static.py
testing/web-platform/harness/wptrunner/wptmanifest/tests/test_tokenizer.py
testing/web-platform/harness/wptrunner/wptrunner.py
testing/web-platform/harness/wptrunner/wpttest.py
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/.gitignore
@@ -0,0 +1,5 @@
+*.py[co]
+*~
+*#
+\#*
+_virtualenv
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/MANIFEST.in
@@ -0,0 +1,12 @@
+exclude MANIFEST.in
+include requirements.txt
+include wptrunner/browsers/b2g_setup/*
+include wptrunner.default.ini
+include wptrunner/testharness_runner.html
+include wptrunner/testharnessreport.js
+include wptrunner/executors/testharness_marionette.js
+include wptrunner/executors/testharness_webdriver.js
+include wptrunner/executors/reftest.js
+include wptrunner/executors/reftest-wait.js
+include wptrunner/config.json
+include wptrunner/browsers/server-locations.txt
\ No newline at end of file
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/README.rst
@@ -0,0 +1,156 @@
+web-platform-tests Harness
+==========================
+
+This harness is designed for running the W3C web-platform-tests
+`testsuite`_.
+
+The code hasn't been merged to master yet, but when it is the
+documentation below might be quite relevant.
+
+Installation
+~~~~~~~~~~~~
+
+wptrunner is expected to be installed into a virtualenv using pip. For
+development, it can be installed using the `-e` option::
+
+  pip install -e ./
+
+Running the Tests
+~~~~~~~~~~~~~~~~~
+
+After installation the command `wptrunner` should be available to run
+the tests. This takes two arguments; the path to the metadata
+directory containing expectation files (see below) and a MANIFEST.json
+file (see the web-platform-tests documentation for instructions on
+generating this file), and the path to the web-platform-tests
+checkout::
+
+  wptrunner /path/to/metadata /path/to/tests
+
+There are also a variety of other options available; use `--help` to
+list them.
+
+Expectation Data
+~~~~~~~~~~~~~~~~
+
+wptrunner is designed to be used in an environment where it is not
+just necessary to know which tests passed, but to compare the results
+between runs. For this reason it is possible to store the results of a
+previous run in a set of ini-like "expectation files". This format is
+documented below. To generate the expectation files use `wptrunner` with
+the `--log-raw=/path/to/log/file` option. This can then be used as
+input to the `wptupdate` tool.
+
+Expectation File Format
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Metadata about tests, notably including their expected results, is
+stored in a modified ini-like format that is designed to be human
+editable, but also to be machine updatable.
+
+Each test file that requires metadata to be specified (because it has
+a non-default expectation or because it is disabled, for example) has
+a corresponding expectation file in the `metadata` directory. For
+example a test file `html/test1.html` containing a failing test would
+have an expectation file called `html/test1.html.ini` in the
+`metadata` directory.
+
+An example of an expectation file is::
+
+  example_default_key: example_value
+
+  [filename.html]
+    type: testharness
+
+    [subtest1]
+      expected: FAIL
+
+    [subtest2]
+      expected:
+        if platform == 'win': TIMEOUT
+        if platform == 'osx': ERROR
+        FAIL
+
+  [filename.html?query=something]
+    type: testharness
+    disabled: bug12345
+
+The file consists of two elements, key-value pairs and
+sections.
+
+Sections are delimited by headings enclosed in square brackets. Any
+closing square bracket in the heading itself may be escaped with a
+backslash. Each section may then contain any number of key-value pairs
+followed by any number of subsections. So that it is clear which data
+belongs to each section without the use of end-section markers, the
+data for each section (i.e. the key-value pairs and subsections) must
+be indented using spaces. Indentation need only be consistent, but
+using two spaces per level is recommended.
+
+In a test expectation file, each resource provided by the file has a
+single section, with the section heading being the part after the last
+`/` in the test url. Tests that have subtests may have subsections
+for those subtests in which the heading is the name of the subtest.
+
+Simple key-value pairs are of the form::
+
+  key: value
+
+Note that unlike ini files, only `:` is a valid separator; `=` will
+not work as expected. Key-value pairs may also have conditional
+values of the form::
+
+  key:
+    if condition1: value1
+    if condition2: value2
+    default
+
+In this case each conditional is evaluated in turn and the value is
+that on the right hand side of the first matching conditional. In the
+case that no condition matches, the unconditional default is used. If
+no condition matches and no default is provided it is equivalent to
+the key not being present. Conditionals use a simple python-like expression
+language e.g.::
+
+  if debug and (platform == "linux" or platform == "osx"): FAIL
+
+For test expectations the available variables are those in the
+`run_info` which for desktop are `version`, `os`, `bits`, `processor`,
+`debug` and `product`.
+
+Key-value pairs specified at the top level of the file before any
+sections are special as they provide defaults for the rest of the file
+e.g.::
+
+  key1: value1
+
+  [section 1]
+    key2: value2
+
+  [section 2]
+    key1: value3
+
+In this case, inside section 1, `key1` would have the value `value1`
+and `key2` the value `value2` whereas in section 2 `key1` would have
+the value `value3` and `key2` would be undefined.
+
+The web-platform-test harness knows about several keys:
+
+`expected`
+  Must evaluate to a possible test status indicating the expected
+  result of the test. The implicit default is PASS or OK when the
+  field isn't present.
+
+`disabled`
+  Any value indicates that the test is disabled.
+
+`type`
+  The test type e.g. `testharness` or `reftest`.
+
+`reftype`
+  The type of comparison for reftests; either `==` or `!=`.
+
+`refurl`
+  The reference url for reftests.
+
+.. _testsuite: https://github.com/w3c/web-platform-tests
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/docs/Makefile
@@ -0,0 +1,177 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+
+clean:
+	rm -rf $(BUILDDIR)/*
+
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp:
+	$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+	@echo
+	@echo "Build finished; now you can run "qcollectiongenerator" with the" \
+	      ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+	@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/wptrunner.qhcp"
+	@echo "To view the help file:"
+	@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/wptrunner.qhc"
+
+devhelp:
+	$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+	@echo
+	@echo "Build finished."
+	@echo "To view the help file:"
+	@echo "# mkdir -p $$HOME/.local/share/devhelp/wptrunner"
+	@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/wptrunner"
+	@echo "# devhelp"
+
+epub:
+	$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+	@echo
+	@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo
+	@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+	@echo "Run \`make' in that directory to run these through (pdf)latex" \
+	      "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through pdflatex..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja:
+	$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+	@echo "Running LaTeX files through platex and dvipdfmx..."
+	$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+	@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text:
+	$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+	@echo
+	@echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man:
+	$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+	@echo
+	@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo
+	@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+	@echo "Run \`make' in that directory to run these through makeinfo" \
+	      "(use \`make info' here to do that automatically)."
+
+info:
+	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+	@echo "Running Texinfo files through makeinfo..."
+	make -C $(BUILDDIR)/texinfo info
+	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext:
+	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+	@echo
+	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes:
+	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+	@echo
+	@echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck:
+	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+	@echo
+	@echo "Link check complete; look for any errors in the above output " \
+	      "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest:
+	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+	@echo "Testing of doctests in the sources finished, look at the " \
+	      "results in $(BUILDDIR)/doctest/output.txt."
+
+xml:
+	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+	@echo
+	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml:
+	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+	@echo
+	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/docs/architecture.svg
@@ -0,0 +1,1 @@
+<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" width="780px" height="1087px" version="1.1"><defs><linearGradient x1="0%" y1="0%" x2="0%" y2="100%" id="mx-gradient-a9c4eb-1-a9c4eb-1-s-0"><stop offset="0%" style="stop-color:#A9C4EB"/><stop offset="100%" style="stop-color:#A9C4EB"/></linearGradient></defs><g transform="translate(0.5,0.5)"><rect x="498" y="498" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(500,521)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestRunner</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="338" y="778" width="120" height="60" fill="#f19c99" stroke="#000000" pointer-events="none"/><g transform="translate(340,801)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">Product under test</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="338" y="388" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(340,411)"><switch><foreignObject pointer-events="all" width="116" height="15" 
requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestRunnerManager</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="338" y="228" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(340,251)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">ManagerGroup</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="658" y="608" width="120" height="60" fill="#ffce9f" stroke="#000000" pointer-events="none"/><g transform="translate(660,631)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">Executor</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="338" y="498" width="120" height="60" fill="url(#mx-gradient-a9c4eb-1-a9c4eb-1-s-0)" stroke="#000000" pointer-events="none"/><g 
transform="translate(340,521)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">Browser</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 398 288 L 398 382" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 387 L 395 380 L 398 382 L 402 380 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 448 L 398 492" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 497 L 395 490 L 398 492 L 402 490 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 618 528 L 684 603" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 687 607 L 680 604 L 684 603 L 685 600 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="498" y="608" width="120" height="60" fill="#a9c4eb" stroke="#000000" pointer-events="none"/><g transform="translate(500,631)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">ExecutorBrowser</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 624 638 L 658 638" fill="none" 
stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 619 638 L 626 635 L 624 638 L 626 642 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 428 448 L 552 496" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 557 498 L 549 498 L 552 496 L 552 492 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 558 L 398 772" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 398 777 L 395 770 L 398 772 L 402 770 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="338" y="48" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(340,71)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">run_tests</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 458 78 L 652 78" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 657 78 L 650 82 L 652 78 L 650 75 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="658" y="48" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(660,71)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: 
rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestLoader</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="71" y="48" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(73,71)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestEnvironment</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="151" y="618" width="120" height="60" fill="#b9e0a5" stroke="#000000" pointer-events="none"/><g transform="translate(153,641)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">wptserve</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><rect x="1" y="618" width="120" height="60" fill="#b9e0a5" stroke="#000000" pointer-events="none"/><g transform="translate(3,641)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; 
font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">pywebsocket</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 338 78 L 197 78" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 192 78 L 199 75 L 197 78 L 199 82 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 101 308 L 62 612" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 61 617 L 59 610 L 62 612 L 66 610 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 161 308 L 204 612" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 204 617 L 200 610 L 204 612 L 207 609 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 338 823 L 61 678" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 211 678 L 338 793" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 398 108 L 398 222" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 227 L 395 220 L 398 222 L 402 220 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 706 288 L 618 513" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><rect x="658" y="388" width="70" height="40" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="693" y="412">Queue.get</text></g><path d="M 458 808 L 718 668" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" 
pointer-events="none"/><rect x="71" y="248" width="120" height="60" fill="#b9e0a5" stroke="#000000" pointer-events="none"/><g transform="translate(73,271)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">serve.py</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 131 108 L 131 242" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 131 247 L 128 240 L 131 242 L 135 240 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 88 973 L 132 973" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 137 973 L 130 977 L 132 973 L 130 970 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="138" y="1018" width="180" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="228" y="1037">Communication (cross process)</text></g><path d="M 88 1002 L 132 1002" fill="none" stroke="#000000" stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><path d="M 137 1002 L 130 1006 L 132 1002 L 130 999 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="138" y="958" width="180" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="228" y="977">Ownership (same process)</text></g><path d="M 88 1033 L 138 1033" fill="none" stroke="#000000" 
stroke-miterlimit="10" stroke-dasharray="3 3" pointer-events="none"/><rect x="143" y="988" width="180" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="233" y="1007">Ownership (cross process)</text></g><rect x="428" y="966" width="50" height="15" fill="#e6d0de" stroke="#000000" pointer-events="none"/><rect x="428" y="990" width="50" height="15" fill="#a9c4eb" stroke="#000000" pointer-events="none"/><rect x="428" y="1015" width="50" height="15" fill="#ffce9f" stroke="#000000" pointer-events="none"/><rect x="428" y="1063" width="50" height="15" fill="#f19c99" stroke="#000000" pointer-events="none"/><rect x="428" y="1038" width="50" height="15" fill="#b9e0a5" stroke="#000000" pointer-events="none"/><rect x="485" y="958" width="90" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="530" y="977">wptrunner class</text></g><rect x="486" y="983" width="150" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="561" y="1002">Per-product wptrunner class</text></g><rect x="486" y="1008" width="150" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="561" y="1027">Per-protocol wptrunner class</text></g><rect x="491" y="1031" width="150" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="566" y="1050">Web-platform-tests component</text></g><rect x="486" y="1055" width="90" height="30" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="531" y="1074">Browser process</text></g><path d="M 398 8 L 398 42" 
fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 398 47 L 395 40 L 398 42 L 402 40 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="478" y="388" width="120" height="60" fill-opacity="0.5" fill="#e6d0de" stroke="#000000" stroke-opacity="0.5" pointer-events="none"/><g transform="translate(480,411)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestRunnerManager</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 398 288 L 533 384" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 537 387 L 529 386 L 533 384 L 533 380 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="198" y="388" width="120" height="60" fill-opacity="0.5" fill="#e6d0de" stroke="#000000" stroke-opacity="0.5" pointer-events="none"/><g transform="translate(200,411)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">TestRunnerManager</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 398 288 L 263 384" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 259 
387 L 263 380 L 263 384 L 267 386 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><rect x="575" y="748" width="110" height="40" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="630" y="758">Browser control</text><text x="630" y="772">protocol</text><text x="630" y="786">(e.g. WebDriver)</text></g><rect x="258" y="708" width="80" height="40" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="298" y="732">HTTP</text></g><rect x="111" y="728" width="80" height="40" fill="none" stroke="none" pointer-events="none"/><g fill="#000000" font-family="Helvetica" text-anchor="middle" font-size="12px"><text x="151" y="752">websockets</text></g><rect x="658" y="228" width="120" height="60" fill="#e6d0de" stroke="#000000" pointer-events="none"/><g transform="translate(660,251)"><switch><foreignObject pointer-events="all" width="116" height="15" requiredFeatures="http://www.w3.org/TR/SVG11/feature#Extensibility"><div xmlns="http://www.w3.org/1999/xhtml" style="display: inline-block; font-size: 12px; font-family: Helvetica; color: rgb(0, 0, 0); line-height: 1.26; vertical-align: top; width: 116px; white-space: normal; text-align: center;">Tests Queue</div></foreignObject><text x="58" y="14" fill="#000000" text-anchor="middle" font-size="12px" font-family="Helvetica">[Not supported by viewer]</text></switch></g><path d="M 718 108 L 718 222" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 718 227 L 715 220 L 718 222 L 722 220 Z" fill="#000000" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/><path d="M 428 970 L 428 970" fill="none" stroke="#000000" stroke-miterlimit="10" pointer-events="none"/></g></svg>
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/docs/conf.py
@@ -0,0 +1,267 @@
+# -*- coding: utf-8 -*-
+#
+# wptrunner documentation build configuration file, created by
+# sphinx-quickstart on Mon May 19 18:14:20 2014.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#sys.path.insert(0, os.path.abspath('.'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+    'sphinx.ext.autodoc',
+    'sphinx.ext.intersphinx',
+    'sphinx.ext.viewcode',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix of source filenames.
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'wptrunner'
+copyright = u''
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0.3'
+# The full version, including alpha/beta/rc tags.
+release = '0.3'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+html_theme = 'default'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents.  If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it.  The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'wptrunnerdoc'
+
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+  ('index', 'wptrunner.tex', u'wptrunner Documentation',
+   u'James Graham', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+    ('index', 'wptrunner', u'wptrunner Documentation',
+     [u'James Graham'], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+  ('index', 'wptrunner', u'wptrunner Documentation',
+   u'James Graham', 'wptrunner', 'One line description of project.',
+   'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {'python': ('http://docs.python.org/', None),
+                       'mozlog': ('http://mozbase.readthedocs.org/en/latest/', None)}
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/docs/design.rst
@@ -0,0 +1,106 @@
+wptrunner Design
+================
+
+The design of wptrunner is intended to meet the following
+requirements:
+
+ * Possible to run tests from W3C web-platform-tests.
+
+ * Tests should be run as fast as possible. In particular it should
+   not be necessary to restart the browser between tests, or similar.
+
+ * As far as possible, the tests should run in a "normal" browser and
+   browsing context. In particular many tests assume that they are
+   running in a top-level browsing context, so we must avoid the use
+   of an ``iframe`` test container.
+
+ * It must be possible to deal with all kinds of behaviour of the
+   browser under test, for example, crashing, hanging, etc.
+
+ * It should be possible to add support for new platforms and browsers
+   with minimal code changes.
+
+ * It must be possible to run tests in parallel to further improve
+   performance.
+
+ * Test output must be in a machine readable form.
+
+Architecture
+------------
+
+In order to meet the above requirements, wptrunner is designed to
+push as much of the test scheduling as possible into the harness. This
+allows the harness to monitor the state of the browser and perform
+appropriate action if it gets into an unwanted state e.g. kill the
+browser if it appears to be hung.
+
+The harness will typically communicate with the browser via some remote
+control protocol such as WebDriver. However for browsers where no such
+protocol is supported, other implementation strategies are possible,
+typically at the expense of speed.
+
+The overall architecture of wptrunner is shown in the diagram below:
+
+.. image:: architecture.svg
+
+The main entry point to the code is :py:func:`run_tests` in
+``wptrunner.py``. This is responsible for setting up the test
+environment, loading the list of tests to be executed, and invoking
+the remainder of the code to actually execute some tests.
+
+The test environment is encapsulated in the
+:py:class:`TestEnvironment` class. This defers to code in
+``web-platform-tests`` which actually starts the required servers to
+run the tests.
+
+The set of tests to run is defined by the
+:py:class:`TestLoader`. This is constructed with a
+:py:class:`TestFilter` (not shown), which takes any filter arguments
+from the command line to restrict the set of tests that will be
+run. The :py:class:`TestLoader` reads both the ``web-platform-tests``
+JSON manifest and the expectation data stored in ini files and
+produces a :py:class:`multiprocessing.Queue` of tests to run, and
+their expected results.
+
+Actually running the tests happens through the
+:py:class:`ManagerGroup` object. This takes the :py:class:`Queue` of
+tests to be run and starts a :py:class:`testrunner.TestRunnerManager` for each
+instance of the browser under test that will be started. These
+:py:class:`TestRunnerManager` instances are each started in their own
+thread.
+
+A :py:class:`TestRunnerManager` coordinates starting the product under
+test, and outputting results from the test. In the case that the test
+has timed out or the browser has crashed, it has to restart the
+browser to ensure the test run can continue. The functionality for
+initialising the browser under test, and probing its state
+(e.g. whether the process is still alive) is implemented through a
+:py:class:`Browser` object. An implementation of this class must be
+provided for each product that is supported.
+
+The functionality for actually running the tests is provided by a
+:py:class:`TestRunner` object. :py:class:`TestRunner` instances are
+run in their own child process created with the
+:py:mod:`multiprocessing` module. This allows them to run concurrently
+and to be killed and restarted as required. Communication between the
+:py:class:`TestRunnerManager` and the :py:class:`TestRunner` is
+provided by a pair of queues, one for sending messages in each
+direction. In particular test results are sent from the
+:py:class:`TestRunner` to the :py:class:`TestRunnerManager` using one
+of these queues.
+
+The :py:class:`TestRunner` object is generic in that the same
+:py:class:`TestRunner` is used regardless of the product under
+test. However the details of how to run the test may vary greatly with
+the product since different products support different remote control
+protocols (or none at all). These protocol-specific parts are placed
+in the :py:class:`Executor` object. There is typically a different
+:py:class:`Executor` class for each combination of control protocol
+and test type. The :py:class:`TestRunner` is responsible for pulling
+each test off the :py:class:`Queue` of tests and passing it down to
+the :py:class:`Executor`.
+
+The executor often requires access to details of the particular
+browser instance that it is testing so that it knows e.g. which port
+to connect to in order to send commands to the browser. These details are
+encapsulated in the :py:class:`ExecutorBrowser` class.
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/docs/expectation.rst
@@ -0,0 +1,245 @@
+Expectation Data
+================
+
+Introduction
+------------
+
+For use in continuous integration systems, and other scenarios where
+regression tracking is required, wptrunner supports storing and
+loading the expected result of each test in a test run. Typically
+these expected results will initially be generated by running the
+testsuite in a baseline build. They may then be edited by humans as
+new features are added to the product that change the expected
+results. The expected results may also vary for a single product
+depending on the platform on which it is run. Therefore, the raw
+structured log data is not a suitable format for storing these
+files. Instead something is required that is:
+
+ * Human readable
+
+ * Human editable
+
+ * Machine readable / writable
+
+ * Capable of storing test id / result pairs
+
+ * Suitable for storing in a version control system (i.e. text-based)
+
+The need for different results per platform means either having
+multiple expectation files for each platform, or having a way to
+express conditional values within a certain file. The former would be
+rather cumbersome for humans updating the expectation files, so the
+latter approach has been adopted, leading to the requirement:
+
+ * Capable of storing result values that are conditional on the platform.
+
+There are few extant formats that meet these requirements, so
+wptrunner uses a bespoke ``expectation manifest`` format, which is
+closely based on the standard ``ini`` format.
+
+Directory Layout
+----------------
+
+Expectation manifest files must be stored under the ``metadata``
+directory passed to the test runner. The directory layout follows that
+of web-platform-tests with each test path having a corresponding
+manifest file. Tests that differ only by query string, or reftests
+with the same test path but different ref paths share the same
+manifest file. The file name is taken from the last /-separated part
+of the path, suffixed with ``.ini``.
+
+As an optimisation, files which produce only default results
+(i.e. ``PASS`` or ``OK``) don't require a corresponding manifest file.
+
+For example a test with url::
+
+  /spec/section/file.html?query=param
+
+would have an expectation file ::
+
+  metadata/spec/section/file.html.ini
+
+
+.. _wptupdate-label:
+
+Generating Expectation Files
+----------------------------
+
+wptrunner provides the tool ``wptupdate`` to generate expectation
+files from the results of a set of baseline test runs. The basic
+syntax for this is::
+
+  wptupdate [options] [logfile]...
+
+Each ``logfile`` is a structured log file from a previous run. These
+can be generated from wptrunner using the ``--log-raw`` option
+e.g. ``--log-raw=structured.log``. The default behaviour is to update
+all the test data for the particular combination of hardware and OS
+used in the run corresponding to the log data, whilst leaving any
+other expectations untouched.
+
+wptupdate takes several useful options:
+
+``--sync``
+  Pull the latest version of web-platform-tests from the
+  upstream specified in the config file. If this is specified in
+  combination with logfiles, it is assumed that the results in the log
+  files apply to the post-update tests.
+
+``--no-check-clean``
+  Don't attempt to check if the working directory is clean before
+  doing the update (assuming that the working directory is a git or
+  mercurial tree).
+
+``--patch``
+  Create a branch containing a git commit, or a mq patch with the
+  changes made by wptupdate.
+
+``--ignore-existing``
+  Overwrite all the expectation data for any tests that have a result
+  in the passed log files, not just data for the same platform.
+
+Examples
+~~~~~~~~
+
+Update the local copy of web-platform-tests without changing the
+expectation data and commit (or create a mq patch for) the result::
+
+  wptupdate --patch --sync
+
+Update all the expectations from a set of cross-platform test runs::
+
+  wptupdate --no-check-clean --patch osx.log linux.log windows.log
+
+Add expectation data for some new tests that are expected to be
+platform-independent::
+
+  wptupdate --no-check-clean --patch --ignore-existing tests.log
+
+Manifest Format
+---------------
+The format of the manifest files is based on the ini format. Files are
+divided into sections, each (apart from the root section) having a
+heading enclosed in square braces. Within each section are key-value
+pairs. There are several notable differences from standard .ini files,
+however:
+
+ * Sections may be hierarchically nested, with significant whitespace
+   indicating nesting depth.
+
+ * Only ``:`` is valid as a key/value separator
+
+A simple example of a manifest file is::
+
+  root_key: root_value
+
+  [section]
+    section_key: section_value
+
+    [subsection]
+       subsection_key: subsection_value
+
+  [another_section]
+    another_key: another_value
+
+Conditional Values
+~~~~~~~~~~~~~~~~~~
+
+In order to support values that depend on some external data, the
+right hand side of a key/value pair can take a set of conditionals
+rather than a plain value. These values are placed on a new line
+following the key, with significant indentation. Conditional values
+are prefixed with ``if`` and terminated with a colon, for example::
+
+  key:
+    if cond1: value1
+    if cond2: value2
+    value3
+
+In this example, the value associated with ``key`` is determined by
+first evaluating ``cond1`` against external data. If that is true,
+``key`` is assigned the value ``value1``, otherwise ``cond2`` is
+evaluated in the same way. If both ``cond1`` and ``cond2`` are false,
+the unconditional ``value3`` is used.
+
+Conditions themselves use a Python-like expression syntax. Operands
+can either be variables, corresponding to data passed in, numbers
+(integer or floating point; exponential notation is not supported) or
+quote-delimited strings. Equality is tested using ``==`` and
+inequality by ``!=``. The operators ``and``, ``or`` and ``not`` are
+used in the expected way. Parentheses can also be used for
+grouping. For example::
+
+  key:
+    if (a == 2 or a == 3) and b == "abc": value1
+    if a == 1 or b != "abc": value2
+    value3
+
+Here ``a`` and ``b`` are variables, the value of which will be
+supplied when the manifest is used.
+
+Expectation Manifests
+---------------------
+
+When used for expectation data, manifests have the following format:
+
+ * A section per test URL described by the manifest, with the section
+   heading being the part of the test URL following the last ``/`` in
+   the path (this allows multiple tests in a single manifest file with
+   the same path part of the URL, but different query parts).
+
+ * A subsection per subtest, with the heading being the title of the
+   subtest.
+
+ * A key ``type`` indicating the test type. This takes the values
+   ``testharness`` and ``reftest``.
+
+ * For reftests, keys ``reftype`` indicating the reference type
+   (``==`` or ``!=``) and ``refurl`` indicating the URL of the
+   reference.
+
+ * A key ``expected`` giving the expectation value of each (sub)test.
+
+ * A key ``disabled`` which can be set to any value to indicate that
+   the (sub)test is disabled and should either not be run (for tests)
+   or that its results should be ignored (subtests).
+
+ * Variables ``debug``, ``os``, ``version``, ``processor`` and
+   ``bits`` that describe the configuration of the browser under
+   test. ``debug`` is a boolean indicating whether a build is a debug
+   build. ``os`` is a string indicating the operating system, and
+   ``version`` a string indicating the particular version of that
+   operating system. ``processor`` is a string indicating the
+   processor architecture and ``bits`` an integer indicating the
+   number of bits. This information is typically provided by
+   :py:mod:`mozinfo`.
+
+ * Top level keys are taken as defaults for the whole file. So, for
+   example, a top level key with ``expected: FAIL`` would indicate
+   that all tests and subtests in the file are expected to fail,
+   unless they have an ``expected`` key of their own.
+
+A simple example manifest might look like::
+
+  [test.html?variant=basic]
+    type: testharness
+
+    [Test something unsupported]
+       expected: FAIL
+
+  [test.html?variant=broken]
+    expected: ERROR
+
+  [test.html?variant=unstable]
+    disabled: http://test.bugs.example.org/bugs/12345
+
+A more complex manifest with conditional properties might be::
+
+  [canvas_test.html]
+    expected:
+      if os == "osx": FAIL
+      if os == "windows" and version == "XP": FAIL
+      PASS
+
+Note that ``PASS`` in the above works, but is unnecessary; ``PASS``
+(or ``OK``) is always the default expectation for (sub)tests.
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/docs/index.rst
@@ -0,0 +1,24 @@
+.. wptrunner documentation master file, created by
+   sphinx-quickstart on Mon May 19 18:14:20 2014.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to wptrunner's documentation!
+=====================================
+
+Contents:
+
+.. toctree::
+   :maxdepth: 2
+
+   usage
+   expectation
+   design
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/docs/make.bat
@@ -0,0 +1,242 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+	set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+	set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+	set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+	:help
+	echo.Please use `make ^<target^>` where ^<target^> is one of
+	echo.  html       to make standalone HTML files
+	echo.  dirhtml    to make HTML files named index.html in directories
+	echo.  singlehtml to make a single large HTML file
+	echo.  pickle     to make pickle files
+	echo.  json       to make JSON files
+	echo.  htmlhelp   to make HTML files and a HTML help project
+	echo.  qthelp     to make HTML files and a qthelp project
+	echo.  devhelp    to make HTML files and a Devhelp project
+	echo.  epub       to make an epub
+	echo.  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+	echo.  text       to make text files
+	echo.  man        to make manual pages
+	echo.  texinfo    to make Texinfo files
+	echo.  gettext    to make PO message catalogs
+	echo.  changes    to make an overview over all changed/added/deprecated items
+	echo.  xml        to make Docutils-native XML files
+	echo.  pseudoxml  to make pseudoxml-XML files for display purposes
+	echo.  linkcheck  to check all external links for integrity
+	echo.  doctest    to run all doctests embedded in the documentation if enabled
+	goto end
+)
+
+if "%1" == "clean" (
+	for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+	del /q /s %BUILDDIR%\*
+	goto end
+)
+
+
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 (
+	echo.
+	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+	echo.installed, then set the SPHINXBUILD environment variable to point
+	echo.to the full path of the 'sphinx-build' executable. Alternatively you
+	echo.may add the Sphinx directory to PATH.
+	echo.
+	echo.If you don't have Sphinx installed, grab it from
+	echo.http://sphinx-doc.org/
+	exit /b 1
+)
+
+if "%1" == "html" (
+	%SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+	goto end
+)
+
+if "%1" == "dirhtml" (
+	%SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+	goto end
+)
+
+if "%1" == "singlehtml" (
+	%SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+	goto end
+)
+
+if "%1" == "pickle" (
+	%SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the pickle files.
+	goto end
+)
+
+if "%1" == "json" (
+	%SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can process the JSON files.
+	goto end
+)
+
+if "%1" == "htmlhelp" (
+	%SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+	goto end
+)
+
+if "%1" == "qthelp" (
+	%SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+	echo.^> qcollectiongenerator %BUILDDIR%\qthelp\wptrunner.qhcp
+	echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\wptrunner.ghc
+	goto end
+)
+
+if "%1" == "devhelp" (
+	%SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished.
+	goto end
+)
+
+if "%1" == "epub" (
+	%SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The epub file is in %BUILDDIR%/epub.
+	goto end
+)
+
+if "%1" == "latex" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdf" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf
+	cd %BUILDDIR%/..
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "latexpdfja" (
+	%SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+	cd %BUILDDIR%/latex
+	make all-pdf-ja
+	cd %BUILDDIR%/..
+	echo.
+	echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+	goto end
+)
+
+if "%1" == "text" (
+	%SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The text files are in %BUILDDIR%/text.
+	goto end
+)
+
+if "%1" == "man" (
+	%SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The manual pages are in %BUILDDIR%/man.
+	goto end
+)
+
+if "%1" == "texinfo" (
+	%SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+	goto end
+)
+
+if "%1" == "gettext" (
+	%SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+	goto end
+)
+
+if "%1" == "changes" (
+	%SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.The overview file is in %BUILDDIR%/changes.
+	goto end
+)
+
+if "%1" == "linkcheck" (
+	%SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+	goto end
+)
+
+if "%1" == "doctest" (
+	%SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+	goto end
+)
+
+if "%1" == "xml" (
+	%SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The XML files are in %BUILDDIR%/xml.
+	goto end
+)
+
+if "%1" == "pseudoxml" (
+	%SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
+	if errorlevel 1 exit /b 1
+	echo.
+	echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
+	goto end
+)
+
+:end
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/docs/usage.rst
@@ -0,0 +1,157 @@
+Getting Started
+===============
+
+Installing wptrunner
+--------------------
+
+The easiest way to install wptrunner is into a virtualenv, using pip::
+
+  virtualenv wptrunner
+  cd wptrunner
+  source bin/activate
+  pip install wptrunner
+
+This will install the base dependencies for wptrunner, but not any
+extra dependencies required to test against specific browsers. In
+order to do this you must use the extra requirements files in
+``$VIRTUAL_ENV/requirements/requirements_browser.txt``. For example,
+in order to test against Firefox you would have to run::
+
+  pip install -r requirements/requirements_firefox.txt
+
+If you intend to work on the code, the ``-e`` option to pip should be
+used in combination with a source checkout i.e. inside a virtual
+environment created as above::
+
+  git clone https://github.com/w3c/wptrunner.git
+  cd wptrunner
+  pip install -e ./
+
+In addition to the dependencies installed by pip, wptrunner requires
+a copy of the web-platform-tests. This can be located anywhere on
+the filesystem, but the easiest option is to put it in a sibling
+directory of the wptrunner checkout called `tests`::
+
+  git clone https://github.com/w3c/web-platform-tests.git tests
+
+It is also necessary to generate the ``MANIFEST.json`` file for the
+web-platform-tests. It is recommended to put this file in a separate
+directory called ``meta``::
+
+  mkdir meta
+  cd web-platform-tests
+  python tools/scripts/manifest.py ../meta/MANIFEST.json
+
+This file needs to be regenerated every time that the
+web-platform-tests checkout is updated. To aid with the update process
+there is a tool called ``wptupdate``, which is described in
+:ref:`wptupdate-label`.
+
+Running the Tests
+-----------------
+
+A test run is started using the ``wptrunner`` command. By default this
+assumes that tests are in a subdirectory of the current directory
+called ``tests`` and the metadata is in a subdirectory called
+``meta``. These defaults can be changed using either a command line
+flag or a configuration file.
+
+To specify the browser product to test against, use the ``--product``
+flag. If no product is specified, the default is ``firefox`` which
+tests Firefox desktop. ``wptrunner --help`` can be used to see a list
+of supported products. Note that this does not take account of the
+products for which the correct dependencies have been installed.
+
+Depending on the product, further arguments may be required. For
+example when testing desktop browsers ``--binary`` is commonly needed
+to specify the path to the browser executable. So a complete command
+line for running tests on firefox desktop might be::
+
+  wptrunner --product=firefox --binary=/usr/bin/firefox
+
+It is also possible to run multiple browser instances in parallel to
+speed up the testing process. This is achieved through the
+``--processes=N`` argument e.g. ``--processes=6`` would attempt to run
+6 browser instances in parallel. Note that behaviour in this mode is
+necessarily less deterministic than with ``--processes=1`` (the
+default) so there may be more noise in the test results.
+
+Further help can be obtained from::
+
+  wptrunner --help
+
+Output
+------
+
+wptrunner uses the :py:mod:`mozlog.structured` package for output. This
+structures events such as test results or log messages as JSON objects
+that can then be fed to other tools for interpretation. More details
+about the message format are given in the
+:py:mod:`mozlog.structured` documentation.
+
+By default the raw JSON messages are dumped to stdout. This is
+convenient for piping into other tools, but not ideal for humans
+reading the output. :py:mod:`mozlog` comes with several other
+formatters, which are accessible through command line options. The
+general format of these options is ``--log-name=dest``, where ``name``
+is the name of the format and ``dest`` is a path to a destination
+file, or ``-`` for stdout. The raw JSON data is written by the ``raw``
+formatter so, the default setup corresponds to ``--log-raw=-``.
+
+A reasonable output format for humans is provided as ``mach``. So in
+order to output the full raw log to a file and a human-readable
+summary to stdout, one might pass the options::
+
+  --log-raw=output.log --log-mach=-
+
+Configuration File
+------------------
+
+wptrunner uses a ``.ini`` file to control some configuration
+sections. The file has three sections; ``[products]``,
+``[paths]`` and ``[web-platform-tests]``.
+
+``[products]`` is used to
+define the set of available products. By default this section is empty
+which means that all the products distributed with wptrunner are
+enabled (although their dependencies may not be installed). The set
+of enabled products can be set by using the product name as the
+key. For built in products the value is empty. It is also possible to
+provide the path to a script implementing the browser functionality
+e.g.::
+
+  [products]
+  chrome =
+  netscape4 = path/to/netscape.py
+
+``[paths]`` specifies the default paths for the tests and metadata,
+relative to the config file. For example::
+
+  [paths]
+  tests = checkouts/web-platform-tests
+  metadata = /home/example/wpt/metadata
+
+
+``[web-platform-tests]`` is used to set the properties of the upstream
+repository when updating the paths. ``remote_url`` specifies the git
+url to pull from; ``branch`` the branch to sync against and
+``sync_path`` the local path, relative to the configuration file, to
+use when checking out the tests e.g.::
+
+  [web-platform-tests]
+  remote_url = https://github.com/w3c/web-platform-tests.git
+  branch = master
+  sync_path = sync
+
+A configuration file must contain all the above fields; falling back
+to the default values for unspecified fields is not yet supported.
+
+The ``wptrunner`` and ``wptupdate`` commands will use configuration
+files in the following order:
+
+ * Any path supplied with a ``--config`` flag to the command.
+
+ * A file called ``wptrunner.ini`` in the current directory
+
+ * The default configuration file (``wptrunner.default.ini`` in the
+   source directory)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/requirements.txt
@@ -0,0 +1,5 @@
+html5lib >= 0.99
+mozinfo >= 0.7
+mozlog >= 1.8
+# Unfortunately, just for gdb flags
+mozrunner >= 6.1
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/requirements_b2g.txt
@@ -0,0 +1,7 @@
+fxos_appgen >= 0.5
+mozdevice >= 0.37
+gaiatest >= 0.26
+marionette_client >= 0.7.10
+moznetwork >= 0.24
+mozprofile >= 0.21
+mozrunner >= 6.1
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/requirements_chrome.txt
@@ -0,0 +1,2 @@
+mozprocess >= 0.19
+selenium >= 2.41.0
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/requirements_firefox.txt
@@ -0,0 +1,4 @@
+marionette_client >= 0.7.10
+mozprofile >= 0.21
+mozprocess >= 0.19
+mozcrash >= 0.13
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/requirements_servo.txt
@@ -0,0 +1,1 @@
+mozprocess >= 0.19
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/setup.py
@@ -0,0 +1,71 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import glob
+import os
+import sys
+import textwrap
+
+from setuptools import setup, find_packages
+
+PACKAGE_NAME = 'wptrunner'
+PACKAGE_VERSION = '1.1.1'
+
+# Dependencies
+with open('requirements.txt') as f:
+    deps = f.read().splitlines()
+
+# Browser-specific requirements
+requirements_files = glob.glob("requirements_*.txt")
+
+profile_dest = None
+dest_exists = False
+
+setup(name=PACKAGE_NAME,
+      version=PACKAGE_VERSION,
+      description="Harness for running the W3C web-platform-tests against various products",
+      author='Mozilla Automation and Testing Team',
+      author_email='tools@lists.mozilla.org',
+      license='MPL 2.0',
+      packages=find_packages(exclude=["tests", "metadata", "prefs"]),
+      entry_points={
+          'console_scripts': [
+              'wptrunner = wptrunner.wptrunner:main',
+              'wptupdate = wptrunner.update:main',
+          ]
+      },
+      zip_safe=False,
+      platforms=['Any'],
+      classifiers=['Development Status :: 4 - Beta',
+                   'Environment :: Console',
+                   'Intended Audience :: Developers',
+                   'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
+                   'Operating System :: OS Independent'],
+      package_data={"wptrunner": ["executors/testharness_marionette.js",
+                                  "executors/testharness_webdriver.js",
+                                  "executors/reftest.js",
+                                  "executors/reftest-wait.js",
+                                  "testharnessreport.js",
+                                  "testharness_runner.html",
+                                  "config.json",
+                                  "wptrunner.default.ini",
+                                  "browsers/server-locations.txt",
+                                  "browsers/b2g_setup/*",
+                                  "prefs/*"]},
+      include_package_data=True,
+      data_files=[("requirements", requirements_files)],
+      install_requires=deps
+     )
+
+if "install" in sys.argv:
+    path = os.path.relpath(os.path.join(sys.prefix, "requirements"), os.curdir)
+    print textwrap.fill("""In order to use with one of the built-in browser
+products, you will need to install the extra dependencies. These are provided
+as requirements_[name].txt in the %s directory and can be installed using
+e.g.""" % path, 80)
+
+    print """
+
+pip install -r %s/requirements_firefox.txt
+""" % path
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner.default.ini
@@ -0,0 +1,10 @@
+[products]
+
+[web-platform-tests]
+remote_url = https://github.com/w3c/web-platform-tests.git
+branch = master
+sync_path = %(pwd)s/sync
+
+[paths]
+tests = %(pwd)s/tests
+metadata = %(pwd)s/meta
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/__init__.py
@@ -0,0 +1,3 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/browsers/__init__.py
@@ -0,0 +1,32 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Subpackage where each product is defined. Each product is created by adding a
+a .py file containing a __wptrunner__ variable in the global scope. This must be
+a dictionary with the fields
+
+"product": Name of the product, assumed to be unique.
+"browser": String indicating the Browser implementation used to launch that
+           product.
+"executor": Dictionary with keys as supported test types and values as the name
+            of the Executor implemantation that will be used to run that test
+            type.
+"browser_kwargs": String naming function that takes product, binary,
+                  prefs_root and the wptrunner.run_tests kwargs dict as arguments
+                  and returns a dictionary of kwargs to use when creating the
+                  Browser class.
+"executor_kwargs": String naming a function that takes http server url and
+                   timeout multiplier and returns kwargs to use when creating
+                   the executor class.
+"env_options": String naming a funtion of no arguments that returns the
+               arguments passed to the TestEnvironment.
+
+All classes and functions named in the above dict must be imported into the
+module global scope.
+"""
+
+product_list = ["b2g",
+                "chrome",
+                "firefox",
+                "servo"]
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/browsers/b2g.py
@@ -0,0 +1,258 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import tempfile
+import shutil
+import subprocess
+
+import fxos_appgen
+import gaiatest
+import mozdevice
+import moznetwork
+import mozrunner
+from mozprofile import FirefoxProfile, Preferences
+
+from .base import get_free_port, BrowserError, Browser, ExecutorBrowser
+from ..executors.executormarionette import MarionetteTestharnessExecutor, required_files
+from ..hosts import HostsFile, HostsLine
+
+here = os.path.split(__file__)[0]
+
+__wptrunner__ = {"product": "b2g",
+                 "check_args": "check_args",
+                 "browser": "B2GBrowser",
+                 "executor": {"testharness": "B2GMarionetteTestharnessExecutor"},
+                 "browser_kwargs": "browser_kwargs",
+                 "executor_kwargs": "executor_kwargs",
+                 "env_options": "env_options"}
+
+
def check_args(**kwargs):
    """B2G requires no product-specific command-line arguments."""
    pass
+
+
def browser_kwargs(**kwargs):
    """Extract the keyword arguments used to construct B2GBrowser.

    ``b2g_no_backup`` defaults to False when not supplied."""
    no_backup = kwargs.get("b2g_no_backup", False)
    return {"prefs_root": kwargs["prefs_root"],
            "no_backup": no_backup}
+
+
def executor_kwargs(http_server_url, **kwargs):
    """Build the keyword arguments for the B2G testharness executor.

    A missing (None) timeout multiplier defaults to 2."""
    multiplier = kwargs["timeout_multiplier"]
    if multiplier is None:
        multiplier = 2
    return {"http_server_url": http_server_url,
            "timeout_multiplier": multiplier,
            "close_after_done": False}
+
+
def env_options():
    """Test environment options used when running against a B2G device."""
    options = dict(host="web-platform.test",
                   bind_hostname="false",
                   test_server_port=False)
    options["required_files"] = required_files
    return options
+
+
class B2GBrowser(Browser):
    """Browser implementation for Firefox OS (B2G) devices driven over adb.

    setup() backs up the relevant device state and installs a hosts file
    pointing the web-platform.test domains at the test host; cleanup()
    restores the backup and reboots the device.
    """
    # Marionette ports already handed out; shared across instances so that
    # concurrent browsers don't collide on the forwarded port.
    used_ports = set()
    # Device startup is slow compared to desktop; allow extra time.
    init_timeout = 180

    def __init__(self, logger, prefs_root, no_backup=False):
        """:param logger: Structured logger to use for output.
        :param prefs_root: Directory expected to contain prefs_general.js.
        :param no_backup: If True, skip backing up device state in setup().
        """
        Browser.__init__(self, logger)
        logger.info("Waiting for device")
        subprocess.call(["adb", "wait-for-device"])
        self.device = mozdevice.DeviceManagerADB()
        self.marionette_port = get_free_port(2828, exclude=self.used_ports)
        self.used_ports.add(self.marionette_port)
        self.cert_test_app = None
        self.runner = None
        self.prefs_root = prefs_root

        self.no_backup = no_backup
        self.backup_path = None
        # Lists of (remote path, local backup copy) pairs, filled in setup().
        self.backup_paths = []
        self.backup_dirs = []

    def setup(self):
        """Back up device state (unless no_backup) and install the test hosts file."""
        self.logger.info("Running B2G setup")
        self.backup_path = tempfile.mkdtemp()

        self.logger.debug("Backing up device to %s"  % (self.backup_path,))

        if not self.no_backup:
            self.backup_dirs = [("/data/local", os.path.join(self.backup_path, "local")),
                                ("/data/b2g/mozilla", os.path.join(self.backup_path, "profile"))]

            self.backup_paths = [("/system/etc/hosts", os.path.join(self.backup_path, "hosts"))]

            for remote, local in self.backup_dirs:
                self.device.getDirectory(remote, local)

            for remote, local in self.backup_paths:
                self.device.getFile(remote, local)

        self.setup_hosts()

    def start(self):
        """Start B2G on the device with Marionette enabled."""
        profile = FirefoxProfile()

        profile.set_preferences({"dom.disable_open_during_load": False,
                                 "marionette.defaultPrefs.enabled": True})

        self.logger.debug("Creating device runner")
        self.runner = mozrunner.B2GDeviceRunner(profile=profile)
        self.logger.debug("Starting device runner")
        self.runner.start()
        self.logger.debug("Device runner started")

    def setup_hosts(self):
        """Rewrite the device's /system/etc/hosts so the web-platform.test
        hostnames resolve to this (test server) machine's IP address."""
        hostnames = ["web-platform.test",
                     "www.web-platform.test",
                     "www1.web-platform.test",
                     "www2.web-platform.test",
                     "xn--n8j6ds53lwwkrqhv28a.web-platform.test",
                     "xn--lve-6lad.web-platform.test"]

        host_ip = moznetwork.get_ip()

        temp_dir = tempfile.mkdtemp()
        hosts_path = os.path.join(temp_dir, "hosts")
        remote_path = "/system/etc/hosts"
        try:
            self.device.getFile("/system/etc/hosts", hosts_path)

            with open(hosts_path) as f:
                hosts_file = HostsFile.from_file(f)

            for canonical_hostname in hostnames:
                hosts_file.set_host(HostsLine(host_ip, canonical_hostname))

            with open(hosts_path, "w") as f:
                hosts_file.to_file(f)

            self.logger.info("Installing hosts file")

            # /system is normally read-only; remount before writing to it.
            self.device.remount()
            self.device.removeFile(remote_path)
            self.device.pushFile(hosts_path, remote_path)
        finally:
            os.unlink(hosts_path)
            os.rmdir(temp_dir)

    def load_prefs(self):
        """Read preferences from prefs_general.js under prefs_root.

        Returns an empty list (with a warning) when the file is missing."""
        prefs_path = os.path.join(self.prefs_root, "prefs_general.js")
        if os.path.exists(prefs_path):
            preferences = Preferences.read_prefs(prefs_path)
        else:
            self.logger.warning("Failed to find base prefs file in %s" % prefs_path)
            preferences = []

        return preferences

    def stop(self):
        # Intentionally a no-op; the device is restored and rebooted in cleanup().
        pass

    def on_output(self):
        # NOTE(review): the base class declares on_output(self, line); this
        # override takes no line argument -- confirm it is never called.
        raise NotImplementedError

    def cleanup(self):
        """Restore the backed-up device state and reboot the device."""
        self.logger.debug("Running browser cleanup steps")

        self.device.remount()

        for remote, local in self.backup_dirs:
            self.device.removeDir(remote)
            self.device.pushDir(local, remote)

        for remote, local in self.backup_paths:
            self.device.removeFile(remote)
            self.device.pushFile(local, remote)

        shutil.rmtree(self.backup_path)
        self.device.reboot(wait=True)

    def pid(self):
        # The browser runs on the device, so there is no local numeric pid.
        return "Remote"

    def is_alive(self):
        # No liveness check is performed over adb; the device is assumed up.
        return True

    def executor_browser(self):
        """Return the ExecutorBrowser class and kwargs for the child process."""
        return B2GExecutorBrowser, {"marionette_port": self.marionette_port}
+
+
class B2GExecutorBrowser(ExecutorBrowser):
    """View of B2GBrowser used from the executor's child process.

    The methods here run in a different process from the one that owns
    B2GBrowser, so a fresh adb connection is created here rather than
    sharing the parent's DeviceManagerADB instance.
    """

    def __init__(self, *args, **kwargs):
        ExecutorBrowser.__init__(self, *args, **kwargs)

        # Forward the host-side marionette_port to the Marionette server
        # listening on the device (port 2828).
        self.dm = mozdevice.DeviceManagerADB()
        self.dm.forward("tcp:%s" % self.marionette_port,
                        "tcp:2828")
        self.executor = None
        self.marionette = None
        self.gaia_device = None
        self.gaia_apps = None

    def after_connect(self, executor):
        """Device-side setup run once the executor has a Marionette session:
        wait for the homescreen, then install and launch the CertTest app."""
        self.executor = executor
        self.marionette = executor.marionette
        self.executor.logger.debug("Running browser.after_connect steps")

        self.gaia_apps = gaiatest.GaiaApps(marionette=executor.marionette)

        self.executor.logger.debug("Waiting for homescreen to load")

        # Moved out of gaia_test temporarily
        self.executor.logger.info("Waiting for B2G to be ready")
        self.wait_for_homescreen(timeout=60)

        self.install_cert_app()
        self.use_cert_app()

    def install_cert_app(self):
        """Install the container app used to run the tests"""
        if fxos_appgen.is_installed("CertTest App"):
            self.executor.logger.info("CertTest App is already installed")
            return
        self.executor.logger.info("Installing CertTest App")
        app_path = os.path.join(here, "b2g_setup", "certtest_app.zip")
        fxos_appgen.install_app("CertTest App", app_path, marionette=self.marionette)
        self.executor.logger.debug("Install complete")

    def use_cert_app(self):
        """Start the app used to run the tests"""
        self.executor.logger.info("Homescreen loaded")
        self.gaia_apps.launch("CertTest App")

    def wait_for_homescreen(self, timeout):
        """Block until the homescreen (or FTU) document has finished loading.

        :param timeout: Time limit in seconds for the async script."""
        self.executor.logger.info("Waiting for homescreen")
        self.marionette.execute_async_script("""
let manager = window.wrappedJSObject.AppWindowManager || window.wrappedJSObject.WindowManager;
let app = null;
if (manager) {
  app = ('getActiveApp' in manager) ? manager.getActiveApp() : manager.getCurrentDisplayedApp();
}
if (app) {
  log('Already loaded home screen');
  marionetteScriptFinished();
} else {
  log('waiting for mozbrowserloadend');
  window.addEventListener('mozbrowserloadend', function loaded(aEvent) {
    log('received mozbrowserloadend for ' + aEvent.target.src);
    if (aEvent.target.src.indexOf('ftu') != -1 || aEvent.target.src.indexOf('homescreen') != -1) {
      window.removeEventListener('mozbrowserloadend', loaded);
      marionetteScriptFinished();
    }
  });
}""", script_timeout=1000 * timeout)
+
class B2GMarionetteTestharnessExecutor(MarionetteTestharnessExecutor):
    """Marionette testharness executor that performs the B2G-specific
    browser setup (homescreen wait, CertTest app) once connected."""

    def after_connect(self):
        # Let the browser view do its device setup before the generic
        # testharness after_connect steps run.
        self.browser.after_connect(self)
        MarionetteTestharnessExecutor.after_connect(self)
new file mode 100644
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/browsers/base.py
@@ -0,0 +1,139 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import platform
+import socket
+from abc import ABCMeta, abstractmethod
+
+from ..wptcommandline import require_arg
+
+here = os.path.split(__file__)[0]
+
+
def cmd_arg(name, value=None):
    """Format *name* (and optional *value*) as a command-line argument.

    On Windows a single leading dash is used; elsewhere the conventional
    double dash."""
    flag = ("-" if platform.system() == "Windows" else "--") + name
    return flag if value is None else "%s=%s" % (flag, value)
+
+
def get_free_port(start_port, exclude=None):
    """Return the first port number >= start_port that can be bound.

    The probe socket is closed before returning, so the port is free at the
    time of the check but not reserved afterwards.

    :param start_port: Integer port number at which to start probing.
    :param exclude: Optional collection of port numbers to skip without probing."""
    skip = exclude or ()
    candidate = start_port
    while True:
        if candidate not in skip:
            probe = socket.socket()
            try:
                probe.bind(("127.0.0.1", candidate))
            except socket.error:
                pass
            else:
                return candidate
            finally:
                probe.close()
        candidate += 1
+
+
class BrowserError(Exception):
    """Raised for errors launching or controlling a browser."""
    pass
+
+
class Browser(object):
    """Abstract base class for Browser implementations.

    The Browser is used in the TestRunnerManager to start and stop the browser
    process, and to check the state of that process. This class also acts as a
    context manager, enabling it to do browser-specific setup at the start of
    the testrun and cleanup after the run is complete.
    """
    __metaclass__ = ABCMeta

    # Process-handler class used to manage the browser process, where the
    # subclass owns a local process; None otherwise.
    process_cls = None
    # Time allowed for the browser to become ready (presumably seconds;
    # B2G overrides this with a much larger value).
    init_timeout = 30

    def __init__(self, logger):
        """:param logger: Structured logger to use for output."""
        self.logger = logger

    def __enter__(self):
        self.setup()
        return self

    def __exit__(self, *args, **kwargs):
        self.cleanup()

    def setup(self):
        """Used for browser-specific setup that happens at the start of a test run"""
        pass

    @abstractmethod
    def start(self):
        """Launch the browser object and get it into a state where it is ready to run tests"""
        pass

    @abstractmethod
    def stop(self):
        """Stop the running browser process."""
        pass

    @abstractmethod
    def on_output(self, line):
        """Callback function used with ProcessHandler to handle output from the browser process."""
        pass

    @abstractmethod
    def is_alive(self):
        """Boolean indicating whether the browser process is still running"""
        pass

    def cleanup(self):
        """Browser-specific cleanup that is run after the testrun is finished"""
        pass

    def executor_browser(self):
        """Returns the ExecutorBrowser subclass for this Browser subclass and the keyword arguments
        with which it should be instantiated"""
        return ExecutorBrowser, {}

    def log_crash(self, logger, process, test):
        """Record a crash for *process* while running *test* via the structured
        logger. (Note: despite the original description, this implementation
        returns nothing; it only emits a crash log entry.)"""
        logger.crash(process, test)
+
class NullBrowser(Browser):
    """No-op browser to use in scenarios where the TestRunnerManager shouldn't
    actually own the browser process (e.g. Servo where we start one browser
    per test)."""

    def start(self):
        pass

    def stop(self):
        pass

    def is_alive(self):
        # There is no owned process to monitor, so always report alive.
        return True

    def on_output(self, line):
        raise NotImplementedError
+
class ExecutorBrowser(object):
    def __init__(self, **kwargs):
        """View of the Browser used by the Executor object.

        This is needed because the Executor runs in a child process and
        we can't ship Browser instances between processes on Windows.

        Typically this will have a few product-specific properties set,
        but in some cases it may have more elaborate methods for setting
        up the browser from the runner process.
        """
        # Copy every keyword argument onto the instance as an attribute
        # (iteritems: this file targets Python 2).
        for k, v in kwargs.iteritems():
            setattr(self, k, v)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/browsers/chrome.py
@@ -0,0 +1,89 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import mozprocess
+
+from .base import get_free_port, Browser, ExecutorBrowser, require_arg, cmd_arg
+from ..executors.executorselenium import SeleniumTestharnessExecutor, required_files
+
+
+here = os.path.split(__file__)[0]
+
+__wptrunner__ = {"product": "chrome",
+                 "check_args": "check_args",
+                 "browser": "ChromeBrowser",
+                 "executor": {"testharness": "SeleniumTestharnessExecutor"},
+                 "browser_kwargs": "browser_kwargs",
+                 "executor_kwargs": "executor_kwargs",
+                 "env_options": "env_options"}
+
+
def check_args(**kwargs):
    """Validate product-specific command-line arguments; ``binary`` is required."""
    require_arg(kwargs, "binary")
+
+
def browser_kwargs(**kwargs):
    """Select the subset of command-line kwargs that ChromeBrowser.__init__ accepts."""
    return {"binary": kwargs["binary"]}
+
+
def executor_kwargs(http_server_url, **kwargs):
    """Build the keyword arguments for SeleniumTestharnessExecutor.

    :param http_server_url: base url of the http server running the tests.
    A ``timeout_multiplier`` of None in kwargs defaults to 1."""
    # Deferred import so merely loading this module doesn't require selenium.
    from selenium import webdriver

    multiplier = kwargs["timeout_multiplier"]
    if multiplier is None:
        multiplier = 1

    return {"http_server_url": http_server_url,
            "timeout_multiplier": multiplier,
            "capabilities": webdriver.DesiredCapabilities.CHROME}
+
+
def env_options():
    """Options for the test environment: serve on localhost and supply the
    extra files the Selenium executor needs (see executorselenium.required_files)."""
    return {"host": "localhost",
            "bind_hostname": "true",
            "required_files": required_files}
+
+
class ChromeBrowser(Browser):
    """Chrome is backed by chromedriver: ``binary`` is run as the driver
    process (see start(), which logs "Starting chromedriver")."""

    # Ports already handed out; class-level so concurrent instances
    # don't pick the same webdriver port.
    used_ports = set()

    def __init__(self, logger, binary):
        Browser.__init__(self, logger)
        self.binary = binary
        self.webdriver_port = get_free_port(4444, exclude=self.used_ports)
        self.used_ports.add(self.webdriver_port)
        self.proc = None  # mozprocess.ProcessHandler once start() has run
        self.cmd = None

    def start(self):
        """Launch chromedriver listening on the allocated port."""
        self.cmd = [self.binary,
                    cmd_arg("port", str(self.webdriver_port)),
                    cmd_arg("url-base", "wd/url")]
        self.proc = mozprocess.ProcessHandler(self.cmd, processOutputLine=self.on_output)
        self.logger.debug("Starting chromedriver")
        self.proc.run()

    def stop(self):
        """Kill the driver process if one was actually spawned."""
        if self.proc is not None and hasattr(self.proc, "proc"):
            self.proc.kill()

    def pid(self):
        """pid of the driver process, or None if it was never started."""
        if self.proc is not None:
            return self.proc.pid

    def on_output(self, line):
        """Log a line of driver output via the structured logger."""
        self.logger.process_output(self.pid(),
                                   line.decode("utf8", "replace"),
                                   command=" ".join(self.cmd))

    def is_alive(self):
        # NOTE(review): this only checks that a pid was ever assigned, not
        # that the process is still running — confirm whether a liveness
        # poll is needed here.
        return self.pid() is not None

    def cleanup(self):
        self.stop()

    def executor_browser(self):
        """Expose the webdriver port to the executor in the child process."""
        return ExecutorBrowser, {"webdriver_port": self.webdriver_port}
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/browsers/firefox.py
@@ -0,0 +1,151 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
import os

from mozprocess import ProcessHandler
from mozprofile import FirefoxProfile, Preferences
from mozprofile.permissions import ServerLocations
from mozrunner import FirefoxRunner
# Single mozcrash import; the original had three overlapping imports
# (`import mozcrash` twice plus this from-import), of which this last
# binding was the one that actually took effect.
from mozcrash import mozcrash

from .base import get_free_port, Browser, ExecutorBrowser, require_arg, cmd_arg
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executormarionette import MarionetteTestharnessExecutor, MarionetteReftestExecutor, required_files

# os.path.join() with a single argument is the identity, so drop it.
here = os.path.split(__file__)[0]
+
+__wptrunner__ = {"product": "firefox",
+                 "check_args": "check_args",
+                 "browser": "FirefoxBrowser",
+                 "executor": {"testharness": "MarionetteTestharnessExecutor",
+                              "reftest": "MarionetteReftestExecutor"},
+                 "browser_kwargs": "browser_kwargs",
+                 "executor_kwargs": "executor_kwargs",
+                 "env_options": "env_options"}
+
+
def check_args(**kwargs):
    """Validate product-specific command-line arguments; ``binary`` is required."""
    require_arg(kwargs, "binary")
+
+
def browser_kwargs(**kwargs):
    """Select the subset of command-line kwargs that FirefoxBrowser.__init__
    accepts."""
    keys = ("binary", "prefs_root", "debug_args", "interactive",
            "symbols_path", "stackwalk_binary")
    return dict((name, kwargs[name]) for name in keys)
+
+
def executor_kwargs(http_server_url, **kwargs):
    """Extend the shared executor kwargs with Firefox-specific settings:
    close test windows once done and route requests to the proxied
    web-platform.test host rather than the server-supplied url."""
    rv = base_executor_kwargs(http_server_url, **kwargs)
    rv.update({"close_after_done": True,
               "http_server_override": "http://web-platform.test:8000"})
    return rv
+
+
def env_options():
    """Options for the test environment: serve on localhost and supply the
    extra files the Marionette executor needs (see executormarionette.required_files)."""
    return {"host": "localhost",
            "bind_hostname": "true",
            "required_files": required_files}
+
+
class FirefoxBrowser(Browser):
    """Browser subclass that manages a desktop Firefox via mozrunner,
    with a profile set up for Marionette-based test execution."""

    # Marionette ports already handed out; class-level so concurrent
    # instances don't pick the same port (cf. ChromeBrowser.used_ports).
    used_ports = set()

    def __init__(self, logger, binary, prefs_root, debug_args=None, interactive=None,
                 symbols_path=None, stackwalk_binary=None):
        Browser.__init__(self, logger)
        self.binary = binary
        self.prefs_root = prefs_root
        # Fix: the port is now allocated *and registered* in start().
        # Previously None was added to used_ports here and the port chosen in
        # start() was never registered, so two concurrent instances could be
        # handed the same Marionette port.
        self.marionette_port = None
        self.runner = None
        self.debug_args = debug_args
        self.interactive = interactive
        self.profile = None
        self.symbols_path = symbols_path
        self.stackwalk_binary = stackwalk_binary

    def start(self):
        """Launch Firefox with a fresh profile, listening for Marionette
        connections on a free port."""
        self.marionette_port = get_free_port(2828, exclude=self.used_ports)
        self.used_ports.add(self.marionette_port)

        env = os.environ.copy()
        env["MOZ_CRASHREPORTER"] = "1"
        env["MOZ_CRASHREPORTER_SHUTDOWN"] = "1"
        env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
        env["MOZ_DISABLE_NONLOCAL_CONNECTIONS"] = "1"

        locations = ServerLocations(filename=os.path.join(here, "server-locations.txt"))

        preferences = self.load_prefs()

        self.profile = FirefoxProfile(locations=locations, proxy=True, preferences=preferences)
        self.profile.set_preferences({"marionette.defaultPrefs.enabled": True,
                                      "marionette.defaultPrefs.port": self.marionette_port,
                                      # Let tests open windows without user interaction
                                      "dom.disable_open_during_load": False})

        self.runner = FirefoxRunner(profile=self.profile,
                                    binary=self.binary,
                                    cmdargs=[cmd_arg("marionette"), "about:blank"],
                                    env=env,
                                    process_class=ProcessHandler,
                                    process_args={"processOutputLine": [self.on_output]})

        self.logger.debug("Starting Firefox")
        self.runner.start(debug_args=self.debug_args, interactive=self.interactive)
        self.logger.debug("Firefox Started")

    def load_prefs(self):
        """Return base preferences from prefs_root, or an empty list (with a
        warning) if the expected prefs file is missing."""
        prefs_path = os.path.join(self.prefs_root, "prefs_general.js")
        if os.path.exists(prefs_path):
            preferences = Preferences.read_prefs(prefs_path)
        else:
            self.logger.warning("Failed to find base prefs file in %s" % prefs_path)
            preferences = []

        return preferences

    def stop(self):
        """Stop the running browser process, tolerating one that already died."""
        self.logger.debug("Stopping browser")
        if self.runner is not None:
            try:
                self.runner.stop()
            except OSError:
                # This can happen on Windows if the process is already dead
                pass

    def pid(self):
        """Return the pid of the browser process, or None if unavailable."""
        if self.runner.process_handler is None:
            return None

        try:
            return self.runner.process_handler.pid
        except AttributeError:
            return None

    def on_output(self, line):
        """Write a line of output from the firefox process to the log"""
        self.logger.process_output(self.pid(),
                                   line.decode("utf8", "replace"),
                                   command=" ".join(self.runner.command))

    def is_alive(self):
        return self.runner.is_running()

    def cleanup(self):
        self.stop()

    def executor_browser(self):
        # start() must have run, otherwise there is no port to hand over.
        assert self.marionette_port is not None
        return ExecutorBrowser, {"marionette_port": self.marionette_port}

    def log_crash(self, logger, process, test):
        """Log any minidumps found in the profile via mozcrash."""
        dump_dir = os.path.join(self.profile.profile, "minidumps")
        mozcrash.log_crashes(logger, dump_dir, symbols_path=self.symbols_path,
                             stackwalk_binary=self.stackwalk_binary,
                             process=process, test=test)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/browsers/server-locations.txt
@@ -0,0 +1,29 @@
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# See /build/pgo/server-locations.txt for documentation on the format
+
+http://localhost:8000    primary
+
+http://web-platform.test:8000
+http://www.web-platform.test:8000
+http://www1.web-platform.test:8000
+http://www2.web-platform.test:8000
+http://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8000
+http://xn--lve-6lad.web-platform.test:8000
+
+http://web-platform.test:8001
+http://www.web-platform.test:8001
+http://www1.web-platform.test:8001
+http://www2.web-platform.test:8001
+http://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8001
+http://xn--lve-6lad.web-platform.test:8001
+
+ws://web-platform.test:8888
+ws://www.web-platform.test:8888
+ws://www1.web-platform.test:8888
+ws://www2.web-platform.test:8888
+ws://xn--n8j6ds53lwwkrqhv28a.web-platform.test:8888
+ws://xn--lve-6lad.web-platform.test:8888
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/browsers/servo.py
@@ -0,0 +1,47 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+from .base import NullBrowser, ExecutorBrowser, require_arg
+from ..executors import executor_kwargs
+from ..executors.executorservo import ServoTestharnessExecutor
+
+here = os.path.join(os.path.split(__file__)[0])
+
+__wptrunner__ = {"product": "servo",
+                 "check_args": "check_args",
+                 "browser": "ServoBrowser",
+                 "executor": {"testharness": "ServoTestharnessExecutor"},
+                 "browser_kwargs": "browser_kwargs",
+                 "executor_kwargs": "executor_kwargs",
+                 "env_options": "env_options"}
+
+
def check_args(**kwargs):
    """Validate product-specific command-line arguments; ``binary`` is required."""
    require_arg(kwargs, "binary")
+
+
def browser_kwargs(**kwargs):
    """Select the subset of command-line kwargs that ServoBrowser.__init__
    accepts."""
    return dict((key, kwargs[key])
                for key in ("binary", "debug_args", "interactive"))
+
+
def env_options():
    """Options for the test environment.

    Unlike the firefox/chrome products, no required_files entry is needed."""
    return {"host": "localhost",
            "bind_hostname": "true"}
+
+
class ServoBrowser(NullBrowser):
    # NullBrowser base: the executor starts one servo process per test,
    # so the manager never owns a browser process itself.
    def __init__(self, logger, binary, debug_args=None, interactive=False):
        NullBrowser.__init__(self, logger)
        self.binary = binary
        self.debug_args = debug_args
        self.interactive = interactive

    def executor_browser(self):
        """Hand the executor everything it needs to spawn servo itself."""
        return ExecutorBrowser, {"binary": self.binary,
                                 "debug_args": self.debug_args,
                                 "interactive": self.interactive}
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/config.json
@@ -0,0 +1,6 @@
+{"host": "%(host)s",
+ "ports":{"http":[8000, 8001],
+          "https":[],
+          "ws":[8888]},
+ "check_subdomains":false,
+ "bind_hostname":%(bind_hostname)s}
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/config.py
@@ -0,0 +1,62 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import ConfigParser
+import os
+import sys
+
+here = os.path.split(__file__)[0]
+
class ConfigDict(dict):
    """Dict of config values whose path-valued entries are resolved relative
    to the directory the config file lives in (base_path)."""

    def __init__(self, base_path, *args, **kwargs):
        self.base_path = base_path
        dict.__init__(self, *args, **kwargs)

    def get_path(self, key):
        """Return the value of *key* interpreted as a path: ``~`` expanded,
        then joined onto base_path (an absolute value wins the join).

        Fixes: the expanduser() result was previously discarded, so
        ``~``-prefixed values were never expanded; an unused ``pwd`` local
        was also computed."""
        path = os.path.expanduser(self[key])
        return os.path.join(self.base_path, path)
+
def read(config_path):
    """Read an ini file into a dict mapping section name -> ConfigDict.

    Values support ``%(pwd)s`` interpolation (the current working directory);
    path entries in each ConfigDict resolve relative to the directory
    containing the config file.

    :param config_path: path of the ini file to load.
    :raises AssertionError: if the file could not be read/parsed."""
    config_path = os.path.abspath(config_path)
    config_root = os.path.split(config_path)[0]
    parser = ConfigParser.SafeConfigParser()
    success = parser.read(config_path)
    assert config_path in success, success

    subns = {"pwd": os.path.abspath(os.path.curdir)}

    rv = {}
    for section in parser.sections():
        rv[section] = ConfigDict(config_root)
        for key in parser.options(section):
            # Positional args: raw=False, vars=subns (enables %(pwd)s substitution)
            rv[section][key] = parser.get(section, key, False, subns)

    return rv
+
def path(argv=None):
    """Return the absolute path of the config file to use.

    The first ``--config <value>`` or ``--config=<value>`` found in *argv*
    wins; otherwise fall back to ./wptrunner.ini if it exists, and finally
    to the bundled wptrunner.default.ini."""
    args = argv if argv is not None else []
    selected = None

    for idx, token in enumerate(args):
        if token == "--config":
            # A trailing "--config" with no following value is ignored
            if idx + 1 < len(args):
                selected = args[idx + 1]
        elif token.startswith("--config="):
            selected = token.split("=", 1)[1]
        if selected is not None:
            break

    if selected is None:
        if os.path.exists("wptrunner.ini"):
            selected = "wptrunner.ini"
        else:
            selected = os.path.join(here, "..", "wptrunner.default.ini")

    return os.path.abspath(selected)
+
def load():
    """Read the config file selected by --config on the command line
    (sys.argv), falling back to the default search performed by path()."""
    return read(path(sys.argv))
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/executors/__init__.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from base import (executor_kwargs,
+                  testharness_result_converter,
+                  reftest_result_converter,
+                  TestExecutor)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/executors/base.py
@@ -0,0 +1,101 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import os
+from abc import ABCMeta, abstractmethod
+
+here = os.path.split(__file__)[0]
+
+
def executor_kwargs(http_server_url, **kwargs):
    """Build the keyword arguments shared by all TestExecutor subclasses.

    :param http_server_url: base url of the http server running the tests.
    A ``timeout_multiplier`` of None in kwargs defaults to 1."""
    multiplier = kwargs["timeout_multiplier"]
    return {"http_server_url": http_server_url,
            "timeout_multiplier": 1 if multiplier is None else multiplier}
+
+
class TestharnessResultConverter(object):
    """Convert raw testharness.js JSON results into wptrunner result objects."""

    # Numeric status codes emitted by testharness.js for the harness itself
    harness_codes = {0: "OK", 1: "ERROR", 2: "TIMEOUT"}

    # Numeric status codes for individual subtests
    test_codes = {0: "PASS", 1: "FAIL", 2: "TIMEOUT", 3: "NOTRUN"}

    def __call__(self, test, result):
        """Convert a JSON result into a (TestResult, [SubtestResult]) tuple"""
        assert result["test"] == test.url, ("Got results from %s, expected %s" %
                                            (result["test"], test.url))
        harness_result = test.result_cls(self.harness_codes[result["status"]], result["message"])
        subtest_results = []
        for subtest in result["tests"]:
            status = self.test_codes[subtest["status"]]
            subtest_results.append(
                test.subtest_result_cls(subtest["name"], status, subtest["message"]))
        return harness_result, subtest_results
testharness_result_converter = TestharnessResultConverter()
+
+
def reftest_result_converter(self, test, result):
    """Convert a reftest result dict into a (TestResult, [SubtestResult]) tuple.

    Takes ``self`` because — unlike testharness_result_converter, which is a
    callable *instance* — this plain function becomes a bound method when
    assigned to ``convert_result`` on an executor class and called as
    ``self.convert_result(test, result)``."""
    return (test.result_cls(result["status"], result["message"]), [])
+
+
class TestExecutor(object):
    __metaclass__ = ABCMeta

    # Callable converting raw results to (TestResult, [SubtestResult]);
    # assigned by concrete subclasses (testharness/reftest converters above).
    convert_result = None

    def __init__(self, browser, http_server_url, timeout_multiplier=1,
                 http_server_override=None):
        """Abstract Base class for object that actually executes the tests in a
        specific browser. Typically there will be a different TestExecutor
        subclass for each test type and method of executing tests.

        :param browser: ExecutorBrowser instance providing properties of the
                        browser that will be tested.
        :param http_server_url: Base url of the http server on which the tests
                                are running.
        :param timeout_multiplier: Multiplier relative to base timeout to use
                                   when setting test timeout.
        :param http_server_override: Server location to use in place of the
                                     server-supplied default. This is primarily
                                     useful when using a proxy to reroute requests
                                     from e.g. web-platform.test to localhost.
        """

        if http_server_override is not None:
            http_server_url = http_server_override

        # Assigned in setup(); the logger property depends on it.
        self.runner = None
        self.browser = browser
        self.http_server_url = http_server_url
        self.timeout_multiplier = timeout_multiplier

    @property
    def logger(self):
        """StructuredLogger for this executor (None until setup() has run)"""
        if self.runner is not None:
            return self.runner.logger

    @abstractmethod
    def setup(self, runner):
        """Run steps needed before tests can be started e.g. connecting to
        browser instance

        :param runner: TestRunner instance that is going to run the tests"""
        pass

    def teardown(self):
        """Run cleanup steps after tests have finished"""
        pass

    @abstractmethod
    def run_test(self, test):
        """Run a particular test.

        :param test: The test to run"""
        pass
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/executors/executormarionette.py
@@ -0,0 +1,278 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import hashlib
+import os
+import socket
+import sys
+import threading
+import time
+import traceback
+import urlparse
+import uuid
+from collections import defaultdict
+
+marionette = None
+
+here = os.path.join(os.path.split(__file__)[0])
+
+from .base import TestExecutor, testharness_result_converter, reftest_result_converter
+from ..testrunner import Stop
+
+# Extra timeout to use after internal test timeout at which the harness
+# should force a timeout
+extra_timeout = 5 # seconds
+
+required_files = [("testharness_runner.html", "", False),
+                  ("testharnessreport.js", "resources/", True)]
+
+
def do_delayed_imports():
    """Import the marionette module lazily, replacing the module-level None
    placeholder, so that merely importing this file doesn't require
    marionette to be installed."""
    global marionette
    import marionette
+
+
class MarionetteTestExecutor(TestExecutor):
    def __init__(self,
                 browser,
                 http_server_url,
                 timeout_multiplier=1,
                 close_after_done=True,
                 http_server_override=None):
        """Base class for executors that drive the browser over a Marionette
        connection.

        :param browser: ExecutorBrowser exposing the marionette_port to
                        connect to.
        :param close_after_done: Close any extra window a previous test
                                 opened before starting the next one.
        """
        do_delayed_imports()

        TestExecutor.__init__(self, browser, http_server_url, timeout_multiplier,
                              http_server_override)
        self.marionette_port = browser.marionette_port
        self.marionette = None

        self.timer = None
        self.window_id = str(uuid.uuid4())
        self.close_after_done = close_after_done

    def setup(self, runner):
        """Connect to browser via Marionette."""
        self.runner = runner

        self.logger.debug("Connecting to marionette on port %i" % self.marionette_port)
        self.marionette = marionette.Marionette(host='localhost', port=self.marionette_port)
        # XXX Move this timeout somewhere
        self.logger.debug("Waiting for Marionette connection")
        success = self.marionette.wait_for_port(60)
        session_started = False
        if success:
            try:
                self.logger.debug("Starting Marionette session")
                self.marionette.start_session()
            except Exception as e:
                self.logger.warning("Starting marionette session failed: %s" % e)
            else:
                self.logger.debug("Marionette session started")
                session_started = True

        if not success or not session_started:
            self.logger.warning("Failed to connect to Marionette")
            self.runner.send_message("init_failed")
        else:
            try:
                self.after_connect()
            except Exception:
                self.logger.warning("Post-connection steps failed")
                self.logger.error(traceback.format_exc())
                self.runner.send_message("init_failed")
            else:
                self.runner.send_message("init_succeeded")

    def teardown(self):
        """Close the Marionette session, tolerating one that never started."""
        try:
            self.marionette.delete_session()
        except:
            # This is typically because the session never started
            pass
        del self.marionette

    def is_alive(self):
        """Check if the marionette connection is still active"""
        try:
            # Get a simple property over the connection
            self.marionette.current_window_handle
        except:
            return False
        return True

    def after_connect(self):
        """Load the testharness runner page; run once after the session starts."""
        url = urlparse.urljoin(
            self.http_server_url, "/testharness_runner.html")
        self.logger.debug("Loading %s" % url)
        try:
            self.marionette.navigate(url)
        except:
            self.logger.critical(
                "Loading initial page %s failed. Ensure that the "
                "there are no other programs bound to this port and "
                "that your firewall rules or network setup does not "
                "prevent access." % url)
            raise
        self.marionette.execute_script(
            "document.title = '%s'" % threading.current_thread().name.replace("'", '"'))

    def run_test(self, test):
        """Run a single test.

        This method is independent of the test type, and calls
        do_test to implement the type-specific testing functionality.
        """
        # Lock to prevent races between timeouts and other results
        # This might not be strictly necessary if we need to deal
        # with the result changing post-hoc anyway (e.g. due to detecting
        # a crash after we get the data back from marionette)
        result = None
        result_flag = threading.Event()
        result_lock = threading.Lock()

        timeout = test.timeout * self.timeout_multiplier

        def timeout_func():
            # NOTE: `result` below is local to timeout_func (no nonlocal in
            # Python 2); the outer `result` stays None, which also stops the
            # final send at the bottom of run_test from double-reporting.
            with result_lock:
                if not result_flag.is_set():
                    result_flag.set()
                    result = (test.result_cls("EXTERNAL-TIMEOUT", None), [])
                    self.runner.send_message("test_ended", test, result)

        self.timer = threading.Timer(timeout + 2 * extra_timeout, timeout_func)
        self.timer.start()

        try:
            self.marionette.set_script_timeout((timeout + extra_timeout) * 1000)
        except (IOError, marionette.errors.InvalidResponseException):
            # Fixed: the previous `except IOError, ...:` Python 2 form caught
            # only IOError and rebound the exception instance onto the second
            # name; a tuple is required to catch both exception types.
            self.logger.error("Lost marionette connection before starting test")
            return Stop

        try:
            result = self.convert_result(test, self.do_test(test, timeout))
        except marionette.errors.ScriptTimeoutException:
            with result_lock:
                if not result_flag.is_set():
                    result_flag.set()
                    result = (test.result_cls("EXTERNAL-TIMEOUT", None), [])
            # Clean up any unclosed windows
            # This doesn't account for the possibility the browser window
            # is totally hung. That seems less likely since we are still
            # getting data from marionette, but it might be just as well
            # to do a full restart in this case
            # XXX - this doesn't work at the moment because window_handles
            # only returns OS-level windows (see bug 907197)
            # while True:
            #     handles = self.marionette.window_handles
            #     self.marionette.switch_to_window(handles[-1])
            #     if len(handles) > 1:
            #         self.marionette.close()
            #     else:
            #         break
            # Now need to check if the browser is still responsive and restart it if not
        except (socket.timeout, marionette.errors.InvalidResponseException, IOError):
            # This can happen on a crash
            # Also, should check after the test if the firefox process is still running
            # and otherwise ignore any other result and set it to crash
            with result_lock:
                if not result_flag.is_set():
                    result_flag.set()
                    result = (test.result_cls("CRASH", None), [])
        finally:
            self.timer.cancel()

        with result_lock:
            if result:
                self.runner.send_message("test_ended", test, result)

    def do_test(self, test, timeout):
        """Run the steps specific to a given test type for Marionette-based tests.

        :param test: - the Test being run
        :param timeout: - the timeout in seconds to give the test
        """
        raise NotImplementedError
+
class MarionetteTestharnessExecutor(MarionetteTestExecutor):
    convert_result = testharness_result_converter

    def __init__(self, *args, **kwargs):
        """Marionette-based executor for testharness.js tests"""
        MarionetteTestExecutor.__init__(self, *args, **kwargs)
        # Use a context manager so the file handle is closed promptly; the
        # bare open(...).read() leaked it until GC.  This also matches the
        # style used by MarionetteReftestExecutor.
        with open(os.path.join(here, "testharness_marionette.js")) as f:
            self.script = f.read()

    def do_test(self, test, timeout):
        """Run a testharness.js test in the already-loaded harness window.

        :param test: the Test being run
        :param timeout: timeout in seconds for the test"""
        assert len(self.marionette.window_handles) == 1
        if self.close_after_done:
            self.marionette.execute_script("if (window.wrappedJSObject.win) {window.wrappedJSObject.win.close()}")

        return self.marionette.execute_async_script(
            self.script % {"abs_url": urlparse.urljoin(self.http_server_url, test.url),
                           "url": test.url,
                           "window_id": self.window_id,
                           "timeout_multiplier": self.timeout_multiplier,
                           "timeout": timeout * 1000}, new_sandbox=False)
+
+
class MarionetteReftestExecutor(MarionetteTestExecutor):
    convert_result = reftest_result_converter

    def __init__(self, *args, **kwargs):
        """Marionette-based executor for reftests"""
        MarionetteTestExecutor.__init__(self, *args, **kwargs)
        with open(os.path.join(here, "reftest.js")) as f:
            self.script = f.read()
        with open(os.path.join(here, "reftest-wait.js")) as f:
            self.wait_script = f.read()
        # Cache of screenshot hash per reference url, plus the reverse
        # mapping used at teardown to report probably-equivalent references.
        self.ref_hashes = {}
        self.ref_urls_by_hash = defaultdict(set)

    def do_test(self, test, timeout):
        """Screenshot the test page and its reference and compare sha1 hashes.

        :param test: the reftest being run (provides url, ref_type, ref_url)
        :param timeout: currently unused by this implementation"""
        test_url, ref_type, ref_url = test.url, test.ref_type, test.ref_url
        # Reference screenshots are cached by url across tests
        hashes = {"test": None,
                  "ref": self.ref_hashes.get(ref_url)}
        self.marionette.execute_script(self.script)
        self.marionette.switch_to_window(self.marionette.window_handles[-1])
        for url_type, url in [("test", test_url), ("ref", ref_url)]:
            if hashes[url_type] is None:
                # Would like to do this in a new tab each time, but that isn't
                # easy with the current state of marionette
                full_url = urlparse.urljoin(self.http_server_url, url)
                try:
                    self.marionette.navigate(full_url)
                except marionette.errors.MarionetteException:
                    return {"status": "ERROR",
                            "message": "Failed to load url %s" % (full_url,)}
                if url_type == "test":
                    self.wait()
                screenshot = self.marionette.screenshot()
                # strip off the data:img/png, part of the url
                if screenshot.startswith("data:image/png;base64,"):
                    screenshot = screenshot.split(",", 1)[1]
                hashes[url_type] = hashlib.sha1(screenshot).hexdigest()

        self.ref_urls_by_hash[hashes["ref"]].add(ref_url)
        self.ref_hashes[ref_url] = hashes["ref"]

        if ref_type == "==":
            passed = hashes["test"] == hashes["ref"]
        elif ref_type == "!=":
            passed = hashes["test"] != hashes["ref"]
        else:
            # Only == and != reference types are supported
            raise ValueError

        return {"status": "PASS" if passed else "FAIL",
                "message": None}

    def wait(self):
        """Run the reftest-wait script so rendering is complete before screenshotting."""
        self.marionette.execute_async_script(self.wait_script)

    def teardown(self):
        """Report groups of references that rendered identically, then do base teardown."""
        count = 0  # NOTE(review): accumulated but never used; candidate for removal
        for hash_val, urls in self.ref_urls_by_hash.iteritems():
            if len(urls) > 1:
                self.logger.info("The following %i reference urls appear to be equivalent:\n %s" %
                                 (len(urls), "\n  ".join(urls)))
                count += len(urls) - 1
        MarionetteTestExecutor.teardown(self)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/executors/executorselenium.py
@@ -0,0 +1,190 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import socket
+import sys
+import threading
+import time
+import traceback
+import urlparse
+import uuid
+
+from .base import TestExecutor, testharness_result_converter
+from ..testrunner import Stop
+
+
+here = os.path.join(os.path.split(__file__)[0])
+
+webdriver = None
+exceptions = None
+
+required_files = [("testharness_runner.html", "", False),
+                  ("testharnessreport.js", "resources/", True)]
+
+
def do_delayed_imports():
    """Import selenium lazily, binding it to module-level globals.

    This lets the module be imported even when selenium is not installed
    (e.g. when a different browser product is in use); the import only
    happens once an executor is actually constructed.
    """
    global webdriver
    global exceptions
    from selenium import webdriver
    from selenium.common import exceptions
+
+
class SeleniumTestExecutor(TestExecutor):
    """TestExecutor that drives a browser through a remote Selenium server.

    A single long-lived WebDriver session is used for all tests; per-test
    behaviour is supplied by subclasses via do_test.
    """

    def __init__(self, browser, http_server_url, timeout_multiplier=1,
                 http_server_override=None, **kwargs):
        """
        :param browser: Browser object; must provide webdriver_port.
        :param http_server_url: Base url of the server hosting the tests.
        :param timeout_multiplier: Multiplier applied to per-test timeouts.
        :param http_server_override: Optional override for the http server.
        """
        do_delayed_imports()
        TestExecutor.__init__(self, browser, http_server_url, timeout_multiplier,
                              http_server_override)
        self.webdriver_port = browser.webdriver_port
        self.webdriver = None

        self.timer = None
        # Unique window name so the harness script can target the test window
        self.window_id = str(uuid.uuid4())
        self.capabilities = kwargs.pop("capabilities")

    def setup(self, runner):
        """Connect to browser via Selenium's WebDriver implementation."""
        self.runner = runner
        # /wd/hub is the standard remote-end path served by a Selenium
        # server (the previous "/wd/url" is not served by any known server).
        url = "http://localhost:%i/wd/hub" % self.webdriver_port
        self.logger.debug("Connecting to Selenium on URL: %s" % url)

        session_started = False
        try:
            # Crude settling delays: the remote end may not accept
            # connections immediately after the browser is launched.
            time.sleep(1)
            self.webdriver = webdriver.Remote(
                url, desired_capabilities=self.capabilities)
            time.sleep(10)
        except Exception:
            self.logger.warning(
                "Connecting to Selenium failed:\n%s" % traceback.format_exc())
            time.sleep(1)
        else:
            self.logger.debug("Selenium session started")
            session_started = True

        if not session_started:
            self.logger.warning("Failed to connect to Selenium")
            self.runner.send_message("init_failed")
        else:
            try:
                self.after_connect()
            except Exception:
                print >> sys.stderr, traceback.format_exc()
                self.logger.warning(
                    "Failed to navigate to initial page")
                self.runner.send_message("init_failed")
            else:
                self.runner.send_message("init_succeeded")

    def teardown(self):
        """Shut down the WebDriver session, ignoring errors from an
        already-dead browser."""
        try:
            self.webdriver.quit()
        except Exception:
            pass
        del self.webdriver

    def is_alive(self):
        """Return True if the WebDriver connection still responds."""
        try:
            # Get a simple property over the connection
            self.webdriver.current_window_handle
        # TODO what exception?
        except (socket.timeout, exceptions.ErrorInResponseException):
            return False
        return True

    def after_connect(self):
        """Load the testharness runner page and tag the window title with
        the current thread name for debuggability."""
        url = urlparse.urljoin(self.http_server_url, "/testharness_runner.html")
        self.logger.debug("Loading %s" % url)
        self.webdriver.get(url)
        self.webdriver.execute_script("document.title = '%s'" %
                                      threading.current_thread().name.replace("'", '"'))

    def run_test(self, test):
        """Run a single test.

        This method is independent of the test type, and calls
        do_test to implement the type-specific testing functionality.
        """
        # Lock to prevent races between timeouts and other results
        # This might not be strictly necessary if we need to deal
        # with the result changing post-hoc anyway (e.g. due to detecting
        # a crash after we get the data back from webdriver)
        result = None
        result_flag = threading.Event()
        result_lock = threading.Lock()

        timeout = test.timeout * self.timeout_multiplier

        def timeout_func():
            # Runs on the timer thread: reports EXTERNAL-TIMEOUT directly
            # (the assignment to result here is local; the message is sent
            # from this thread, so the outer result deliberately stays None).
            with result_lock:
                if not result_flag.is_set():
                    result_flag.set()
                    result = (test.result_cls("EXTERNAL-TIMEOUT", None), [])
                    self.runner.send_message("test_ended", test, result)

        self.timer = threading.Timer(timeout + 10, timeout_func)
        self.timer.start()

        try:
            # NOTE(review): selenium's set_script_timeout takes a value in
            # seconds; the "* 1000" looks like a ms/seconds mix-up carried
            # over from marionette -- confirm before changing.
            self.webdriver.set_script_timeout((timeout + 5) * 1000)
        except exceptions.ErrorInResponseException:
            self.logger.error("Lost webdriver connection")
            self.runner.send_message("restart_test", test)
            return Stop

        try:
            result = self.convert_result(test, self.do_test(test, timeout))
        except exceptions.TimeoutException:
            with result_lock:
                if not result_flag.is_set():
                    result_flag.set()
                    result = (test.result_cls("EXTERNAL-TIMEOUT", None), [])
            # Clean up any unclosed windows
            # This doesn't account for the possibility the browser window
            # is totally hung. That seems less likely since we are still
            # getting data from marionette, but it might be just as well
            # to do a full restart in this case
            # XXX - this doesn't work at the moment because window_handles
            # only returns OS-level windows (see bug 907197)
            # while True:
            #     handles = self.marionette.window_handles
            #     self.marionette.switch_to_window(handles[-1])
            #     if len(handles) > 1:
            #         self.marionette.close()
            #     else:
            #         break
            # Now need to check if the browser is still responsive and restart it if not

        # TODO: try to detect crash here
        except (socket.timeout, exceptions.ErrorInResponseException):
            # This can happen on a crash
            # Also, should check after the test if the firefox process is still running
            # and otherwise ignore any other result and set it to crash
            with result_lock:
                if not result_flag.is_set():
                    result_flag.set()
                    result = (test.result_cls("CRASH", None), [])
        finally:
            self.timer.cancel()

        with result_lock:
            if result:
                self.runner.send_message("test_ended", test, result)
+
+
class SeleniumTestharnessExecutor(SeleniumTestExecutor):
    """SeleniumTestExecutor for testharness.js tests."""

    convert_result = testharness_result_converter

    def __init__(self, *args, **kwargs):
        SeleniumTestExecutor.__init__(self, *args, **kwargs)
        # Read the injected harness template once, closing the file handle
        # promptly instead of leaking it.
        with open(os.path.join(here, "testharness_webdriver.js")) as f:
            self.script = f.read()

    def do_test(self, test, timeout):
        """Run the harness template, which opens the test url in a new
        window and returns the collected results via the async callback.

        :param test: the test to run
        :param timeout: per-test timeout in seconds (converted to ms for
                        the injected script)
        """
        return self.webdriver.execute_async_script(
            self.script % {"abs_url": urlparse.urljoin(self.http_server_url, test.url),
                           "url": test.url,
                           "window_id": self.window_id,
                           "timeout_multiplier": self.timeout_multiplier,
                           "timeout": timeout * 1000})
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/executors/executorservo.py
@@ -0,0 +1,69 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import subprocess
+import threading
+import urlparse
+
+from mozprocess import ProcessHandler
+
+from .base import testharness_result_converter
+from .process import ProcessTestExecutor
+
+
class ServoTestharnessExecutor(ProcessTestExecutor):
    """Run testharness.js tests by launching one servo process per test
    and parsing the result line from its output."""

    # Converts the raw testharness result dict into wptrunner result objects
    convert_result = testharness_result_converter

    def __init__(self, *args, **kwargs):
        ProcessTestExecutor.__init__(self, *args, **kwargs)
        # Parsed JSON result for the current test, set by on_output
        self.result_data = None
        # Event signalled once a result line has been seen
        self.result_flag = None

    def run_test(self, test):
        """Launch servo on the test url and wait for a result or timeout."""
        self.result_data = None
        self.result_flag = threading.Event()

        self.command = [self.binary, "--hard-fail",
                        urlparse.urljoin(self.http_server_url, test.url)]

        if self.debug_args:
            self.command = list(self.debug_args) + self.command


        self.proc = ProcessHandler(self.command,
                                   processOutputLine=[self.on_output])
        self.proc.run()

        timeout = test.timeout * self.timeout_multiplier

        # Now wait to get the output we expect, or until we reach the timeout
        self.result_flag.wait(timeout + 5)

        if self.result_flag.is_set():
            assert self.result_data is not None
            self.result_data["test"] = test.url
            result = self.convert_result(test, self.result_data)
            self.proc.kill()
        else:
            # No result line was seen; distinguish a process that never
            # started / died from one that is hung.
            # NOTE(review): relies on ProcessHandler.pid being None for a
            # dead process -- confirm against mozprocess semantics.
            if self.proc.pid is None:
                result = (test.result_cls("CRASH", None), [])
            else:
                self.proc.kill()
                result = (test.result_cls("TIMEOUT", None), [])
        self.runner.send_message("test_ended", test, result)

    def on_output(self, line):
        """Process one line of servo output.

        Result lines carry a JSON payload after the known prefix; anything
        else is forwarded to the console or the structured logger.
        """
        prefix = "ALERT: RESULT: "
        line = line.decode("utf8", "replace")
        if line.startswith(prefix):
            self.result_data = json.loads(line[len(prefix):])
            self.result_flag.set()
        else:
            if self.interactive:
                print line
            else:
                self.logger.process_output(self.proc.pid,
                                           line,
                                           " ".join(self.command))
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/executors/process.py
@@ -0,0 +1,24 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from .base import TestExecutor
+
+
class ProcessTestExecutor(TestExecutor):
    """Base class for executors that drive the browser as an external
    process rather than over a remote protocol."""

    def __init__(self, *args, **kwargs):
        TestExecutor.__init__(self, *args, **kwargs)
        browser = self.browser
        self.binary = browser.binary
        self.debug_args = browser.debug_args
        self.interactive = browser.interactive

    def setup(self, runner):
        """Record the runner and immediately report successful setup."""
        self.runner = runner
        self.runner.send_message("init_succeeded")
        return True

    def is_alive(self):
        # There is no persistent connection to probe, so always report alive.
        return True

    def run_test(self, test):
        # Subclasses must implement the per-test process launch.
        raise NotImplementedError
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/executors/reftest-wait.js
@@ -0,0 +1,17 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
var root = document.documentElement;

// Signal completion once the root element no longer carries the
// reftest-wait class; until then keep watching attribute mutations.
function test(mutations) {
  log("classList: " + root.classList);
  if (!root.classList.contains("reftest-wait")) {
    observer.disconnect();
    marionetteScriptFinished();
  }
}

var observer = new MutationObserver(test);
observer.observe(root, {attributes: true});

// Check immediately in case the class was never present.
test();
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/executors/reftest.js
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
// Open a fixed-size (600x600) window named "test"; presumably the reftest
// executor later navigates it to the test and reference urls -- confirm.
var win = window.open("about:blank", "test", "width=600,height=600");
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/executors/testharness_marionette.js
@@ -0,0 +1,23 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
// Template injected via marionette execute_async_script; the %(...)s/%(...)d
// tokens are substituted by Python %-formatting before injection.

window.wrappedJSObject.timeout_multiplier = %(timeout_multiplier)d;

// Completion callback invoked from the test window (presumably via
// testharnessreport.js -- confirm); forwards results to the harness.
window.wrappedJSObject.done = function(tests, status) {
  clearTimeout(timer);
  var test_results = tests.map(function(x) {
    return {name:x.name, status:x.status, message:x.message}
  });
  marionetteScriptFinished({test:"%(url)s",
                            tests:test_results,
                            status: status.status,
                            message: status.message});
}

// Open the test itself in a separate, uniquely-named window.
window.wrappedJSObject.win = window.open("%(abs_url)s", "%(window_id)s");

// Harness-side timeout: ask the test window to time itself out.
var timer = setTimeout(function() {
  log("Timeout fired");
  window.wrappedJSObject.win.timeout();
}, %(timeout)s);
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/executors/testharness_webdriver.js
@@ -0,0 +1,24 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
// Template run via WebDriver executeAsyncScript; %(...)s/%(...)d tokens are
// substituted by Python %-formatting before injection.

// executeAsyncScript passes its completion callback as the final argument.
var callback = arguments[arguments.length - 1];
window.timeout_multiplier = %(timeout_multiplier)d;

// Completion callback invoked from the test window (presumably via
// testharnessreport.js -- confirm); forwards results to the harness.
window.done = function(tests, status) {
  clearTimeout(timer);
  var test_results = tests.map(function(x) {
    return {name:x.name, status:x.status, message:x.message}
  });
  callback({test:"%(url)s",
            tests:test_results,
            status: status.status,
            message: status.message});
}

// Open the test itself in a separate, uniquely-named window.
window.win = window.open("%(abs_url)s", "%(window_id)s");

// Harness-side timeout: time the test window out and close it.
var timer = setTimeout(function() {
  window.win.timeout();
  window.win.close();
}, %(timeout)s);
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/expected.py
@@ -0,0 +1,18 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+
def expected_path(metadata_path, test_path):
    """Path to the expectation data file for a given test path.

    This is defined as metadata_path + relative_test_path + .ini

    :param metadata_path: Path to the root of the metadata directory
    :param test_path: Relative path to the test file from the test root
    """
    # str.split already returns a list; append the .ini suffix to the
    # final (file name) component.
    args = test_path.split("/")
    args[-1] += ".ini"
    return os.path.join(metadata_path, *args)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/hosts.py
@@ -0,0 +1,104 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import unicode_literals
+
+
class HostsLine(object):
    """A single entry in a hosts file.

    An entry is either a mapping line (ip address, canonical hostname and
    optional aliases, with an optional trailing comment) or a pure comment
    line, in which case only ``comment`` is set.
    """

    def __init__(self, ip_address, canonical_hostname, aliases=None, comment=None):
        self.ip_address = ip_address
        self.canonical_hostname = canonical_hostname
        self.aliases = aliases if aliases is not None else []
        self.comment = comment
        if self.ip_address is None:
            # Comment-only lines must carry a comment and nothing else
            assert self.canonical_hostname is None
            assert not self.aliases
            assert self.comment is not None

    @classmethod
    def from_string(cls, line):
        """Parse one line of a hosts file.

        Returns a HostsLine, or None for a blank line.
        Raises ValueError for a mapping line with no hostname.
        """
        stripped = line.strip()
        if not stripped:
            return None

        data, sep, trailing = stripped.partition("#")
        comment = trailing if sep else None

        ip_address = None
        canonical_hostname = None
        aliases = []

        fields = data.strip().split()
        if fields:
            if len(fields) < 2:
                raise ValueError("Invalid hosts line")
            ip_address = fields[0]
            canonical_hostname = fields[1]
            aliases = fields[2:]

        return cls(ip_address, canonical_hostname, aliases, comment)
+
+
class HostsFile(object):
    """In-memory model of a hosts file: an ordered list of entries with a
    lookup table from canonical hostname to entry."""

    def __init__(self):
        self.data = []          # entries in file order
        self.by_hostname = {}   # canonical hostname -> entry

    def set_host(self, host):
        """Add *host*, updating in place any existing entry that shares its
        canonical hostname. Comment-only entries are always appended."""
        hostname = host.canonical_hostname
        if hostname is None:
            self.data.append(host)
            return
        existing = self.by_hostname.get(hostname)
        if existing is not None:
            existing.ip_address = host.ip_address
            existing.aliases = host.aliases
            existing.comment = host.comment
        else:
            self.data.append(host)
            self.by_hostname[hostname] = host

    @classmethod
    def from_file(cls, f):
        """Build a HostsFile by parsing the lines of file object *f*."""
        hosts_file = cls()
        for raw_line in f:
            parsed = HostsLine.from_string(raw_line)
            if parsed is not None:
                hosts_file.set_host(parsed)
        return hosts_file

    def to_string(self):
        """Serialise to hosts-file text, aligning the ip and hostname
        columns and ending with a trailing newline."""
        ip_width = 0
        host_width = 0
        for entry in self.data:
            if entry.ip_address is not None:
                ip_width = max(ip_width, len(entry.ip_address))
                host_width = max(host_width, len(entry.canonical_hostname))

        lines = []
        for entry in self.data:
            line = ""
            if entry.ip_address is not None:
                names = entry.canonical_hostname
                if entry.aliases:
                    names = "%s %s" % (names.ljust(host_width),
                                       " ".join(entry.aliases))
                line = "%s %s" % (entry.ip_address.ljust(ip_width), names)
            if entry.comment:
                if line:
                    line += " "
                line += "#%s" % entry.comment
            lines.append(line)

        lines.append("")
        return "\n".join(lines)

    def to_file(self, f):
        """Write the serialised form to *f* as utf-8 encoded bytes."""
        f.write(self.to_string().encode("utf8"))
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/manifestexpected.py
@@ -0,0 +1,153 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from wptmanifest.backends import static
+from wptmanifest.backends.static import ManifestItem
+
+import expected
+
+"""Manifest structure used to store expected results of a test.
+
+Each manifest file is represented by an ExpectedManifest that
+has one or more TestNode children, one per test in the manifest.
+Each TestNode has zero or more SubtestNode children, one for each
+known subtest of the test.
+"""
+
def data_cls_getter(output_node, visited_node):
    """Return the node class to use for children of *output_node*:
    the manifest root maps to TestNode children, tests to SubtestNode
    children. visited_node is intentionally unused."""
    if output_node is None:
        return ExpectedManifest
    elif isinstance(output_node, ExpectedManifest):
        return TestNode
    elif isinstance(output_node, TestNode):
        return SubtestNode
    else:
        raise ValueError
+
+
class ExpectedManifest(ManifestItem):
    def __init__(self, name, test_path):
        """Object representing all the tests in a particular manifest

        :param name: Name of the AST Node associated with this object.
                     Should always be None since this should always be associated with
                     the root node of the AST.
        :param test_path: Path of the test file associated with this manifest.
        """
        if name is not None:
            raise ValueError("ExpectedManifest should represent the root node")
        if test_path is None:
            raise ValueError("ExpectedManifest requires a test path")
        ManifestItem.__init__(self, name)
        # Map of test id -> child TestNode; kept in sync with self.children
        self.child_map = {}
        self.test_path = test_path

    def append(self, child):
        """Add a test to the manifest"""
        ManifestItem.append(self, child)
        self.child_map[child.id] = child
        assert len(self.child_map) == len(self.children)

    def _remove_child(self, child):
        # NOTE(review): this delegates to ManifestItem.remove_child (no
        # leading underscore) although this override is named _remove_child;
        # confirm the base class actually exposes remove_child, otherwise
        # invoking this would raise AttributeError.
        del self.child_map[child.id]
        ManifestItem.remove_child(self, child)
        assert len(self.child_map) == len(self.children)

    def get_test(self, test_id):
        """Get a test from the manifest by ID

        :param test_id: ID of the test to return."""
        return self.child_map.get(test_id)
+
+
class TestNode(ManifestItem):
    def __init__(self, name):
        """Tree node associated with a particular test in a manifest

        :param name: name of the test"""
        assert name is not None
        ManifestItem.__init__(self, name)
        # NOTE(review): these two lists are never touched in this file;
        # presumably populated by the manifest update machinery -- confirm.
        self.updated_expected = []
        self.new_expected = []
        # Map of subtest name -> SubtestNode
        self.subtests = {}
        self.default_status = None
        # Marks nodes constructed from an on-disk manifest file
        self._from_file = True

    @property
    def is_empty(self):
        """True if this node stores no data beyond the keys every test of
        its type carries, and all its children are likewise empty."""
        required_keys = set(["type"])
        if self.test_type == "reftest":
            required_keys |= set(["reftype", "refurl"])
        if set(self._data.keys()) != required_keys:
            return False
        return all(child.is_empty for child in self.children)

    @property
    def test_type(self):
        """Value of the "type" key, e.g. "reftest"."""
        return self.get("type")

    @property
    def id(self):
        # The id is the test url; reftests additionally carry the reference
        # type and url so distinct comparisons get distinct ids.
        components = self.parent.test_path.split("/")[:-1]
        components.append(self.name)
        url = "/" + "/".join(components)
        if self.test_type == "reftest":
            return (url, self.get("reftype"), self.get("refurl"))
        else:
            return url

    def disabled(self):
        """Boolean indicating whether the test is disabled"""
        # Note: when the "disabled" key is present, its raw (truthy) value
        # is returned rather than True.
        try:
            return self.get("disabled")
        except KeyError:
            return False

    def append(self, node):
        """Add a subtest to the current test

        :param node: AST Node associated with the subtest"""
        child = ManifestItem.append(self, node)
        self.subtests[child.name] = child

    def get_subtest(self, name):
        """Get the SubtestNode corresponding to a particular subtest, by name

        :param name: Name of the node to return"""
        if name in self.subtests:
            return self.subtests[name]
        return None
+
+
class SubtestNode(TestNode):
    def __init__(self, name):
        """Tree node associated with a particular subtest in a manifest

        :param name: name of the subtest"""
        TestNode.__init__(self, name)

    @property
    def is_empty(self):
        """True when no expectation data at all is stored for this subtest."""
        return not self._data
+
+
def get_manifest(metadata_root, test_path, run_info):
    """Get the ExpectedManifest for a particular test path, or None if there is no
    metadata stored for that test path.

    :param metadata_root: Absolute path to the root of the metadata directory
    :param test_path: Path to the test(s) relative to the test root
    :param run_info: Dictionary of properties of the test run for which the expectation
                     values should be computed.
    """
    manifest_file = expected.expected_path(metadata_root, test_path)
    try:
        with open(manifest_file) as f:
            return static.compile(f,
                                  run_info,
                                  data_cls_getter=data_cls_getter,
                                  test_path=test_path)
    except IOError:
        # No expectation data stored for this test path
        return None
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/manifestinclude.py
@@ -0,0 +1,106 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Manifest structure used to store paths that should be included in a test run.
+
+The manifest is represented by a tree of IncludeManifest objects, the root
+representing the file and each subnode representing a subdirectory that should
+be included or excluded.
+"""
+
+from wptmanifest.node import DataNode
+from wptmanifest.backends import conditional
+from wptmanifest.backends.conditional import ManifestItem
+
+
class IncludeManifest(ManifestItem):
    def __init__(self, node):
        """Node in a tree structure representing the paths
        that should be included or excluded from the test run.

        :param node: AST Node corresponding to this Node.
        """
        ManifestItem.__init__(self, node)
        # Map of path component -> child IncludeManifest node
        self.child_map = {}

    @classmethod
    def create(cls):
        """Create an empty IncludeManifest tree"""
        node = DataNode(None)
        return cls(node)

    def append(self, child):
        """Add *child* to the tree, indexing it by name."""
        ManifestItem.append(self, child)
        self.child_map[child.name] = child
        assert len(self.child_map) == len(self.children)

    def include(self, test):
        """Return a boolean indicating whether a particular test should be
        included in a test run, based on the IncludeManifest tree rooted on
        this object.

        :param test: The test object"""
        path_components = self._get_path_components(test)
        return self._include(test, path_components)

    def _include(self, test, path_components):
        # Recurse down the tree along the (reversed) url path; the deepest
        # node with an applicable "skip" value decides the outcome.
        if path_components:
            next_path_part = path_components.pop()
            if next_path_part in self.child_map:
                return self.child_map[next_path_part]._include(test, path_components)

        node = self
        while node:
            try:
                # NOTE(review): this reads "skip" from self on every
                # iteration although the loop walks `node` up the tree; if
                # ManifestItem.get does not itself consult ancestors this
                # should probably be node.get(...) -- confirm.
                skip_value = self.get("skip", {"test_type": test.item_type}).lower()
                assert skip_value in ("true", "false")
                return False if skip_value == "true" else True
            except KeyError:
                if node.parent is not None:
                    node = node.parent
                else:
                    # Include by default
                    return True

    def _get_path_components(self, test):
        # Reversed so components can be consumed with pop() in _include
        test_url = test.url
        assert test_url[0] == "/"
        return [item for item in reversed(test_url.split("/")) if item]

    def _add_rule(self, url, direction):
        # Walk (creating as needed) the tree node for each path component,
        # then record the skip flag on the final node.
        assert direction in ("include", "exclude")
        components = [item for item in reversed(url.split("/")) if item]

        node = self
        while components:
            component = components.pop()
            if component not in node.child_map:
                new_node = IncludeManifest(DataNode(component))
                node.append(new_node)

            node = node.child_map[component]

        skip = False if direction == "include" else True
        node.set("skip", str(skip))

    def add_include(self, url_prefix):
        """Add a rule indicating that tests under a url path
        should be included in test runs

        :param url_prefix: The url prefix to include
        """
        return self._add_rule(url_prefix, "include")

    def add_exclude(self, url_prefix):
        """Add a rule indicating that tests under a url path
        should be excluded from test runs

        :param url_prefix: The url prefix to exclude
        """
        return self._add_rule(url_prefix, "exclude")
+
+
def get_manifest(manifest_path):
    """Parse the include manifest at *manifest_path* into an
    IncludeManifest tree."""
    def _data_cls_getter(output_node, visited_node):
        # Every node in an include manifest uses the same class
        return IncludeManifest

    with open(manifest_path) as f:
        return conditional.compile(f, data_cls_getter=_data_cls_getter)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/manifestupdate.py
@@ -0,0 +1,425 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+from collections import namedtuple, defaultdict
+
+from wptmanifest.node import (DataNode, ConditionalNode, BinaryExpressionNode,
+                              BinaryOperatorNode, VariableNode, StringNode, NumberNode,
+                              UnaryExpressionNode, UnaryOperatorNode, KeyValueNode)
+from wptmanifest.backends import conditional
+from wptmanifest.backends.conditional import ManifestItem
+
+import expected
+
+"""Manifest structure used to update the expected results of a test
+
+Each manifest file is represented by an ExpectedManifest that has one
+or more TestNode children, one per test in the manifest.  Each
+TestNode has zero or more SubtestNode children, one for each known
+subtest of the test.
+
+In these representations, conditional expressions in the manifest are
+not evaluated upfront but stored as python functions to be evaluated
+at runtime.
+
+When a result for a test is to be updated set_result on the
+[Sub]TestNode is called to store the new result, alongside the
+existing conditional that result's run info matched, if any. Once all
+new results are known, coalesce_expected is called to compute the new
+set of results and conditionals. The AST of the underlying parsed manifest
+is updated with the changes, and the result is serialised to a file.
+"""
+
+# A single observed test (or subtest) outcome: the run_info dict of the
+# run that produced it and the status string that was recorded.
+Result = namedtuple("Result", ["run_info", "status"])
+
+
+def data_cls_getter(output_node, visited_node):
+    """Return the class used to represent a node at a given position in
+    the output tree: the root becomes an ExpectedManifest, its children
+    TestNodes, and their children SubtestNodes."""
+    # visited_node is intentionally unused
+    if output_node is None:
+        return ExpectedManifest
+    elif isinstance(output_node, ExpectedManifest):
+        return TestNode
+    elif isinstance(output_node, TestNode):
+        return SubtestNode
+    else:
+        # Nesting deeper than subtests is not valid in an expectation file
+        raise ValueError
+
+
+class ExpectedManifest(ManifestItem):
+    def __init__(self, node, test_path=None):
+        """Object representing all the tests in a particular manifest
+
+        :param node: AST Node associated with this object. If this is None,
+                     a new AST is created to associate with this manifest.
+        :param test_path: Path of the test file associated with this manifest.
+        """
+        if node is None:
+            node = DataNode(None)
+        ManifestItem.__init__(self, node)
+        # Map of test id -> TestNode child, kept in sync with self.children
+        self.child_map = {}
+        self.test_path = test_path
+        # Set to True when any result stored under this manifest changes
+        self.modified = False
+
+    def append(self, child):
+        # Keep child_map synchronised with the underlying child list
+        ManifestItem.append(self, child)
+        self.child_map[child.id] = child
+        assert len(self.child_map) == len(self.children)
+
+    def _remove_child(self, child):
+        del self.child_map[child.id]
+        ManifestItem._remove_child(self, child)
+        assert len(self.child_map) == len(self.children)
+
+    def get_test(self, test_id):
+        """Return the TestNode with the given test id.
+
+        :param test_id: The id of the test to look up
+        :raises KeyError: if no test with that id is present"""
+
+        return self.child_map[test_id]
+
+    def has_test(self, test_id):
+        """Boolean indicating whether the current test has a known child test
+        with id test id
+
+        :param test_id: The id of the test to look up"""
+
+        return test_id in self.child_map
+
+
+class TestNode(ManifestItem):
+    def __init__(self, node):
+        """Tree node associated with a particular test in a manifest
+
+        :param node: AST node associated with the test"""
+
+        ManifestItem.__init__(self, node)
+        # Existing "expected" conditionals paired with the new results
+        # that matched each of them
+        self.updated_expected = []
+        # Results that matched no existing conditional
+        self.new_expected = []
+        # Map of subtest name -> SubtestNode
+        self.subtests = {}
+        # Default status for this test type; set on the first set_result
+        self.default_status = None
+        # False when the node was built by create() rather than parsed
+        self._from_file = True
+
+    @classmethod
+    def create(cls, test_type, test_id):
+        """Create a TestNode corresponding to a given test
+
+        :param test_type: The type of the test
+        :param test_id: The id of the test"""
+
+        if test_type == "reftest":
+            # Reftest ids are (url, ref_type, ref_url) tuples
+            url = test_id[0]
+        else:
+            url = test_id
+        name = url.split("/")[-1]
+        node = DataNode(name)
+        self = cls(node)
+
+        self.set("type", test_type)
+        if test_type == "reftest":
+            self.set("reftype", test_id[1])
+            self.set("refurl", test_id[2])
+        self._from_file = False
+        return self
+
+    @property
+    def is_empty(self):
+        # A node is empty when it carries no data beyond the keys needed
+        # to identify the test and all its subtests are empty too; empty
+        # nodes are not serialised back to disk
+        required_keys = set(["type"])
+        if self.test_type == "reftest":
+            required_keys |= set(["reftype", "refurl"])
+        if set(self._data.keys()) != required_keys:
+            return False
+        return all(child.is_empty for child in self.children)
+
+    @property
+    def test_type(self):
+        """The type of the test represented by this TestNode"""
+
+        return self.get("type", None)
+
+    @property
+    def id(self):
+        """The id of the test represented by this TestNode"""
+
+        # Reconstruct the url from the manifest's test_path directory
+        # plus this node's own name
+        components = self.parent.test_path.split(os.path.sep)[:-1]
+        components.append(self.name)
+        url = "/" + "/".join(components)
+        if self.test_type == "reftest":
+            return (url, self.get("reftype", None), self.get("refurl", None))
+        else:
+            return url
+
+    def disabled(self, run_info):
+        """Boolean indicating whether this test is disabled when run in an
+        environment with the given run_info
+
+        :param run_info: Dictionary of run_info parameters"""
+
+        return self.get("disabled", run_info) is not None
+
+    def set_result(self, run_info, result):
+        """Set the result of the test in a particular run
+
+        :param run_info: Dictionary of run_info parameters corresponding
+                         to this run
+        :param result: Status of the test in this run"""
+
+        # All results fed into a node must share one default status
+        if self.default_status is not None:
+            assert self.default_status == result.default_expected
+        else:
+            self.default_status = result.default_expected
+
+        # Add this result to the list of results satisfying
+        # any condition in the list of updated results it matches
+        for (cond, values) in self.updated_expected:
+            if cond(run_info):
+                values.append(Result(run_info, result.status))
+                if result.status != cond.value:
+                    self.root.modified = True
+                break
+        else:
+            # We didn't find a previous value for this
+            self.new_expected.append(Result(run_info, result.status))
+            self.root.modified = True
+
+    def coalesce_expected(self):
+        """Update the underlying manifest AST for this test based on all the
+        added results.
+
+        This will update existing conditionals if they got the same result in
+        all matching runs in the updated results, will delete existing conditionals
+        that get more than one different result in the updated run, and add new
+        conditionals for anything that doesn't match an existing conditional.
+
+        Conditionals not matched by any added result are not changed."""
+
+        final_conditionals = []
+
+        try:
+            unconditional_status = self.get("expected")
+        except KeyError:
+            unconditional_status = self.default_status
+
+        for conditional_value, results in self.updated_expected:
+            if not results:
+                # The conditional didn't match anything in these runs so leave it alone
+                final_conditionals.append(conditional_value)
+            elif all(results[0].status == result.status for result in results):
+                # All the new values for this conditional matched, so update the node
+                result = results[0]
+                if (result.status == unconditional_status and
+                    conditional_value.condition_node is not None):
+                    # Redundant with the unconditional value; drop it
+                    self.remove_value("expected", conditional_value)
+                else:
+                    conditional_value.value = result.status
+                    final_conditionals.append(conditional_value)
+            elif conditional_value.condition_node is not None:
+                # Blow away the existing condition and rebuild from scratch
+                # This isn't sure to work if we have a conditional later that matches
+                # these values too, but we can hope, verify that we get the results
+                # we expect, and if not let a human sort it out
+                self.remove_value("expected", conditional_value)
+                self.new_expected.extend(results)
+            elif conditional_value.condition_node is None:
+                self.new_expected.extend(result for result in results
+                                         if result.status != unconditional_status)
+
+        # It is an invariant that nothing in new_expected matches an existing
+        # condition except for the default condition
+
+        if self.new_expected:
+            if all(self.new_expected[0].status == result.status
+                   for result in self.new_expected) and not self.updated_expected:
+                # Everything agrees: a single unconditional value suffices
+                status = self.new_expected[0].status
+                if status != self.default_status:
+                    self.set("expected", status, condition=None)
+                    final_conditionals.append(self._data["expected"][-1])
+            else:
+                # Synthesise new conditionals from the run_info properties
+                for conditional_node, status in group_conditionals(self.new_expected):
+                    if status != unconditional_status:
+                        self.set("expected", status, condition=conditional_node.children[0])
+                        final_conditionals.append(self._data["expected"][-1])
+
+        # Drop a trailing unconditional value equal to the default status
+        if ("expected" in self._data and
+            len(self._data["expected"]) > 0 and
+            self._data["expected"][-1].condition_node is None and
+            self._data["expected"][-1].value == self.default_status):
+
+            self.remove_value("expected", self._data["expected"][-1])
+
+        # If no values remain, remove the "expected" key from the AST too
+        if ("expected" in self._data and
+            len(self._data["expected"]) == 0):
+            for child in self.node.children:
+                if (isinstance(child, KeyValueNode) and
+                    child.data == "expected"):
+                    child.remove()
+                    break
+
+    def _add_key_value(self, node, values):
+        # Parser hook: track "expected" conditionals so new results can be
+        # matched against them in set_result
+        ManifestItem._add_key_value(self, node, values)
+        if node.data == "expected":
+            self.updated_expected = []
+            for value in values:
+                self.updated_expected.append((value, []))
+
+    def clear_expected(self):
+        """Clear all the expected data for this test and all of its subtests"""
+
+        self.updated_expected = []
+        if "expected" in self._data:
+            # Remove both the AST node and the cached data entry
+            for child in self.node.children:
+                if (isinstance(child, KeyValueNode) and
+                    child.data == "expected"):
+                    child.remove()
+                    del self._data["expected"]
+                    break
+
+        for subtest in self.subtests.itervalues():
+            subtest.clear_expected()
+
+    def append(self, node):
+        # NOTE(review): unlike ManifestItem.append this returns None;
+        # callers here only rely on the subtests map being updated
+        child = ManifestItem.append(self, node)
+        self.subtests[child.name] = child
+
+    def get_subtest(self, name):
+        """Return a SubtestNode corresponding to a particular subtest of
+        the current test, creating a new one if no subtest with that name
+        already exists.
+
+        :param name: Name of the subtest"""
+
+        if name in self.subtests:
+            return self.subtests[name]
+        else:
+            subtest = SubtestNode.create(name)
+            self.append(subtest)
+            return subtest
+
+
+class SubtestNode(TestNode):
+    def __init__(self, node):
+        assert isinstance(node, DataNode)
+        TestNode.__init__(self, node)
+
+    @classmethod
+    def create(cls, name):
+        """Create an empty SubtestNode with the given subtest name.
+
+        NOTE(review): overrides TestNode.create with a different
+        signature; only call this via SubtestNode."""
+        node = DataNode(name)
+        self = cls(node)
+        return self
+
+    @property
+    def is_empty(self):
+        # Subtests carry no mandatory "type" key, so any data at all
+        # makes them non-empty
+        if self._data:
+            return False
+        return True
+
+
+def group_conditionals(values):
+    """Given a list of Result objects, return a list of
+    (conditional_node, status) pairs representing the conditional
+    expressions that are required to match each status
+
+    :param values: List of Results"""
+
+    # Record which statuses were seen for each (property, value) pair
+    by_property = defaultdict(set)
+    for run_info, status in values:
+        for prop_name, prop_value in run_info.iteritems():
+            by_property[(prop_name, prop_value)].add(status)
+
+    # If we have more than one value, remove any properties that are common
+    # for all the values
+    if len(values) > 1:
+        for key, statuses in by_property.copy().iteritems():
+            if len(statuses) == len(values):
+                del by_property[key]
+
+    properties = set(item[0] for item in by_property.iterkeys())
+
+    # Impose a stable, human-friendly ordering on the condition properties
+    prop_order = ["debug", "os", "version", "processor", "bits"]
+    include_props = []
+
+    for prop in prop_order:
+        if prop in properties:
+            include_props.append(prop)
+
+    conditions = {}
+
+    for run_info, status in values:
+        prop_set = tuple((prop, run_info[prop]) for prop in include_props)
+        if prop_set in conditions:
+            # First status seen for a property combination wins
+            continue
+
+        expr = make_expr(prop_set, status)
+        conditions[prop_set] = (expr, status)
+
+    return conditions.values()
+
+
+def make_expr(prop_set, status):
+    """Create an AST that returns the value ``status`` given all the
+    properties in prop_set match.
+
+    :param prop_set: Tuple of (property name, value) pairs
+    :param status: Status string used as the conditional's RHS"""
+    root = ConditionalNode()
+
+    assert len(prop_set) > 0
+
+    # Properties tested for truthiness rather than compared to a value
+    no_value_props = set(["debug"])
+
+    expressions = []
+    for prop, value in prop_set:
+        number_types = (int, float, long)
+        value_cls = (NumberNode
+                     if type(value) in number_types
+                     else StringNode)
+        if prop not in no_value_props:
+            expressions.append(
+                BinaryExpressionNode(
+                    BinaryOperatorNode("=="),
+                    VariableNode(prop),
+                    value_cls(unicode(value))
+                ))
+        else:
+            if value:
+                expressions.append(VariableNode(prop))
+            else:
+                expressions.append(
+                    UnaryExpressionNode(
+                        UnaryOperatorNode("not"),
+                        VariableNode(prop)
+                    ))
+    if len(expressions) > 1:
+        # Right-fold the expressions into a chain of "and" nodes
+        prev = expressions[-1]
+        for curr in reversed(expressions[:-1]):
+            node = BinaryExpressionNode(
+                BinaryOperatorNode("and"),
+                curr,
+                prev)
+            prev = node
+    else:
+        node = expressions[0]
+
+    root.append(node)
+    root.append(StringNode(status))
+
+    return root
+
+
+def get_manifest(metadata_root, test_path):
+    """Get the ExpectedManifest for a particular test path, or None if there is no
+    metadata stored for that test path.
+
+    :param metadata_root: Absolute path to the root of the metadata directory
+    :param test_path: Path to the test(s) relative to the test root
+    """
+    manifest_path = expected.expected_path(metadata_root, test_path)
+    try:
+        with open(manifest_path) as f:
+            return compile(f, test_path)
+    except IOError:
+        # No stored expectation file for this test path
+        return None
+
+
+def compile(manifest_file, test_path):
+    # Parse an expectation file into an ExpectedManifest tree.
+    # NOTE(review): shadows the builtin compile() within this module.
+    return conditional.compile(manifest_file,
+                               data_cls_getter=data_cls_getter,
+                               test_path=test_path)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/metadata.py
@@ -0,0 +1,289 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import shutil
+import sys
+import tempfile
+import types
+import uuid
+from collections import defaultdict
+
+from mozlog.structured import reader
+from mozlog.structured import structuredlog
+
+import expected
+import manifestupdate
+import wptmanifest
+import wpttest
+from vcs import git
+manifest = None  # Module that will be imported relative to test_root
+
+logger = structuredlog.StructuredLogger("web-platform-tests")
+
+
+def manifest_path(metadata_root):
+    # Path of the cached test manifest inside the metadata directory
+    return os.path.join(metadata_root, "MANIFEST.json")
+
+
+def load_test_manifest(test_root, metadata_root):
+    # Import the manifest module shipped with the tests checkout, then
+    # load the cached MANIFEST.json from the metadata directory
+    do_test_relative_imports(test_root)
+    return manifest.load(manifest_path(metadata_root))
+
+
+def update_manifest(git_root, metadata_root):
+    """Regenerate MANIFEST.json from scratch for the checkout at git_root
+    and write it into the metadata directory."""
+    manifest.setup_git(git_root)
+    # Create an entirely new manifest
+    new_manifest = manifest.Manifest(None)
+    manifest.update(new_manifest)
+    manifest.write(new_manifest, manifest_path(metadata_root))
+    return new_manifest
+
+
+def update_expected(test_root, metadata_root, log_file_names, rev_old=None, rev_new="HEAD",
+                    ignore_existing=False):
+    """Update the metadata files for web-platform-tests based on
+    the results obtained in a previous run
+
+    :param test_root: Path to the web-platform-tests checkout
+    :param metadata_root: Path to the metadata directory
+    :param log_file_names: List of structured log files to read results from
+    :param rev_old: Revision of the previous metadata update, if known
+    :param rev_new: Revision being updated to
+    :param ignore_existing: Discard all existing expectation data first
+    :returns: List of test paths whose results changed unexpectedly"""
+
+    # NOTE(review): this local shadows the module-level "manifest" global
+    manifest = load_test_manifest(test_root, metadata_root)
+
+    if rev_old is not None:
+        rev_old = git("rev-parse", rev_old, repo=test_root).strip()
+    rev_new = git("rev-parse", rev_new, repo=test_root).strip()
+
+    if rev_old is not None:
+        change_data = load_change_data(rev_old, rev_new, repo=test_root)
+    else:
+        change_data = {}
+
+    expected_map = update_from_logs(metadata_root, manifest, *log_file_names,
+                                    ignore_existing=ignore_existing)
+
+    write_changes(metadata_root, expected_map)
+
+    results_changed = [item.test_path for item in expected_map.itervalues() if item.modified]
+
+    return unexpected_changes(change_data, results_changed)
+
+
+def do_test_relative_imports(test_root):
+    # Bind the module-level "manifest" global to the manifest module that
+    # ships inside the tests checkout; mutates sys.path as a side effect
+    global manifest
+
+    sys.path.insert(0, os.path.join(test_root))
+    sys.path.insert(0, os.path.join(test_root, "tools", "scripts"))
+    import manifest
+
+
+def files_in_repo(repo_root):
+    return git("ls-tree", "-r", "--name-only", "HEAD").split("\n")
+
+
+def rev_range(rev_old, rev_new, symmetric=False):
+    # Build a git revision range string, e.g. "a..b" or "a...b"
+    joiner = ".." if not symmetric else "..."
+    return "".join([rev_old, joiner, rev_new])
+
+
+def paths_changed(rev_old, rev_new, repo):
+    """Return a set of (status letter, path) tuples for the files changed
+    between two revisions of repo."""
+    data = git("diff", "--name-status", rev_range(rev_old, rev_new), repo=repo)
+    # Each line is "<status>\t<path>"; split only on the first tab
+    lines = [tuple(item.strip() for item in line.strip().split("\t", 1))
+             for line in data.split("\n") if line.strip()]
+    output = set(lines)
+    return output
+
+
+def load_change_data(rev_old, rev_new, repo):
+    """Map each path changed between two revisions to one of
+    "modified", "new" or "deleted"."""
+    changes = paths_changed(rev_old, rev_new, repo)
+    rv = {}
+    status_keys = {"M": "modified",
+                   "A": "new",
+                   "D": "deleted"}
+    # TODO: deal with renames
+    for item in changes:
+        rv[item[1]] = status_keys[item[0]]
+    return rv
+
+
+def unexpected_changes(change_data, files_changed):
+    # Results that changed for tests the revision range did not modify
+    # are unexpected and need human attention
+    return [fn for fn in files_changed if change_data.get(fn) != "M"]
+
+# For each testrun
+# Load all files and scan for the suite_start entry
+# Build a hash of filename: properties
+# For each different set of properties, gather all chunks
+# For each chunk in the set of chunks, go through all tests
+# for each test, make a map of {conditionals: [(platform, new_value)]}
+# Repeat for each platform
+# For each test in the list of tests:
+#   for each conditional:
+#      If all the new values match (or there aren't any) retain that conditional
+#      If any new values mismatch mark the test as needing human attention
+#   Check if all the RHS values are the same; if so collapse the conditionals
+
+
+def update_from_logs(metadata_path, manifest, *log_filenames, **kwargs):
+    """Build the expectation tree and apply the results found in each log
+    file, then coalesce the per-run results into final conditionals.
+
+    :returns: Map of test path -> ExpectedManifest"""
+    # kwargs.pop because Python 2 has no keyword-only args after *args
+    ignore_existing = kwargs.pop("ignore_existing", False)
+
+    expected_map, id_path_map = create_test_tree(metadata_path, manifest)
+    updater = ExpectedUpdater(expected_map, id_path_map, ignore_existing=ignore_existing)
+    for log_filename in log_filenames:
+        with open(log_filename) as f:
+            updater.update_from_log(f)
+
+    # Coalesce each test's subtests before the test itself
+    for tree in expected_map.itervalues():
+        for test in tree.iterchildren():
+            for subtest in test.iterchildren():
+                subtest.coalesce_expected()
+            test.coalesce_expected()
+
+    return expected_map
+
+
+def write_changes(metadata_path, expected_map):
+    """Replace the metadata directory with freshly serialised expectations.
+
+    NOTE(review): the rename dance below is not atomic; a failure between
+    the two os.rename calls leaves no metadata directory in place."""
+    # First write the new manifest files to a temporary directory
+    temp_path = tempfile.mkdtemp()
+    write_new_expected(temp_path, expected_map)
+    shutil.copyfile(os.path.join(metadata_path, "MANIFEST.json"),
+                    os.path.join(temp_path, "MANIFEST.json"))
+
+    # Then move the old manifest files to a new location
+    temp_path_2 = metadata_path + str(uuid.uuid4())
+    os.rename(metadata_path, temp_path_2)
+    # Move the new files to the destination location and remove the old files
+    os.rename(temp_path, metadata_path)
+    shutil.rmtree(temp_path_2)
+
+
+def write_new_expected(metadata_path, expected_map):
+    # Serialize the data back to a file
+    for tree in expected_map.itervalues():
+        # Empty trees would serialise to nothing, so skip them entirely
+        if not tree.is_empty:
+            manifest_str = wptmanifest.serialize(tree.node, skip_empty_data=True)
+            assert manifest_str != ""
+            path = expected.expected_path(metadata_path, tree.test_path)
+            # NOTE(review): "dir" shadows the builtin; harmless here
+            dir = os.path.split(path)[0]
+            if not os.path.exists(dir):
+                os.makedirs(dir)
+            with open(path, "w") as f:
+                f.write(manifest_str.encode("utf8"))
+
+
+class ExpectedUpdater(object):
+    """Consumes structured log files and feeds the recorded results into
+    the expectation tree built by create_test_tree."""
+
+    def __init__(self, expected_tree, id_path_map, ignore_existing=False):
+        self.expected_tree = expected_tree
+        self.id_path_map = id_path_map
+        self.ignore_existing = ignore_existing
+        # run_info of the log currently being processed
+        self.run_info = None
+        # Dispatch table for mozlog action names
+        self.action_map = {"suite_start": self.suite_start,
+                           "test_start": self.test_start,
+                           "test_status": self.test_status,
+                           "test_end": self.test_end}
+        # Map of test id -> set of subtest names seen so far
+        self.tests_visited = {}
+
+        # Tests started but not yet ended in the current log
+        self.test_cache = {}
+
+    def update_from_log(self, log_file):
+        self.run_info = None
+        log_reader = reader.read(log_file)
+        reader.each_log(log_reader, self.action_map)
+
+    def suite_start(self, data):
+        self.run_info = data["run_info"]
+
+    def test_id(self, id):
+        # Convert a test id from the log (string, or list for reftests)
+        # into the hashable form used as dict keys
+        if type(id) in types.StringTypes:
+            return id
+        else:
+            return tuple(id)
+
+    def test_start(self, data):
+        test_id = self.test_id(data["test"])
+        try:
+            test = self.expected_tree[self.id_path_map[test_id]].get_test(test_id)
+        except KeyError:
+            # TODO(review): use the module logger rather than print
+            print "Test not found %s, skipping" % test_id
+            return
+        self.test_cache[test_id] = test
+
+        if test_id not in self.tests_visited:
+            if self.ignore_existing:
+                test.clear_expected()
+            self.tests_visited[test_id] = set()
+
+    def test_status(self, data):
+        test_id = self.test_id(data["test"])
+        test = self.test_cache.get(test_id)
+        if test is None:
+            # test_start was skipped for this test
+            return
+        test_cls = wpttest.manifest_test_cls[test.test_type]
+
+        subtest = test.get_subtest(data["subtest"])
+
+        # NOTE(review): keyed by test.id here but by test_id in
+        # test_start; presumed equivalent — confirm for reftest ids
+        self.tests_visited[test.id].add(data["subtest"])
+
+        result = test_cls.subtest_result_cls(
+            data["subtest"],
+            data["status"],
+            data.get("message"))
+
+        subtest.set_result(self.run_info, result)
+
+    def test_end(self, data):
+        test_id = self.test_id(data["test"])
+        test = self.test_cache.get(test_id)
+        if test is None:
+            return
+        test_cls = wpttest.manifest_test_cls[test.test_type]
+
+        result = test_cls.result_cls(
+            data["status"],
+            data.get("message"))
+
+        test.set_result(self.run_info, result)
+        del self.test_cache[test_id]
+
+
+def create_test_tree(metadata_path, manifest):
+    """Build one ExpectedManifest per test path, seeded from any existing
+    metadata on disk.
+
+    :returns: Tuple of (test path -> ExpectedManifest map,
+              test id -> test path map)"""
+    expected_map = {}
+    test_id_path_map = {}
+    exclude_types = frozenset(["stub", "helper", "manual"])
+    # Symmetric difference; equivalent to subtraction assuming
+    # exclude_types is a subset of item_types — TODO confirm
+    include_types = set(manifest.item_types) ^ exclude_types
+    for test_path, tests in manifest.itertypes(*include_types):
+
+        expected_data = load_expected(metadata_path, test_path, tests)
+        if expected_data is None:
+            expected_data = create_expected(test_path, tests)
+
+        expected_map[test_path] = expected_data
+
+        for test in tests:
+            test_id_path_map[test.id] = test_path
+
+    return expected_map, test_id_path_map
+
+
+def create_expected(test_path, tests):
+    """Create a fresh ExpectedManifest for test_path containing one node
+    per test."""
+    # NOTE(review): this local shadows the imported "expected" module
+    expected = manifestupdate.ExpectedManifest(None, test_path)
+    for test in tests:
+        expected.append(manifestupdate.TestNode.create(test.item_type, test.id))
+    return expected
+
+
+def load_expected(metadata_path, test_path, tests):
+    """Load the stored ExpectedManifest for test_path, synchronised with
+    the current set of tests, or None if no metadata is stored."""
+    expected_manifest = manifestupdate.get_manifest(metadata_path, test_path)
+    if expected_manifest is None:
+        return
+
+    tests_by_id = {item.id: item for item in tests}
+
+    # Remove expected data for tests that no longer exist
+    for test in expected_manifest.iterchildren():
+        if not test.id in tests_by_id:
+            test.remove()
+
+    # Add tests that don't have expected data
+    for test in tests:
+        if not expected_manifest.has_test(test.id):
+            expected_manifest.append(manifestupdate.TestNode.create(test.item_type, test.id))
+
+    return expected_manifest
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/products.py
@@ -0,0 +1,52 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import importlib
+import imp
+
+from .browsers import product_list
+
+def products_enabled(config):
+    """Return the product names enabled in this config, falling back to
+    the full built-in product_list when none are configured."""
+    names = config.get("products", {}).keys()
+    if not names:
+        return product_list
+    else:
+        return names
+
+def product_module(config, product):
+    here = os.path.join(os.path.split(__file__)[0])
+    product_dir = os.path.join(here, "browsers")
+
+    if product not in products_enabled(config):
+        raise ValueError("Unknown product %s" % product)
+
+    path = config.get("products", {}).get(product, None)
+    if path:
+        module = imp.load_source('wptrunner.browsers.' + product, path)
+    else:
+        module = importlib.import_module("wptrunner.browsers." + product)
+
+    if not hasattr(module, "__wptrunner__"):
+        raise ValueError("Product module does not define __wptrunner__ variable")
+
+    return module
+
+
+def load_product(config, product):
+    """Load a product module and unpack its __wptrunner__ entry points.
+
+    :returns: Tuple of (check_args, browser_cls, browser_kwargs,
+              executor_classes, executor_kwargs, env_options)"""
+    module = product_module(config, product)
+    data = module.__wptrunner__
+
+    check_args = getattr(module, data["check_args"])
+    browser_cls = getattr(module, data["browser"])
+    browser_kwargs = getattr(module, data["browser_kwargs"])
+    executor_kwargs = getattr(module, data["executor_kwargs"])
+    # env_options is invoked immediately; the others remain callables
+    env_options = getattr(module, data["env_options"])()
+
+    # Map test type name -> executor class
+    executor_classes = {}
+    for test_type, cls_name in data["executor"].iteritems():
+        cls = getattr(module, cls_name)
+        executor_classes[test_type] = cls
+
+    return check_args, browser_cls, browser_kwargs, executor_classes, executor_kwargs, env_options
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/reduce.py
@@ -0,0 +1,197 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import sys
+import tempfile
+from cStringIO import StringIO
+from collections import defaultdict
+
+import wptrunner
+import wpttest
+
+from mozlog.structured import commandline, reader
+
+logger = None
+
+
+def setup_logging(args, defaults):
+    """Set up the global structured logger and strip log_* entries from
+    args so they are not passed on to wptrunner a second time."""
+    global logger
+    logger = commandline.setup_logging("web-platform-tests-unstable", args, defaults)
+    wptrunner.setup_stdlib_logger()
+
+    # args.keys() returns a list copy in Python 2, so popping while
+    # iterating is safe here
+    for name in args.keys():
+        if name.startswith("log_"):
+            args.pop(name)
+
+    return logger
+
+
+def group(items, size):
+    """Split items into consecutive chunks of at most size elements."""
+    rv = []
+    i = 0
+    while i < len(items):
+        rv.append(items[i:i + size])
+        i += size
+
+    return rv
+
+
+def next_power_of_two(num):
+    """Return the smallest power of two >= num (1 for num <= 1)."""
+    rv = 1
+    while rv < num:
+        rv = rv << 1
+    return rv
+
+
+class Reducer(object):
+    """Bisects a list of tests to find a minimal set that makes the
+    target test give unstable (varying) results across repeated runs."""
+
+    def __init__(self, target, **kwargs):
+        # Test id whose instability we are trying to reproduce
+        self.target = target
+
+        self.test_type = kwargs["test_types"][0]
+        run_info = wpttest.get_run_info(kwargs["metadata_root"],
+                                        kwargs["product"],
+                                        debug=False)
+        test_filter = wptrunner.TestFilter(include=kwargs["include"])
+        self.test_loader = wptrunner.TestLoader(kwargs["tests_root"],
+                                                kwargs["metadata_root"],
+                                                test_filter,
+                                                run_info)
+        # Instability can only be observed over multiple repetitions
+        if kwargs["repeat"] == 1:
+            logger.critical("Need to specify --repeat with more than one repetition")
+            sys.exit(1)
+        self.kwargs = kwargs
+
+    def run(self):
+        """Run the bisection; return a reduced test list ending with the
+        target test, or [] if the full set was already stable."""
+        all_tests = self.get_initial_tests()
+
+        tests = all_tests[:-1]
+        target_test = [all_tests[-1]]
+
+        # If the target is unstable on its own there is nothing to reduce
+        if self.unstable(target_test):
+            return target_test
+
+        if not self.unstable(all_tests):
+            return []
+
+        chunk_size = next_power_of_two(int(len(tests) / 2))
+        logger.debug("Using chunk size %i" % chunk_size)
+
+        while chunk_size >= 1:
+            logger.debug("%i tests remain" % len(tests))
+            chunks = group(tests, chunk_size)
+            chunk_results = [None] * len(chunks)
+
+            for i, chunk in enumerate(chunks):
+                logger.debug("Running chunk %i/%i of size %i" % (i + 1, len(chunks), chunk_size))
+                # Run with chunk i removed; a still-unstable result means
+                # chunk i is not needed to reproduce the problem
+                trial_tests = []
+                chunk_str = ""
+                for j, inc_chunk in enumerate(chunks):
+                    if i != j and chunk_results[j] in (None, False):
+                        chunk_str += "+"
+                        trial_tests.extend(inc_chunk)
+                    else:
+                        chunk_str += "-"
+                logger.debug("Using chunks %s" % chunk_str)
+                trial_tests.extend(target_test)
+
+                chunk_results[i] = self.unstable(trial_tests)
+
+                # if i == len(chunks) - 2 and all(item is False for item in chunk_results[:-1]):
+                # Dangerous? optimisation that if you got stability for 0..N-1 chunks
+                # it must be unstable with the Nth chunk
+                #     chunk_results[i+1] = True
+                #     continue
+
+            # Keep only the chunks whose removal made the run stable
+            new_tests = []
+            keep_str = ""
+            for result, chunk in zip(chunk_results, chunks):
+                if not result:
+                    keep_str += "+"
+                    new_tests.extend(chunk)
+                else:
+                    keep_str += "-"
+
+            logger.debug("Keeping chunks %s" % keep_str)
+
+            tests = new_tests
+
+            chunk_size = int(chunk_size / 2)
+
+        return tests + target_test
+
+    def unstable(self, tests):
+        """Run the given tests repeatedly and report whether the target
+        test produced differing statuses across runs."""
+        logger.debug("Running with %i tests" % len(tests))
+
+        self.test_loader.tests = {self.test_type: tests}
+
+        # Silence wptrunner's own console output for the trial run
+        stdout, stderr = sys.stdout, sys.stderr
+        sys.stdout = StringIO()
+        sys.stderr = StringIO()
+
+        with tempfile.NamedTemporaryFile() as f:
+            args = self.kwargs.copy()
+            args["log_raw"] = [f]
+            args["capture_stdio"] = False
+            wptrunner.setup_logging(args, {})
+            wptrunner.run_tests(test_loader=self.test_loader, **args)
+            wptrunner.logger.remove_handler(wptrunner.logger.handlers[0])
+            is_unstable = self.log_is_unstable(f)
+
+            sys.stdout, sys.stderr = stdout, stderr
+
+        logger.debug("Result was unstable with chunk removed"
+                     if is_unstable else "stable")
+
+        return is_unstable
+
+    def log_is_unstable(self, log_f):
+        """Parse a raw structured log and return True if the target test
+        recorded more than one distinct status for any (sub)test."""
+        log_f.seek(0)
+
+        # Map of subtest name (None for the test itself) -> statuses seen
+        statuses = defaultdict(set)
+
+        def handle_status(item):
+            if item["test"] == self.target:
+                statuses[item["subtest"]].add(item["status"])
+
+        def handle_end(item):
+            if item["test"] == self.target:
+                statuses[None].add(item["status"])
+
+        reader.each_log(reader.read(log_f),
+                        {"test_status": handle_status,
+                         "test_end": handle_end})
+
+        logger.debug(str(statuses))
+
+        if not statuses:
+            logger.error("Didn't get any useful output from wptrunner")
+            log_f.seek(0)
+            for item in reader.read(log_f):
+                logger.debug(item)
+            return None
+
+        return any(len(item) > 1 for item in statuses.itervalues())
+
+    def get_initial_tests(self):
+        # Need to pass in arguments
+
+        # All tests up to and including the target, in run order
+        all_tests = self.test_loader.load_tests([self.test_type],
+                                                "none", 1, 1)[self.test_type]
+        tests = []
+        for item in all_tests:
+            tests.append(item)
+            if item.url == self.target:
+                break
+
+        logger.debug("Starting with tests: %s" % ("\n".join(item.id for item in tests)))
+
+        return tests
+
+
+def do_reduce(**kwargs):
+    """Entry point: build a Reducer for kwargs["target"] and return the
+    reduced set of tests that reproduces its instability."""
+    target = kwargs.pop("target")
+    reducer = Reducer(target, **kwargs)
+
+    unstable_set = reducer.run()
+    return unstable_set
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/testharness_runner.html
@@ -0,0 +1,6 @@
+<!doctype html>
+<title></title>
+<!-- Globals read/written by the harness executor: timeout_multiplier scales
+     test timeouts; win is a window handle (NOTE(review): presumably the
+     window running the current test - confirm against the executor code). -->
+<script>
+var timeout_multiplier = 1;
+var win = null;
+</script>
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/testharnessreport.js
@@ -0,0 +1,15 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Configure testharness.js for wptrunner: suppress the inline results output
+// and disable the harness's own timeout (timeouts are enforced externally).
+var props = {output:false,
+             explicit_timeout: true};
+// When run in a window opened by testharness_runner.html, inherit its
+// timeout multiplier.
+if (window.opener && "timeout_multiplier" in window.opener) {
+    props["timeout_multiplier"] = window.opener.timeout_multiplier;
+}
+setup(props);
+// NOTE(review): the outer completion callback registers a second completion
+// callback instead of reporting directly; this appears intended to defer
+// reporting the results to the opener until other completion callbacks have
+// run - confirm against testharness.js semantics.
+add_completion_callback(function() {
+    add_completion_callback(function(tests, status) {
+        window.opener.done(tests, status)
+    })
+});
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/testrunner.py
@@ -0,0 +1,572 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import unicode_literals
+
+import multiprocessing
+import sys
+import threading
+import traceback
+from Queue import Empty
+from multiprocessing import Process, current_process, Queue
+
+from mozlog.structured import structuredlog
+
+# Special value used as a sentinel in various commands
+Stop = object()
+
+
+class MessageLogger(object):
+    """Logger that forwards log data through a message-sending callable.
+
+    Level methods (debug, info, ...) are attached dynamically below, one for
+    each level in structuredlog.log_levels.
+    """
+    def __init__(self, message_func):
+        # message_func is a callable taking (command, *args); in practice
+        # TestRunner.send_message, which puts tuples on the result queue
+        self.send_message = message_func
+
+    def _log_data(self, action, **kwargs):
+        # Wrap the log action and its payload in a "log" command for the parent
+        self.send_message("log", action, kwargs)
+
+    def process_output(self, process, data, command):
+        """Forward one line of output from a child process to the parent logger."""
+        self._log_data("process_output", process=process, data=data, command=command)
+
+
+def _log_func(level_name):
+    """Return an unbound MessageLogger method that logs at *level_name*."""
+    def log(self, message):
+        self._log_data(level_name.lower(), message=message)
+    log.__doc__ = """Log a message with level %s
+
+:param message: The string message to log
+""" % level_name
+    # str() keeps __name__ a bytestring under "from __future__ import unicode_literals"
+    log.__name__ = str(level_name).lower()
+    return log
+
+# Create a level method (debug, info, ...) on MessageLogger for each log
+# level defined by structuredlog
+for level_name in structuredlog.log_levels:
+    setattr(MessageLogger, level_name.lower(), _log_func(level_name))
+
+
+class TestRunner(object):
+    def __init__(self, test_queue, command_queue, result_queue, executor):
+        """Class implementing the main loop for running tests.
+
+        This class delegates the job of actually running a test to the executor
+        that is passed in.
+
+        :param test_queue: subprocess.Queue containing the tests to run
+        :param command_queue: subprocess.Queue used to send commands to the
+                              process
+        :param result_queue: subprocess.Queue used to send results to the
+                             parent TestManager process
+        :param executor: TestExecutor object that will actually run a test.
+        """
+        self.test_queue = test_queue
+        self.command_queue = command_queue
+        self.result_queue = result_queue
+
+        self.executor = executor
+        self.name = current_process().name
+        # Logging goes back to the parent over the result queue
+        self.logger = MessageLogger(self.send_message)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.teardown()
+
+    def setup(self):
+        """Set up the executor, handing it a reference back to this runner."""
+        self.executor.setup(self)
+
+    def teardown(self):
+        """Tear down the executor and notify the parent manager."""
+        self.executor.teardown()
+        self.send_message("runner_teardown")
+        self.result_queue = None
+        self.command_queue = None
+        # NOTE(review): self.browser is never assigned elsewhere in this
+        # class, so this looks vestigial - confirm before relying on it
+        self.browser = None
+
+    def run(self):
+        """Main loop accepting commands over the pipe and triggering
+        the associated methods"""
+        self.setup()
+        commands = {"run_test": self.run_test,
+                    "stop": self.stop}
+        while True:
+            command, args = self.command_queue.get()
+            try:
+                rv = commands[command](*args)
+            except Exception:
+                # Report the failure but keep the loop alive
+                self.send_message("error",
+                                  "Error running command %s with arguments %r:\n%s" %
+                                  (command, args, traceback.format_exc()))
+            else:
+                # Handlers return the Stop sentinel to end the loop
+                if rv is Stop:
+                    break
+
+    def stop(self):
+        """Command handler: signal the main loop to exit."""
+        return Stop
+
+    def run_test(self):
+        """Command handler: take the next test off the queue and run it.
+
+        Asks the manager for a restart if the executor has died; returns the
+        Stop sentinel once the queue is exhausted.
+        """
+        if not self.executor.is_alive():
+            self.send_message("restart_runner")
+            return
+        try:
+            # Need to block here just to allow for contention with other processes
+            test = self.test_queue.get(block=True, timeout=1)
+        except Empty:
+            self.logger.info("No more tests")
+            return Stop
+        else:
+            self.send_message("test_start", test)
+        try:
+            return self.executor.run_test(test)
+        except Exception:
+            self.logger.critical(traceback.format_exc())
+            raise
+
+    def send_message(self, command, *args):
+        """Send a (command, args) tuple to the parent manager."""
+        self.result_queue.put((command, args))
+
+
+def start_runner(test_queue, runner_command_queue, runner_result_queue,
+                 executor_cls, executor_kwargs,
+                 executor_browser_cls, executor_browser_kwargs,
+                 stop_flag):
+    """Launch a TestRunner in a new process.
+
+    This is the child-process entry point: it constructs the executor's
+    browser handle and the executor itself, then runs the TestRunner loop
+    until the test queue is exhausted or an error occurs.
+
+    :param stop_flag: shared Event set to request a global shutdown
+    """
+    try:
+        browser = executor_browser_cls(**executor_browser_kwargs)
+        executor = executor_cls(browser, **executor_kwargs)
+        with TestRunner(test_queue, runner_command_queue, runner_result_queue, executor) as runner:
+            try:
+                runner.run()
+            except KeyboardInterrupt:
+                # Ctrl-C in the child: request a clean global stop
+                stop_flag.set()
+    except Exception:
+        # Report the failure both over the queue and on stderr, then stop
+        runner_result_queue.put(("log", ("critical", {"message": traceback.format_exc()})))
+        print >> sys.stderr, traceback.format_exc()
+        stop_flag.set()
+    finally:
+        # Drop queue references so their feeder threads can exit
+        runner_command_queue = None
+        runner_result_queue = None
+        runner_result_queue = None
+
+
+manager_count = 0
+
+
+def next_manager_number():
+    """Return the next sequential manager number (1-based).
+
+    NOTE(review): the increment is not thread-safe; this assumes all
+    TestRunnerManagers are constructed from a single thread - confirm.
+    """
+    global manager_count
+    local = manager_count = manager_count + 1
+    return local
+
+
+class TestRunnerManager(threading.Thread):
+    init_lock = threading.Lock()
+
+    def __init__(self, suite_name, tests_queue, browser_cls, browser_kwargs,
+                 executor_cls, executor_kwargs, stop_flag, pause_on_unexpected=False):
+        """Thread that owns a single TestRunner process and any processes required
+        by the TestRunner (e.g. the Firefox binary).
+
+        TestRunnerManagers are responsible for launching the browser process and the
+        runner process, and for logging the test progress. The actual test running
+        is done by the TestRunner. In particular they:
+
+        * Start the binary of the program under test
+        * Start the TestRunner
+        * Tell the TestRunner to start a test, if any
+        * Log that the test started
+        * Log the test results
+        * Take any remedial action required e.g. restart crashed or hung
+          processes
+        """
+        self.suite_name = suite_name
+        self.tests_queue = tests_queue
+
+        self.browser_cls = browser_cls
+        self.browser_kwargs = browser_kwargs
+
+        self.executor_cls = executor_cls
+        self.executor_kwargs = executor_kwargs
+
+        self.browser = None
+        self.browser_pid = None
+
+        # Flags used to shut down this thread if we get a sigint
+        self.parent_stop_flag = stop_flag
+        self.child_stop_flag = multiprocessing.Event()
+
+        self.pause_on_unexpected = pause_on_unexpected
+
+        self.manager_number = next_manager_number()
+
+        self.command_queue = Queue()
+        self.remote_queue = Queue()
+
+        self.test_runner_proc = None
+
+        threading.Thread.__init__(self, name="Thread-TestrunnerManager-%i" % self.manager_number)
+        # This is started in the actual new thread
+        self.logger = None
+
+        # The test that is currently running
+        self.test = None
+
+        self.unexpected_count = 0
+
+        # This may not really be what we want
+        self.daemon = True
+
+        self.init_fail_count = 0
+        self.max_init_fails = 5
+        self.init_timer = None
+
+        self.restart_count = 0
+        self.max_restarts = 5
+
+    def run(self):
+        """Main loop for the TestManager.
+
+        TestManagers generally receive commands from their
+        TestRunner updating them on the status of a test. They
+        may also have a stop flag set by the main thread indicating
+        that the manager should shut down the next time the event loop
+        spins."""
+        self.logger = structuredlog.StructuredLogger(self.suite_name)
+        with self.browser_cls(self.logger, **self.browser_kwargs) as browser:
+            self.browser = browser
+            try:
+                self.init()
+                while True:
+                    commands = {"init_succeeded": self.init_succeeded,
+                                "init_failed": self.init_failed,
+                                "test_start": self.test_start,
+                                "test_ended": self.test_ended,
+                                "restart_runner": self.restart_runner,
+                                "runner_teardown": self.runner_teardown,
+                                "log": self.log,
+                                "error": self.error}
+                    try:
+                        command, data = self.command_queue.get(True, 1)
+                    except IOError:
+                        if not self.should_stop():
+                            self.logger.error("Got IOError from poll")
+                            self.restart_count += 1
+                            if self.restart_runner() is Stop:
+                                break
+                    except Empty:
+                        command = None
+
+                    if self.should_stop():
+                        self.logger.debug("A flag was set; stopping")
+                        break
+
+                    if command is not None:
+                        self.restart_count = 0
+                        if commands[command](*data) is Stop:
+                            break
+                    else:
+                        if not self.test_runner_proc.is_alive():
+                            if not self.command_queue.empty():
+                                # We got a new message so process that
+                                continue
+
+                            # If we got to here the runner presumably shut down
+                            # unexpectedly
+                            self.logger.info("Test runner process shut down")
+
+                            if self.test is not None:
+                                # This could happen if the test runner crashed for some other
+                                # reason
+                                # Need to consider the unlikely case where one test causes the
+                                # runner process to repeatedly die
+                                self.logger.info("Last test did not complete, requeueing")
+                                self.requeue_test()
+                            self.logger.warning(
+                                "More tests found, but runner process died, restarting")
+                            self.restart_count += 1
+                            if self.restart_runner() is Stop:
+                                break
+            finally:
+                self.logger.debug("TestRunnerManager main loop terminating, starting cleanup")
+                self.stop_runner()
+                self.teardown()
+                self.logger.debug("TestRunnerManager main loop terminated")
+
+    def should_stop(self):
+        return self.child_stop_flag.is_set() or self.parent_stop_flag.is_set()
+
+    def init(self):
+        """Launch the browser that is being tested,
+        and the TestRunner process that will run the tests."""
+        # It seems that this lock is helpful to prevent some race that otherwise
+        # sometimes stops the spawned processes initalising correctly, and
+        # leaves this thread hung
+        if self.init_timer is not None:
+            self.init_timer.cancel()
+
+        self.logger.debug("Init called, starting browser and runner")
+
+        def init_failed():
+            # This is called from a seperate thread, so we send a message to the
+            # main loop so we get back onto the manager thread
+            self.logger.debug("init_failed called from timer")
+            if self.command_queue:
+                self.command_queue.put(("init_failed", ()))
+            else:
+                self.logger.debug("Setting child stop flag in init_failed")
+                self.child_stop_flag.set()
+
+        with self.init_lock:
+            # To guard against cases where we fail to connect with marionette for
+            # whatever reason
+            self.init_timer = threading.Timer(self.browser.init_timeout, init_failed)
+            try:
+                self.init_timer.start()
+                self.browser.start()
+                self.browser_pid = self.browser.pid()
+                self.start_test_runner()
+            except:
+                self.logger.warning("Failure during init %s" % traceback.format_exc())
+                self.init_timer.cancel()
+                self.logger.error(traceback.format_exc())
+                succeeded = False
+            else:
+                succeeded = True
+
+        # This has to happen after the lock is released
+        if not succeeded:
+            self.init_failed()
+
+    def init_succeeded(self):
+        """Callback when we have started the browser, connected via
+        marionette, and we are ready to start testing"""
+        self.logger.debug("Init succeeded")
+        self.init_timer.cancel()
+        self.init_fail_count = 0
+        self.start_next_test()
+
+    def init_failed(self):
+        """Callback when we can't connect to the browser via
+        marionette for some reason"""
+        self.init_fail_count += 1
+        self.logger.error("Init failed %i" % self.init_fail_count)
+        self.init_timer.cancel()
+        if self.init_fail_count < self.max_init_fails:
+            self.restart_runner()
+        else:
+            self.logger.critical("Test runner failed to initialise correctly; shutting down")
+            return Stop
+
+    def start_test_runner(self):
+        # Note that we need to be careful to start the browser before the
+        # test runner to ensure that any state set when the browser is started
+        # can be passed in to the test runner.
+        assert self.command_queue is not None
+        assert self.remote_queue is not None
+        executor_browser_cls, executor_browser_kwargs = self.browser.executor_browser()
+        args = (self.tests_queue,
+                self.remote_queue,
+                self.command_queue,
+                self.executor_cls,
+                self.executor_kwargs,
+                executor_browser_cls,
+                executor_browser_kwargs,
+                self.child_stop_flag)
+        self.test_runner_proc = Process(target=start_runner,
+                                        args=args,
+                                        name="Thread-TestRunner-%i" % self.manager_number)
+        self.test_runner_proc.start()
+        self.logger.debug("Test runner started")
+
+    def send_message(self, command, *args):
+        self.remote_queue.put((command, args))
+
+    def cleanup(self):
+        if self.init_timer is not None:
+            self.init_timer.cancel()
+        self.logger.debug("TestManager cleanup")
+        while True:
+            try:
+                self.logger.warning(" ".join(map(repr, self.command_queue.get_nowait())))
+            except Empty:
+                break
+
+        while True:
+            try:
+                self.logger.warning(" ".join(map(repr, self.remote_queue.get_nowait())))
+            except Empty:
+                break
+
+    def teardown(self):
+        self.logger.debug("teardown in testrunnermanager")
+        self.test_runner_proc = None
+        self.command_queue.close()
+        self.remote_queue.close()
+        self.command_queue = None
+        self.remote_queue = None
+
+    def ensure_runner_stopped(self):
+        if self.test_runner_proc is None:
+            return
+
+        self.test_runner_proc.join(10)
+        if self.test_runner_proc.is_alive():
+            # This might leak a file handle from the queue
+            self.logger.warning("Forcibly terminating runner process")
+            self.test_runner_proc.terminate()
+            self.test_runner_proc.join(10)
+        else:
+            self.logger.debug("Testrunner exited with code %i" % self.test_runner_proc.exitcode)
+
+    def runner_teardown(self):
+        self.ensure_runner_stopped()
+        return Stop
+
+    def stop_runner(self):
+        """Stop the TestRunner and the Firefox binary."""
+        self.logger.debug("Stopping runner")
+        if self.test_runner_proc is None:
+            return
+        try:
+            self.browser.stop()
+            if self.test_runner_proc.is_alive():
+                self.send_message("stop")
+                self.ensure_runner_stopped()
+        finally:
+            self.cleanup()
+
+    def start_next_test(self):
+        self.send_message("run_test")
+
+    def requeue_test(self):
+        self.test_queue.put(self.test)
+        self.test = None
+
+    def test_start(self, test):
+        self.test = test
+        self.logger.test_start(test.id)
+
+    def test_ended(self, test, results):
+        """Handle the end of a test.
+
+        Output the result of each subtest, and the result of the overall
+        harness to the logs.
+        """
+        assert test == self.test
+        # Write the result of each subtest
+        file_result, test_results = results
+        subtest_unexpected = False
+        for result in test_results:
+            if test.disabled(result.name):
+                continue
+            expected = test.expected(result.name)
+            is_unexpected = expected != result.status
+
+            if is_unexpected:
+                self.unexpected_count += 1
+                self.logger.debug("Unexpected count in this thread %i" % self.unexpected_count)
+                subtest_unexpected = True
+            self.logger.test_status(test.id,
+                                    result.name,
+                                    result.status,
+                                    message=result.message,
+                                    expected=expected)
+
+        # TODO: consider changing result if there is a crash dump file
+
+        # Write the result of the test harness
+        expected = test.expected()
+        status = file_result.status if file_result.status != "EXTERNAL-TIMEOUT" else "TIMEOUT"
+        is_unexpected = expected != status
+        if is_unexpected:
+            self.unexpected_count += 1
+            self.logger.debug("Unexpected count in this thread %i" % self.unexpected_count)
+        if status == "CRASH":
+            self.browser.log_crash(self.logger, process=self.browser_pid, test=test.id)
+
+        self.logger.test_end(test.id,
+                             status,
+                             message=file_result.message,
+                             expected=expected)
+
+        self.test = None
+
+        if self.pause_on_unexpected and (subtest_unexpected or is_unexpected):
+            self.logger.info("Got an unexpected result, pausing until the browser exists")
+            self.browser.runner.process_handler.wait()
+
+        # Handle starting the next test, with a runner restart if required
+        if (file_result.status in ("CRASH", "EXTERNAL-TIMEOUT") or
+            subtest_unexpected or is_unexpected):
+            return self.restart_runner()
+        else:
+            return self.start_next_test()
+
+    def restart_runner(self):
+        """Stop and restart the TestRunner"""
+        if self.restart_count >= self.max_restarts:
+            return Stop
+        self.logger.info("Restarting runner")
+        self.stop_runner()
+        self.init()
+
+    def log(self, action, kwargs):
+        getattr(self.logger, action)(**kwargs)
+
+    def error(self, message):
+        self.logger.error(message)
+        self.restart_runner()
+
+
+class ManagerGroup(object):
+    def __init__(self, suite_name, size, browser_cls, browser_kwargs,
+                 executor_cls, executor_kwargs, pause_on_unexpected=False):
+        """Main thread object that owns all the TestManager threads.
+
+        :param size: number of TestRunnerManager threads to run in parallel
+        """
+        self.suite_name = suite_name
+        self.size = size
+        self.browser_cls = browser_cls
+        self.browser_kwargs = browser_kwargs
+        self.executor_cls = executor_cls
+        self.executor_kwargs = executor_kwargs
+        # Set of running TestRunnerManager threads
+        self.pool = set()
+        # Event that is polled by threads so that they can gracefully exit in the face
+        # of sigint
+        self.stop_flag = threading.Event()
+        self.pause_on_unexpected = pause_on_unexpected
+        self.logger = structuredlog.StructuredLogger(suite_name)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.stop()
+
+    def start(self, tests_queue):
+        """Start all managers in the group"""
+        self.logger.debug("Using %i processes" % self.size)
+        self.tests_queue = tests_queue
+        for _ in range(self.size):
+            manager = TestRunnerManager(self.suite_name,
+                                        tests_queue,
+                                        self.browser_cls,
+                                        self.browser_kwargs,
+                                        self.executor_cls,
+                                        self.executor_kwargs,
+                                        self.stop_flag,
+                                        self.pause_on_unexpected)
+            manager.start()
+            self.pool.add(manager)
+
+    def is_alive(self):
+        """Boolean indicating whether any manager in the group is still alive"""
+        return any(manager.is_alive() for manager in self.pool)
+
+    def wait(self):
+        """Wait for all the managers in the group to finish"""
+        for item in self.pool:
+            item.join()
+
+    def stop(self):
+        """Set the stop flag so that all managers in the group stop as soon
+        as possible"""
+        self.stop_flag.set()
+        self.logger.debug("Stop flag set in ManagerGroup")
+
+    def unexpected_count(self):
+        """Return the total number of unexpected results across all managers."""
+        return sum(item.unexpected_count for item in self.pool)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/tests/__init__.py
@@ -0,0 +1,3 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/tests/test_chunker.py
@@ -0,0 +1,79 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unittest
+import sys
+sys.path.insert(0, "..")
+
+from wptrunner import wptrunner
+
+class MockTest(object):
+    """Minimal stand-in for a wpt test item, providing just the attributes
+    the chunker reads (id, item_type, timeout)."""
+    def __init__(self, id, timeout=10):
+        self.id = id
+        self.item_type = "testharness"
+        self.timeout = timeout
+
+
+def make_mock_manifest(*items):
+    rv = []
+    for dir_path, num_tests in items:
+        for i in range(num_tests):
+            rv.append((dir_path + "/%i.test" % i, set([MockTest(i)])))
+    return rv
+
+
+class TestEqualTimeChunker(unittest.TestCase):
+    """Tests for wptrunner.EqualTimeChunker: each case splits a mock manifest
+    into 3 chunks and checks the directory-boundary-respecting split."""
+
+    def test_include_all(self):
+        # Three equal-sized directories split evenly, one per chunk
+        tests = make_mock_manifest(("a", 10), ("a/b", 10), ("c", 10))
+
+        chunk_1 = list(wptrunner.EqualTimeChunker(3, 1)(tests))
+        chunk_2 = list(wptrunner.EqualTimeChunker(3, 2)(tests))
+        chunk_3 = list(wptrunner.EqualTimeChunker(3, 3)(tests))
+
+        self.assertEquals(tests[:10], chunk_1)
+        self.assertEquals(tests[10:20], chunk_2)
+        self.assertEquals(tests[20:], chunk_3)
+
+    def test_include_all_1(self):
+        # Two small sibling directories are grouped into the first chunk
+        tests = make_mock_manifest(("a", 5), ("a/b", 5), ("c", 10), ("d", 10))
+
+        chunk_1 = list(wptrunner.EqualTimeChunker(3, 1)(tests))
+        chunk_2 = list(wptrunner.EqualTimeChunker(3, 2)(tests))
+        chunk_3 = list(wptrunner.EqualTimeChunker(3, 3)(tests))
+
+        self.assertEquals(tests[:10], chunk_1)
+        self.assertEquals(tests[10:20], chunk_2)
+        self.assertEquals(tests[20:], chunk_3)
+
+    def test_long(self):
+        # One dominant directory still forms a single chunk; the tiny
+        # directories get a chunk each
+        tests = make_mock_manifest(("a", 100), ("a/b", 1), ("c", 1))
+
+        chunk_1 = list(wptrunner.EqualTimeChunker(3, 1)(tests))
+        chunk_2 = list(wptrunner.EqualTimeChunker(3, 2)(tests))
+        chunk_3 = list(wptrunner.EqualTimeChunker(3, 3)(tests))
+
+        self.assertEquals(tests[:100], chunk_1)
+        self.assertEquals(tests[100:101], chunk_2)
+        self.assertEquals(tests[101:102], chunk_3)
+
+    def test_long_1(self):
+        # As above, but with the dominant directory in the middle
+        tests = make_mock_manifest(("a", 1), ("a/b", 100), ("c", 1))
+
+        chunk_1 = list(wptrunner.EqualTimeChunker(3, 1)(tests))
+        chunk_2 = list(wptrunner.EqualTimeChunker(3, 2)(tests))
+        chunk_3 = list(wptrunner.EqualTimeChunker(3, 3)(tests))
+
+        self.assertEquals(tests[:1], chunk_1)
+        self.assertEquals(tests[1:101], chunk_2)
+        self.assertEquals(tests[101:102], chunk_3)
+
+    def test_too_few_dirs(self):
+        # More chunks than directories cannot be satisfied
+        with self.assertRaises(ValueError):
+            tests = make_mock_manifest(("a", 1), ("a/b", 100), ("c", 1))
+            list(wptrunner.EqualTimeChunker(4, 1)(tests))
+
+
+if __name__ == "__main__":
+    unittest.main()
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/tests/test_hosts.py
@@ -0,0 +1,59 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unittest
+import sys
+from cStringIO import StringIO
+
+sys.path.insert(0, "..")
+
+import hosts
+
+
+class HostsTest(unittest.TestCase):
+    """Round-trip tests for hosts.HostsFile parsing and serialisation."""
+
+    def do_test(self, input, expected):
+        """Parse *input* as a hosts file and assert it serialises to *expected*.
+
+        NOTE(review): the parameter name shadows the builtin ``input``;
+        harmless here but worth renaming in a follow-up.
+        """
+        host_file = hosts.HostsFile.from_file(StringIO(input))
+        self.assertEquals(host_file.to_string(), expected)
+
+    def test_simple(self):
+        # Whitespace between fields is normalised to single spaces
+        self.do_test("""127.0.0.1    \tlocalhost  alias # comment
+# Another comment""",
+                     """127.0.0.1 localhost alias # comment
+# Another comment
+""")
+
+    def test_blank_lines(self):
+        # Blank and whitespace-only lines are dropped
+        self.do_test("""127.0.0.1    \tlocalhost  alias # comment
+
+\r
+    \t
+# Another comment""",
+                     """127.0.0.1 localhost alias # comment
+# Another comment
+""")
+
+    def test_whitespace(self):
+        # Leading/trailing whitespace (including \r) is stripped
+        self.do_test("""    \t127.0.0.1    \tlocalhost  alias # comment     \r
+    \t# Another comment""",
+                     """127.0.0.1 localhost alias # comment
+# Another comment
+""")
+
+    def test_alignment(self):
+        # Columns are padded so entries line up across rows
+        self.do_test("""127.0.0.1    \tlocalhost  alias
+192.168.1.1 another_host    another_alias
+""","""127.0.0.1   localhost    alias
+192.168.1.1 another_host another_alias
+"""
+)
+
+    def test_multiple_same_name(self):
+        # The semantics are that we overwrite earlier entries with the same name
+        self.do_test("""127.0.0.1    \tlocalhost  alias
+192.168.1.1 localhost    another_alias""","""192.168.1.1 localhost another_alias
+"""
+)
+
+if __name__ == "__main__":
+    unittest.main()
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/tests/test_update.py
@@ -0,0 +1,322 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unittest
+import StringIO
+
+from .. import metadata, manifestupdate
+from mozlog.structured import structuredlog, handlers, formatters
+
+
class TestExpectedUpdater(unittest.TestCase):
    """Tests for updating expectation (.ini) manifests from structured-log
    test results, via metadata.ExpectedUpdater."""

    def create_manifest(self, data, test_path="path/to/test.ini"):
        # Compile an expectation-manifest AST from an ini-format string.
        f = StringIO.StringIO(data)
        return manifestupdate.compile(f, test_path)

    def create_updater(self, data, **kwargs):
        # `data` is a list of (manifest_path, test_ids, manifest_text)
        # tuples; build the expected-manifest tree and the
        # test-id -> manifest-path map that ExpectedUpdater needs.
        expected_tree = {}
        id_path_map = {}
        for test_path, test_ids, manifest_str in data:
            if isinstance(test_ids, (str, unicode)):
                test_ids = [test_ids]
            expected_tree[test_path] = self.create_manifest(manifest_str, test_path)
            for test_id in test_ids:
                id_path_map[test_id] = test_path

        return metadata.ExpectedUpdater(expected_tree, id_path_map, **kwargs)

    def create_log(self, *args, **kwargs):
        # Build a JSON structured log containing the given (action, params)
        # entries wrapped in suite_start/suite_end, and return it as a
        # file-like object rewound to the start.
        logger = structuredlog.StructuredLogger("expected_test")
        data = StringIO.StringIO()
        handler = handlers.StreamHandler(data, formatters.JSONFormatter())
        logger.add_handler(handler)

        log_entries = ([("suite_start", {"tests": [], "run_info": kwargs.get("run_info", {})})] +
                       list(args) +
                       [("suite_end", {})])

        for item in log_entries:
            # NOTE(review): this rebinds the function's own `kwargs`
            # parameter; harmless because run_info was read above, but a
            # different local name would be clearer.
            action, kwargs = item
            getattr(logger, action)(**kwargs)
        logger.remove_handler(handler)
        data.seek(0)
        return data


    def coalesce_results(self, trees):
        # Collapse the per-run results recorded on each node into final
        # expectation values, for every test and subtest in each tree.
        for tree in trees:
            for test in tree.iterchildren():
                for subtest in test.iterchildren():
                    subtest.coalesce_expected()
                test.coalesce_expected()

    def test_update_0(self):
        # A subtest that now passes has its FAIL expectation removed,
        # leaving the manifest empty.
        prev_data = [("path/to/test.htm.ini", ["/path/to/test.htm"], """[test.htm]
  type: testharness
  [test1]
    expected: FAIL""")]

        new_data = self.create_log(("test_start", {"test": "/path/to/test.htm"}),
                                   ("test_status", {"test": "/path/to/test.htm",
                                                    "subtest": "test1",
                                                    "status": "PASS",
                                                    "expected": "FAIL"}),
                                   ("test_end", {"test": "/path/to/test.htm",
                                                 "status": "OK"}))
        updater = self.create_updater(prev_data)
        updater.update_from_log(new_data)

        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
        self.coalesce_results([new_manifest])
        self.assertTrue(new_manifest.is_empty)

    def test_update_1(self):
        # A changed failure status (ERROR -> FAIL) replaces the expectation.
        test_id = "/path/to/test.htm"
        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
  type: testharness
  [test1]
    expected: ERROR""")]

        new_data = self.create_log(("test_start", {"test": test_id}),
                                   ("test_status", {"test": test_id,
                                                    "subtest": "test1",
                                                    "status": "FAIL",
                                                    "expected": "ERROR"}),
                                   ("test_end", {"test": test_id,
                                                 "status": "OK"}))
        updater = self.create_updater(prev_data)
        updater.update_from_log(new_data)

        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
        self.coalesce_results([new_manifest])
        self.assertFalse(new_manifest.is_empty)
        self.assertEquals(new_manifest.get_test(test_id).children[0].get("expected"), "FAIL")

    def test_new_subtest(self):
        # A subtest not present in the old manifest gets a new entry with
        # its observed (failing) status.
        test_id = "/path/to/test.htm"
        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
  type: testharness
  [test1]
    expected: FAIL""")]

        new_data = self.create_log(("test_start", {"test": test_id}),
                                   ("test_status", {"test": test_id,
                                                    "subtest": "test1",
                                                    "status": "FAIL",
                                                    "expected": "FAIL"}),
                                   ("test_status", {"test": test_id,
                                                    "subtest": "test2",
                                                    "status": "FAIL",
                                                    "expected": "PASS"}),
                                   ("test_end", {"test": test_id,
                                                 "status": "OK"}))
        updater = self.create_updater(prev_data)
        updater.update_from_log(new_data)

        new_manifest = updater.expected_tree["path/to/test.htm.ini"]
        self.coalesce_results([new_manifest])
        self.assertFalse(new_manifest.is_empty)
        self.assertEquals(new_manifest.get_test(test_id).children[0].get("expected"), "FAIL")
        self.assertEquals(new_manifest.get_test(test_id).children[1].get("expected"), "FAIL")

    def test_update_multiple_0(self):
        # Logs from two platforms with different statuses produce
        # run_info-conditional expectations.
        test_id = "/path/to/test.htm"
        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
  type: testharness
  [test1]
    expected: FAIL""")]

        new_data_0 = self.create_log(("test_start", {"test": test_id}),
                                     ("test_status", {"test": test_id,
                                                      "subtest": "test1",
                                                      "status": "FAIL",
                                                      "expected": "FAIL"}),
                                     ("test_end", {"test": test_id,
                                                   "status": "OK"}),
                                     run_info={"debug": False, "os": "osx"})

        new_data_1 = self.create_log(("test_start", {"test": test_id}),
                                     ("test_status", {"test": test_id,
                                                      "subtest": "test1",
                                                      "status": "TIMEOUT",
                                                      "expected": "FAIL"}),
                                     ("test_end", {"test": test_id,
                                                   "status": "OK"}),
                                     run_info={"debug": False, "os": "linux"})
        updater = self.create_updater(prev_data)

        updater.update_from_log(new_data_0)
        updater.update_from_log(new_data_1)

        new_manifest = updater.expected_tree["path/to/test.htm.ini"]

        self.coalesce_results([new_manifest])

        self.assertFalse(new_manifest.is_empty)
        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
            "expected", {"debug": False, "os": "osx"}), "FAIL")
        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
            "expected", {"debug": False, "os": "linux"}), "TIMEOUT")

    def test_update_multiple_1(self):
        # Same as test_update_multiple_0, but additionally checks that a
        # platform not seen in either log (windows) falls back to the
        # original FAIL expectation.
        test_id = "/path/to/test.htm"
        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
  type: testharness
  [test1]
    expected: FAIL""")]

        new_data_0 = self.create_log(("test_start", {"test": test_id}),
                                     ("test_status", {"test": test_id,
                                                      "subtest": "test1",
                                                      "status": "FAIL",
                                                      "expected": "FAIL"}),
                                     ("test_end", {"test": test_id,
                                                   "status": "OK"}),
                                     run_info={"debug": False, "os": "osx"})

        new_data_1 = self.create_log(("test_start", {"test": test_id}),
                                     ("test_status", {"test": test_id,
                                                      "subtest": "test1",
                                                      "status": "TIMEOUT",
                                                      "expected": "FAIL"}),
                                     ("test_end", {"test": test_id,
                                                   "status": "OK"}),
                                     run_info={"debug": False, "os": "linux"})
        updater = self.create_updater(prev_data)

        updater.update_from_log(new_data_0)
        updater.update_from_log(new_data_1)

        new_manifest = updater.expected_tree["path/to/test.htm.ini"]

        self.coalesce_results([new_manifest])

        self.assertFalse(new_manifest.is_empty)
        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
            "expected", {"debug": False, "os": "osx"}), "FAIL")
        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
            "expected", {"debug": False, "os": "linux"}), "TIMEOUT")
        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
            "expected", {"debug": False, "os": "windows"}), "FAIL")

    def test_update_multiple_2(self):
        # Conditions can differ on debug as well as os.
        test_id = "/path/to/test.htm"
        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
  type: testharness
  [test1]
    expected: FAIL""")]

        new_data_0 = self.create_log(("test_start", {"test": test_id}),
                                     ("test_status", {"test": test_id,
                                                      "subtest": "test1",
                                                      "status": "FAIL",
                                                      "expected": "FAIL"}),
                                     ("test_end", {"test": test_id,
                                                   "status": "OK"}),
                                     run_info={"debug": False, "os": "osx"})

        new_data_1 = self.create_log(("test_start", {"test": test_id}),
                                     ("test_status", {"test": test_id,
                                                      "subtest": "test1",
                                                      "status": "TIMEOUT",
                                                      "expected": "FAIL"}),
                                     ("test_end", {"test": test_id,
                                                   "status": "OK"}),
                                     run_info={"debug": True, "os": "osx"})
        updater = self.create_updater(prev_data)

        updater.update_from_log(new_data_0)
        updater.update_from_log(new_data_1)

        new_manifest = updater.expected_tree["path/to/test.htm.ini"]

        self.coalesce_results([new_manifest])

        self.assertFalse(new_manifest.is_empty)
        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
            "expected", {"debug": False, "os": "osx"}), "FAIL")
        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
            "expected", {"debug": True, "os": "osx"}), "TIMEOUT")

    def test_update_multiple_3(self):
        # Pre-existing conditional expectations are updated per-condition.
        test_id = "/path/to/test.htm"
        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
  type: testharness
  [test1]
    expected:
      if debug: FAIL
      if not debug and os == "osx": TIMEOUT""")]

        new_data_0 = self.create_log(("test_start", {"test": test_id}),
                                     ("test_status", {"test": test_id,
                                                      "subtest": "test1",
                                                      "status": "FAIL",
                                                      "expected": "FAIL"}),
                                     ("test_end", {"test": test_id,
                                                   "status": "OK"}),
                                     run_info={"debug": False, "os": "osx"})

        new_data_1 = self.create_log(("test_start", {"test": test_id}),
                                     ("test_status", {"test": test_id,
                                                      "subtest": "test1",
                                                      "status": "TIMEOUT",
                                                      "expected": "FAIL"}),
                                     ("test_end", {"test": test_id,
                                                   "status": "OK"}),
                                     run_info={"debug": True, "os": "osx"})
        updater = self.create_updater(prev_data)

        updater.update_from_log(new_data_0)
        updater.update_from_log(new_data_1)

        new_manifest = updater.expected_tree["path/to/test.htm.ini"]

        self.coalesce_results([new_manifest])

        self.assertFalse(new_manifest.is_empty)
        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
            "expected", {"debug": False, "os": "osx"}), "FAIL")
        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
            "expected", {"debug": True, "os": "osx"}), "TIMEOUT")

    def test_update_ignore_existing(self):
        # With ignore_existing=True the old conditional expectations are
        # discarded; both runs observed FAIL, so FAIL becomes the
        # unconditional expectation on every platform.
        test_id = "/path/to/test.htm"
        prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm]
  type: testharness
  [test1]
    expected:
      if debug: TIMEOUT
      if not debug and os == "osx": NOTRUN""")]

        new_data_0 = self.create_log(("test_start", {"test": test_id}),
                                     ("test_status", {"test": test_id,
                                                      "subtest": "test1",
                                                      "status": "FAIL",
                                                      "expected": "PASS"}),
                                     ("test_end", {"test": test_id,
                                                   "status": "OK"}),
                                     run_info={"debug": False, "os": "linux"})

        new_data_1 = self.create_log(("test_start", {"test": test_id}),
                                     ("test_status", {"test": test_id,
                                                      "subtest": "test1",
                                                      "status": "FAIL",
                                                      "expected": "PASS"}),
                                     ("test_end", {"test": test_id,
                                                   "status": "OK"}),
                                     run_info={"debug": True, "os": "windows"})
        updater = self.create_updater(prev_data, ignore_existing=True)

        updater.update_from_log(new_data_0)
        updater.update_from_log(new_data_1)

        new_manifest = updater.expected_tree["path/to/test.htm.ini"]

        self.coalesce_results([new_manifest])

        self.assertFalse(new_manifest.is_empty)
        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
            "expected", {"debug": True, "os": "osx"}), "FAIL")
        self.assertEquals(new_manifest.get_test(test_id).children[0].get(
            "expected", {"debug": False, "os": "osx"}), "FAIL")
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/update.py
@@ -0,0 +1,373 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import shutil
+import subprocess
+import sys
+import traceback
+import uuid
+
+import vcs
+from vcs import git, hg
+manifest = None
+import metadata
+import wptcommandline
+
+base_path = os.path.abspath(os.path.split(__file__)[0])
+
+
def do_test_relative_imports(test_root):
    """Make the web-platform-tests `manifest` module importable.

    The manifest code ships inside the test checkout, so it can only be
    imported once the checkout exists; the imported module is stored in
    the module-level `manifest` global (initialised to None above).
    """
    global manifest

    sys.path.insert(0, os.path.join(test_root))
    sys.path.insert(0, os.path.join(test_root, "tools", "scripts"))
    import manifest
+
+
class RepositoryError(Exception):
    """Raised when the web-platform-tests checkout is in an unusable state
    (e.g. it has uncommitted local changes)."""
+
+
class WebPlatformTests(object):
    """A checkout of the upstream web-platform-tests repository.

    Handles syncing the checkout to a target revision on a throwaway local
    branch and copying the tracked work tree (including submodules) into
    the in-tree test directory.
    """
    def __init__(self, remote_url, repo_path, rev="origin/master"):
        self.remote_url = remote_url
        self.repo_path = repo_path
        self.target_rev = rev
        # Unique branch name so repeated updates never collide.
        self.local_branch = uuid.uuid4().hex

    def update(self):
        """Clone or fetch the remote and check out target_rev on a local branch."""
        if not os.path.exists(self.repo_path):
            os.makedirs(self.repo_path)
        if not vcs.is_git_root(self.repo_path):
            git("clone", self.remote_url, ".", repo=self.repo_path)
            git("checkout", "-b", self.local_branch, self.target_rev, repo=self.repo_path)
            assert vcs.is_git_root(self.repo_path)
        else:
            # Refuse to clobber uncommitted changes in an existing checkout.
            if git("status", "--porcelain", repo=self.repo_path):
                raise RepositoryError("Repository in %s not clean" % self.repo_path)

            git("fetch",
                self.remote_url,
                "%s:%s" % (self.target_rev,
                           self.local_branch),
                repo=self.repo_path)
            git("checkout", self.local_branch, repo=self.repo_path)
        git("submodule", "init", repo=self.repo_path)
        git("submodule", "update", "--init", "--recursive", repo=self.repo_path)

    @property
    def rev(self):
        # Current HEAD commit sha, or None if the checkout doesn't exist yet.
        if vcs.is_git_root(self.repo_path):
            return git("rev-parse", "HEAD", repo=self.repo_path).strip()
        else:
            return None

    def clean(self):
        """Detach HEAD from, then delete, the temporary local branch."""
        git("checkout", self.rev, repo=self.repo_path)
        git("branch", "-D", self.local_branch, repo=self.repo_path)

    def _tree_paths(self):
        # Paths of all files tracked at HEAD in the main repo and in each
        # submodule, expressed relative to the repository root.
        repo_paths = [self.repo_path] +  [os.path.join(self.repo_path, path)
                                          for path in self._submodules()]

        rv = []

        for repo_path in repo_paths:
            paths = git("ls-tree", "-r", "--name-only", "HEAD", repo=repo_path).split("\n")
            rel_path = os.path.relpath(repo_path, self.repo_path)
            rv.extend([os.path.join(rel_path, item.strip()) for item in paths if item.strip()])

        return rv

    def _submodules(self):
        # Repo-relative submodule paths: the second whitespace-separated
        # field of each `git submodule status` line.
        output = git("submodule", "status", "--recursive", repo=self.repo_path)
        rv = []
        for line in output.split("\n"):
            line = line.strip()
            if not line:
                continue
            parts = line.split(" ")
            rv.append(parts[1])
        return rv

    def copy_work_tree(self, dest):
        """Replace the contents of dest with the tracked files of the checkout.

        Also copies the harness's testharness_runner.html and
        testharnessreport.js into the locations the tests expect.
        """
        if os.path.exists(dest):
            assert os.path.isdir(dest)

        # NOTE(review): os.listdir raises if dest does not exist; callers
        # are expected to have created it first (see ensure_exists) — confirm.
        for sub_path in os.listdir(dest):
            path = os.path.join(dest, sub_path)
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)

        for tree_path in self._tree_paths():
            source_path = os.path.join(self.repo_path, tree_path)
            dest_path = os.path.join(dest, tree_path)

            dest_dir = os.path.split(dest_path)[0]
            # Only files are copied; directories are created as needed.
            if not os.path.isdir(source_path):
                if not os.path.exists(dest_dir):
                    os.makedirs(dest_dir)
                shutil.copy2(source_path, dest_path)

        for source, destination in [("testharness_runner.html", ""),
                                    ("testharnessreport.js", "resources/")]:
            source_path = os.path.join(base_path, source)
            dest_path = os.path.join(dest, destination, os.path.split(source)[1])
            shutil.copy2(source_path, dest_path)
+
+
class NoVCSTree(object):
    """Fallback tree type used when the working copy is not under version
    control. Presents the same interface as the VCS-backed trees, but every
    operation is a no-op, so updates simply modify files in place."""

    name = "non-vcs"

    def __init__(self, root=None):
        # Default to the current working directory when no root is given.
        self.root = os.path.abspath(os.curdir) if root is None else root

    @classmethod
    def is_type(cls, path):
        # Any path qualifies; this class is the catch-all fallback.
        return True

    def is_clean(self):
        # Nothing is tracked, so the tree is always considered clean.
        return True

    def add_new(self, prefix=None):
        pass

    def create_patch(self, patch_name, message):
        pass

    def update_patch(self, include=None):
        pass

    def commit_patch(self):
        pass
+
+
class HgTree(object):
    """Local Mercurial working tree, driven through mq patches."""

    name = "mercurial"

    def __init__(self, root=None):
        if root is None:
            root = hg("root").strip()
        self.root = root
        # All subsequent commands run with the repository root as cwd.
        self.hg = vcs.bind_to_repo(hg, self.root)

    @classmethod
    def is_type(cls, path):
        """Return True if path lives inside a Mercurial checkout."""
        try:
            hg("root", repo=path)
        except Exception:
            # Covers both a non-zero exit status and hg not being
            # installed at all (was a bare except:, which also swallowed
            # KeyboardInterrupt/SystemExit).
            return False
        return True

    def is_clean(self):
        """True when there are no uncommitted changes."""
        return self.hg("status").strip() == ""

    def add_new(self, prefix=None):
        """Stage untracked files, optionally restricted to prefix."""
        if prefix is not None:
            args = ("-I", prefix)
        else:
            args = ()
        self.hg("add", *args)

    def create_patch(self, patch_name, message):
        """Start a new mq patch to collect subsequent changes."""
        try:
            self.hg("qinit")
        except subprocess.CalledProcessError:
            # There is already a patch queue in this repo
            # Should only happen during development
            pass
        self.hg("qnew", patch_name, "-X", self.root, "-m", message)

    def update_patch(self, include=None):
        """Refresh the current mq patch, optionally limited to include paths."""
        if include is not None:
            args = []
            for item in include:
                args.extend(["-I", item])
        else:
            args = ()

        self.hg("qrefresh", *args)

    def commit_patch(self):
        """Turn the applied mq patch into a permanent changeset.

        Bug fix: the previous code called self.hg("qfinish",
        repo=self.repo_root), but this class has no repo_root attribute
        (it is named root), so commit_patch always raised AttributeError.
        self.hg is already bound to the repository root, so no repo
        argument is needed at all.
        """
        self.hg("qfinish")
+
+
class GitTree(object):
    """Local git working tree; a "patch" is modelled as a branch plus the
    commits made on it."""

    name = "git"

    def __init__(self, root=None):
        if root is None:
            root = git("rev-parse", "--show-toplevel").strip()
        self.root = root
        self.git = vcs.bind_to_repo(git, self.root)
        # Commit message for the in-progress patch; set by create_patch.
        self.message = None

    @classmethod
    def is_type(cls, path):
        """Return True if path lives inside a git checkout."""
        try:
            git("rev-parse", "--show-toplevel", repo=path)
        except Exception:
            # Covers both a non-zero exit status and git not being
            # installed at all (was a bare except:, which also swallowed
            # KeyboardInterrupt/SystemExit).
            return False
        return True

    def is_clean(self):
        """True when there are no uncommitted changes.

        Bug fix: this previously compared plain `git status` output with
        "", but `git status` always produces output (branch information,
        "nothing to commit" etc.), so the tree was always reported dirty.
        --porcelain emits one line per changed path and nothing otherwise.
        """
        return self.git("status", "--porcelain").strip() == ""

    def add_new(self, prefix=None):
        """Stage changes, optionally restricted to prefix."""
        if prefix is None:
            # Bug fix: `git add -a` is not a valid option (-a belongs to
            # `git commit`); --all stages all changes including removals.
            args = ("--all",)
        else:
            args = ("--no-ignore-removal", prefix)
        self.git("add", *args)

    def create_patch(self, patch_name, message):
        # In git a patch is actually a branch
        self.message = message
        self.git("checkout", "-b", patch_name)

    def update_patch(self, include=None):
        """Commit the current changes (optionally only include paths) to the
        patch branch, reusing the message given to create_patch."""
        assert self.message is not None

        if include is not None:
            args = tuple(include)
        else:
            args = ()

        self.git("commit", "-m", self.message, *args)

    def commit_patch(self):
        # Nothing to do: commits on the branch are already permanent.
        pass
+
+
class Runner(object):
    """Context-manager base class for update steps.

    Stores the bug object plus arbitrary keyword configuration as instance
    attributes; cleanup() runs on context exit and is a hook for subclasses.
    """
    def __init__(self, bug, **kwargs):
        self.bug = bug
        # Promote every keyword argument to an instance attribute.
        self.__dict__.update(kwargs)

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        self.cleanup()

    def cleanup(self):
        # Default: nothing to clean up; subclasses may override.
        pass
+
+
def ensure_exists(path):
    """Create the directory path (including parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
+
+
def sync_tests(paths, local_tree, wpt, bug):
    """Update the wpt checkout and import it into the local tree as a patch.

    Returns (initial_manifest, new_manifest): the test manifests from
    before and after the sync, so the caller can recover the old revision.
    `bug` is currently unused (always None); the bug-comment calls below
    are deliberately disabled.
    """
    wpt.update()

    try:
        #bug.comment("Updating to %s" % wpt.rev)
        initial_manifest = metadata.load_test_manifest(paths["sync"], paths["metadata"])
        wpt.copy_work_tree(paths["test"])
        new_manifest = metadata.update_manifest(paths["sync"], paths["metadata"])

        local_tree.create_patch("web-platform-tests_update_%s" % wpt.rev,
                                "Update web-platform-tests to revision %s" % wpt.rev)
        local_tree.add_new(os.path.relpath(paths["test"], local_tree.root))
        local_tree.update_patch(include=[paths["test"], paths["metadata"]])
    except Exception as e:
        # Surface the traceback on stderr, then re-raise to the caller.
        #bug.comment("Update failed with error:\n %s" % traceback.format_exc())
        sys.stderr.write(traceback.format_exc())
        raise
    finally:
        # Cleanup of the temporary branch is intentionally disabled for now.
        pass  # wpt.clean()

    return initial_manifest, new_manifest
+
+
def update_metadata(paths, local_tree, wpt, initial_rev, bug, log_files, ignore_existing):
    """Regenerate expectation metadata from run logs and record it as a patch.

    `initial_rev` is the pre-sync wpt revision (may be None if no sync ran);
    `bug` is currently unused (always None); the bug-comment call below is
    deliberately disabled.
    """
    try:
        try:
            local_tree.create_patch("web-platform-tests_update_%s_metadata" % wpt.rev,
                                    "Update web-platform-tests expected data to revision %s" %
                                    wpt.rev)
        except subprocess.CalledProcessError:
            # Patch with that name already exists, probably
            pass
        needs_human = metadata.update_expected(paths["sync"],
                                               paths["metadata"],
                                               log_files,
                                               rev_old=initial_rev,
                                               ignore_existing=ignore_existing)

        if needs_human:
            #TODO: List all the files that should be checked carefully for changes.
            pass

        # Only touch the VCS when the metadata update actually changed files.
        if not local_tree.is_clean():
            local_tree.add_new(os.path.relpath(paths["metadata"], local_tree.root))
            local_tree.update_patch(include=[paths["metadata"]])

    except Exception as e:
        # Surface the traceback on stderr, then re-raise to the caller.
        #bug.comment("Update failed with error:\n %s" % traceback.format_exc())
        sys.stderr.write(traceback.format_exc())
        raise
+
+
+def run_update(**kwargs):
+    config = kwargs["config"]
+
+    paths = {"sync": kwargs["sync_path"],
+             "test": kwargs["tests_root"],
+             "metadata": kwargs["metadata_root"]}
+
+    for path in paths.itervalues():
+        ensure_exists(path)
+
+    if not kwargs["sync"] and not kwargs["run_log"]:
+        print """Nothing to do.
+
+Specify --sync to checkout latest upstream or one or more log files to update
+expected data."""
+
+    if kwargs["patch"]:
+        for tree_cls in [HgTree, GitTree, NoVCSTree]:
+            if tree_cls.is_type(os.path.abspath(os.curdir)):
+                local_tree = tree_cls()
+                print "Updating into a %s tree" % local_tree.name
+                break
+    else:
+        local_tree = NoVCSTree()
+
+    if not local_tree.is_clean():
+        sys.stderr.write("Working tree is not clean\n")
+        if not kwargs["no_check_clean"]:
+            sys.exit(1)
+
+    rev = kwargs.get("rev")
+    if rev is None:
+        rev = config["web-platform-tests"].get("branch", "master")
+
+    wpt = WebPlatformTests(config["web-platform-tests"]["remote_url"],
+                           paths["sync"],
+                           rev=rev)
+    bug = None
+
+    initial_rev = None
+    if kwargs["sync"]:
+        initial_manifest, new_manifest = sync_tests(paths, local_tree, wpt, bug)
+        initial_rev = initial_manifest.rev
+
+    if kwargs["run_log"]:
+        update_metadata(paths, local_tree, wpt, initial_rev, bug,
+                        kwargs["run_log"], kwargs["ignore_existing"])
+
+
def main():
    """Command-line entry point: parse update arguments and run the update.

    The process exit status reflects run_update's return value (falsy
    values exit non-zero).
    """
    args = wptcommandline.parse_args_update()
    success = run_update(**args)
    sys.exit(0 if success else 1)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/vcs.py
@@ -0,0 +1,45 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import subprocess
+from functools import partial
+
+
def vcs(bin_name):
    """Return a function for running bin_name subcommands.

    The returned function is called as f(command, *args, repo=path); repo,
    if given, is used as the subprocess working directory. Stdout is
    returned as a string; subprocess.CalledProcessError is raised on a
    non-zero exit status. (Python 2 syntax throughout.)
    """
    def inner(command, *args, **kwargs):
        repo = kwargs.pop("repo", None)
        if kwargs:
            # repo is the only keyword argument accepted.
            raise TypeError, kwargs

        args = list(args)

        proc_kwargs = {}
        if repo is not None:
            proc_kwargs["cwd"] = repo

        command_line = [bin_name, command] + args
        # Echo each command so update runs are easy to follow/debug.
        print " ".join(command_line)
        try:
            return subprocess.check_output(command_line, **proc_kwargs)
        except subprocess.CalledProcessError as e:
            # Dump context (cwd and captured output) before re-raising.
            print proc_kwargs
            print e.output
            raise
    return inner
+
# Module-level command runners for git and hg; each accepts an optional
# repo= keyword giving the working directory (see vcs() above).
git = vcs("git")
hg = vcs("hg")
+
+
def bind_to_repo(vcs_func, repo):
    """Return a callable equivalent to vcs_func with repo pre-bound.

    An explicit repo= keyword passed at call time still takes precedence
    over the bound value.
    """
    def bound(*args, **kwargs):
        kwargs.setdefault("repo", repo)
        return vcs_func(*args, **kwargs)
    return bound
+
+
def is_git_root(path):
    """Return True if path is the top level of a git working tree.

    `git rev-parse --show-cdup` prints the relative path up to the repo
    root: an empty line when already at the root, something like "../"
    otherwise, and a non-zero exit status outside any repository.
    """
    try:
        rv = git("rev-parse", "--show-cdup", repo=path)
    except subprocess.CalledProcessError:
        # Not inside a git repository at all.
        return False
    # Bug fix: removed a stray debug `print rv` that polluted stdout on
    # every call.
    return rv == "\n"
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptcommandline.py
@@ -0,0 +1,233 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import os
+import sys
+
+import config
+
+def abs_path(path):
+    return os.path.abspath(os.path.expanduser(path))
+
+def url_or_path(path):
+    """Return *path* unchanged if it looks like a URL, otherwise
+    convert it to an absolute filesystem path.
+
+    A parsed scheme longer than two characters is taken to be a real
+    URL scheme (e.g. "http"); anything shorter is presumably a Windows
+    drive letter such as "c:", so the value is treated as a local path.
+    """
+    import urlparse
+
+    parsed = urlparse.urlparse(path)
+    if len(parsed.scheme) > 2:
+        return path
+    else:
+        return abs_path(path)
+
+def slash_prefixed(url):
+    if not url.startswith("/"):
+        url = "/" + url
+    return url
+
+
+def require_arg(kwargs, name, value_func=None):
+    if value_func is None:
+        value_func = lambda x: x is not None
+
+    if not name in kwargs or not value_func(kwargs[name]):
+        print >> sys.stderr, "Missing required argument %s" % name
+        sys.exit(1)
+
+
+def create_parser(product_choices=None):
+    from mozlog.structured import commandline
+
+    import products
+
+    if product_choices is None:
+        config_data = config.load()
+        product_choices = products.products_enabled(config_data)
+
+    parser = argparse.ArgumentParser("web-platform-tests",
+                                     description="Runner for web-platform-tests tests.")
+    parser.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
+                        help="Path to the folder containing test metadata"),
+    parser.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
+                        help="Path to web-platform-tests"),
+    parser.add_argument("--prefs-root", dest="prefs_root", action="store", type=abs_path,
+                        help="Path to the folder containing browser prefs"),
+    parser.add_argument("--config", action="store", type=abs_path,
+                        help="Path to config file")
+    parser.add_argument("--binary", action="store",
+                        type=abs_path, help="Binary to run tests against")
+    parser.add_argument("--test-types", action="store",
+                        nargs="*", default=["testharness", "reftest"],
+                        choices=["testharness", "reftest"],
+                        help="Test types to run")
+    parser.add_argument("--processes", action="store", type=int, default=1,
+                        help="Number of simultaneous processes to use")
+    parser.add_argument("--include", action="append", type=slash_prefixed,
+                        help="URL prefix to include")
+    parser.add_argument("--exclude", action="append", type=slash_prefixed,
+                        help="URL prefix to exclude")
+    parser.add_argument("--include-manifest", type=abs_path,
+                        help="Path to manifest listing tests to include")
+
+    parser.add_argument("--total-chunks", action="store", type=int, default=1,
+                        help="Total number of chunks to use")
+    parser.add_argument("--this-chunk", action="store", type=int, default=1,
+                        help="Chunk number to run")
+    parser.add_argument("--chunk-type", action="store", choices=["none", "equal_time", "hash"],
+                        default=None, help="Chunking type to use")
+
+    parser.add_argument("--list-test-groups", action="store_true",
+                        default=False,
+                        help="List the top level directories containing tests that will run.")
+    parser.add_argument("--list-disabled", action="store_true",
+                        default=False,
+                        help="List the tests that are disabled on the current platform")
+
+    parser.add_argument("--timeout-multiplier", action="store", type=float, default=None,
+                        help="Multiplier relative to standard test timeout to use")
+    parser.add_argument("--repeat", action="store", type=int, default=1,
+                        help="Number of times to run the tests")
+
+    parser.add_argument("--no-capture-stdio", action="store_true", default=False,
+                        help="Don't capture stdio and write to logging")
+
+    parser.add_argument("--product", action="store", choices=product_choices,
+                        default="firefox", help="Browser against which to run tests")
+
+    parser.add_argument('--debugger',
+                        help="run under a debugger, e.g. gdb or valgrind")
+    parser.add_argument('--debugger-args', help="arguments to the debugger")
+    parser.add_argument('--pause-on-unexpected', action="store_true",
+                        help="Halt the test runner when an unexpected result is encountered")
+
+    parser.add_argument("--symbols-path", action="store", type=url_or_path,
+                        help="Path to symbols file used to analyse crash minidumps.")
+    parser.add_argument("--stackwalk-binary", action="store", type=abs_path,
+                        help="Path to stackwalker program used to analyse minidumps.")
+
+    parser.add_argument("--b2g-no-backup", action="store_true", default=False,
+                        help="Don't backup device before testrun with --product=b2g")
+
+    commandline.add_logging_group(parser)
+    return parser
+
+
+def set_from_config(kwargs):
+    """Fill in missing command line arguments from the config file.
+
+    kwargs["config"] is replaced with the parsed configuration object;
+    any listed argument that is still None after command line parsing
+    is populated from the corresponding config section and key.  Keys
+    flagged as paths are resolved via the config section's get_path.
+    """
+    if kwargs["config"] is None:
+        kwargs["config"] = config.path()
+
+    kwargs["config"] = config.read(kwargs["config"])
+
+    # {config section: [(config key, kwargs key, treat value as path)]}
+    keys = {"paths": [("tests", "tests_root", True),
+                      ("metadata", "metadata_root", True)],
+            "web-platform-tests": [("remote_url", "remote_url", False),
+                                   ("branch", "branch", False),
+                                   ("sync_path", "sync_path", True)]}
+
+    for section, values in keys.iteritems():
+        for config_value, kw_value, is_path in values:
+            # Only fill in values the user didn't supply explicitly
+            if kw_value in kwargs and kwargs[kw_value] is None:
+                if not is_path:
+                    new_value = kwargs["config"].get(section, {}).get(config_value, None)
+                else:
+                    new_value = kwargs["config"].get(section, {}).get_path(config_value)
+                kwargs[kw_value] = new_value
+
+
+def check_args(kwargs):
+    from mozrunner import cli
+
+    set_from_config(kwargs)
+
+    for key in ["tests_root", "metadata_root"]:
+        name = key.split("_", 1)[0]
+        path = kwargs[key]
+
+        if not os.path.exists(path):
+            print "Fatal: %s path %s does not exist" % (name, path)
+            sys.exit(1)
+
+        if not os.path.isdir(path):
+            print "Fatal: %s path %s is not a directory" % (name, path)
+            sys.exit(1)
+
+    if kwargs["this_chunk"] > 1:
+        require_arg(kwargs, "total_chunks", lambda x: x >= kwargs["this_chunk"])
+
+    if kwargs["chunk_type"] is None:
+        if kwargs["total_chunks"] > 1:
+            kwargs["chunk_type"] = "equal_time"
+        else:
+            kwargs["chunk_type"] = "none"
+
+    if kwargs["debugger"] is not None:
+        debug_args, interactive = cli.debugger_arguments(kwargs["debugger"],
+                                                         kwargs["debugger_args"])
+        if interactive:
+            require_arg(kwargs, "processes", lambda x: x == 1)
+            kwargs["no_capture_stdio"] = True
+        kwargs["interactive"] = interactive
+        kwargs["debug_args"] = debug_args
+    else:
+        kwargs["interactive"] = False
+        kwargs["debug_args"] = None
+
+    if kwargs["binary"] is not None:
+        if not os.path.exists(kwargs["binary"]):
+            print >> sys.stderr, "Binary path %s does not exist" % kwargs["binary"]
+            sys.exit(1)
+
+    return kwargs
+
+
+def create_parser_update():
+    parser = argparse.ArgumentParser("web-platform-tests-update",
+                                     description="Update script for web-platform-tests tests.")
+    parser.add_argument("--metadata", action="store", type=abs_path, dest="metadata_root",
+                        help="Path to the folder containing test metadata"),
+    parser.add_argument("--tests", action="store", type=abs_path, dest="tests_root",
+                        help="Path to web-platform-tests"),
+    parser.add_argument("--sync-path", action="store", type=abs_path,
+                        help="Path to store git checkout of web-platform-tests during update"),
+    parser.add_argument("--remote_url", action="store",
+                        help="URL of web-platfrom-tests repository to sync against"),
+    parser.add_argument("--branch", action="store", type=abs_path,
+                        help="Remote branch to sync against")
+    parser.add_argument("--config", action="store", type=abs_path, help="Path to config file")
+    parser.add_argument("--rev", action="store", help="Revision to sync to")
+    parser.add_argument("--no-check-clean", action="store_true", default=False,
+                        help="Don't check the working directory is clean before updating")
+    parser.add_argument("--patch", action="store_true",
+                        help="Create an mq patch or git branch+commit containing the changes.")
+    parser.add_argument("--sync", dest="sync", action="store_true", default=False,
+                        help="Sync the tests with the latest from upstream")
+    parser.add_argument("--ignore-existing", action="store_true", help="When updating test results only consider results from the logfiles provided, not existing expectations.")
+    # Should make this required iff run=logfile
+    parser.add_argument("run_log", nargs="*", type=abs_path,
+                        help="Log file from run of tests")
+    return parser
+
+
+def create_parser_reduce(product_choices=None):
+    """Build the parser for the reduce (unstable-test bisection)
+    command: the main runner's options plus a target test id."""
+    parser = create_parser(product_choices)
+    parser.add_argument("target", action="store", help="Test id that is unstable")
+    return parser
+
+
+def parse_args():
+    """Parse and validate command line arguments for the main runner,
+    returning them as a dict."""
+    parser = create_parser()
+    rv = vars(parser.parse_args())
+    check_args(rv)
+    return rv
+
+def parse_args_update():
+    """Parse command line arguments for the update command, filling in
+    defaults from the config file, and return them as a dict.
+
+    Note: unlike parse_args this does not run the full check_args
+    validation."""
+    parser = create_parser_update()
+    rv = vars(parser.parse_args())
+    set_from_config(rv)
+    return rv
+
+def parse_args_reduce():
+    """Parse and validate command line arguments for the reduce
+    command, returning them as a dict."""
+    parser = create_parser_reduce()
+    rv = vars(parser.parse_args())
+    check_args(rv)
+    return rv
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/__init__.py
@@ -0,0 +1,8 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from serializer import serialize
+from parser import parse
+from backends.static import compile as compile_static
+from backends.conditional import compile as compile_condition
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/backends/__init__.py
@@ -0,0 +1,3 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/backends/conditional.py
@@ -0,0 +1,322 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import operator
+
+from ..node import NodeVisitor, DataNode, ConditionalNode, KeyValueNode, ValueNode
+from ..parser import parse
+
+
+class ConditionalValue(object):
+    """Wrapper around a (possibly conditional) value in the manifest
+    AST.
+
+    For a ConditionalNode the first child is the condition expression
+    and the second the value; a bare ValueNode is an unconditional
+    value and gets condition_node None.  condition_func is a callable
+    taking run_info and returning whether the condition matches.
+    """
+    def __init__(self, node, condition_func):
+        self.node = node
+        self.condition_func = condition_func
+        if isinstance(node, ConditionalNode):
+            assert len(node.children) == 2
+            self.condition_node = self.node.children[0]
+            self.value_node = self.node.children[1]
+        else:
+            assert isinstance(node, ValueNode)
+            self.condition_node = None
+            self.value_node = self.node
+
+    @property
+    def value(self):
+        # Reads/writes go straight to the underlying AST node so that
+        # serializing the tree reflects any updates
+        return self.value_node.data
+
+    @value.setter
+    def value(self, value):
+        self.value_node.data = value
+
+    def __call__(self, run_info):
+        """Return whether this value's condition matches run_info."""
+        return self.condition_func(run_info)
+
+    def set_value(self, value):
+        self.value = value
+
+    def remove(self):
+        # If this is the only value left, remove the enclosing
+        # key/value node from the tree as well
+        if len(self.node.parent.children) == 1:
+            self.node.parent.remove()
+        self.node.remove()
+
+
+class Compiler(NodeVisitor):
+    """Compiler backend producing a ManifestItem tree in which
+    conditions remain unevaluated: each key maps to ConditionalValue
+    objects whose condition functions are applied to run_info at
+    runtime."""
+
+    def compile(self, tree, data_cls_getter=None, **kwargs):
+        """Compile a raw AST into a form where conditional expressions
+        are represented by ConditionalValue objects that can be evaluated
+        at runtime.
+
+        tree - The root node of the wptmanifest AST to compile
+
+        data_cls_getter - A function taking two parameters; the previous
+                          output node and the current ast node and returning
+                          the class of the output node to use for the current
+                          ast node
+        """
+        if data_cls_getter is None:
+            self.data_cls_getter = lambda x, y: ManifestItem
+        else:
+            self.data_cls_getter = data_cls_getter
+
+        self.tree = tree
+        self.output_node = self._initial_output_node(tree, **kwargs)
+        self.visit(tree)
+        assert self.output_node is not None
+        return self.output_node
+
+    def compile_condition(self, condition):
+        """Compile a ConditionalNode into a ConditionalValue.
+
+        condition: A ConditionalNode"""
+        # Wrap the condition in a minimal synthetic tree (data node
+        # containing one key/value) and compile that, then pull the
+        # single resulting ConditionalValue back out
+        data_node = DataNode()
+        key_value_node = KeyValueNode()
+        key_value_node.append(condition.copy())
+        data_node.append(key_value_node)
+        manifest_item = self.compile(data_node)
+        return manifest_item._data[None][0]
+
+    def _initial_output_node(self, node, **kwargs):
+        return self.data_cls_getter(None, None)(node, **kwargs)
+
+    def visit_DataNode(self, node):
+        if node != self.tree:
+            output_parent = self.output_node
+            self.output_node = self.data_cls_getter(self.output_node, node)(node)
+        else:
+            output_parent = None
+
+        assert self.output_node is not None
+
+        for child in node.children:
+            self.visit(child)
+
+        if output_parent is not None:
+            # Append to the parent *after* processing all the node data
+            output_parent.append(self.output_node)
+            self.output_node = self.output_node.parent
+
+        assert self.output_node is not None
+
+    def visit_KeyValueNode(self, node):
+        key_values = []
+        for child in node.children:
+            # Each child visit yields a (condition, value) pair; only
+            # the condition function is kept here, the value is read
+            # from the AST node by ConditionalValue itself
+            condition, value = self.visit(child)
+            key_values.append(ConditionalValue(child, condition))
+
+        self.output_node._add_key_value(node, key_values)
+
+    def visit_ValueNode(self, node):
+        # Unconditional value: its condition always matches
+        return (lambda x: True, node.data)
+
+    def visit_ConditionalNode(self, node):
+        return self.visit(node.children[0]), self.visit(node.children[1])
+
+    def visit_StringNode(self, node):
+        # Children are index operations applied to the literal string
+        indexes = [self.visit(child) for child in node.children]
+
+        def value(x):
+            rv = node.data
+            for index in indexes:
+                rv = rv[index(x)]
+            return rv
+        return value
+
+    def visit_NumberNode(self, node):
+        # Numbers are stored as strings; a "." marks a float literal
+        if "." in node.data:
+            return lambda x: float(node.data)
+        else:
+            return lambda x: int(node.data)
+
+    def visit_VariableNode(self, node):
+        # Look up node.data in the run_info passed in at evaluation
+        # time, then apply any index operations
+        indexes = [self.visit(child) for child in node.children]
+
+        def value(x):
+            data = x[node.data]
+            for index in indexes:
+                data = data[index(x)]
+            return data
+        return value
+
+    def visit_IndexNode(self, node):
+        assert len(node.children) == 1
+        return self.visit(node.children[0])
+
+    def visit_UnaryExpressionNode(self, node):
+        assert len(node.children) == 2
+        operator = self.visit(node.children[0])
+        operand = self.visit(node.children[1])
+
+        return lambda x: operator(operand(x))
+
+    def visit_BinaryExpressionNode(self, node):
+        assert len(node.children) == 3
+        operator = self.visit(node.children[0])
+        operand_0 = self.visit(node.children[1])
+        operand_1 = self.visit(node.children[2])
+
+        assert operand_0 is not None
+        assert operand_1 is not None
+
+        return lambda x: operator(operand_0(x), operand_1(x))
+
+    def visit_UnaryOperatorNode(self, node):
+        return {"not": operator.not_}[node.data]
+
+    def visit_BinaryOperatorNode(self, node):
+        return {"and": operator.and_,
+                "or": operator.or_,
+                "==": operator.eq,
+                "!=": operator.ne}[node.data]
+
+
+class ManifestItem(object):
+    """Output node type produced by Compiler: a tree mirroring the
+    DataNode structure of the AST, with each key mapping to a list of
+    ConditionalValue objects (any unconditional value last)."""
+
+    def __init__(self, node=None, **kwargs):
+        self.node = node
+        self.parent = None
+        self.children = []
+        # {key: [ConditionalValue, ...]} cache over the AST
+        self._data = {}
+
+    def __repr__(self):
+        return "<ManifestItem %s>" % (self.node.data)
+
+    def __str__(self):
+        rv = [repr(self)]
+        for item in self.children:
+            rv.extend("  %s" % line for line in str(item).split("\n"))
+        return "\n".join(rv)
+
+    @property
+    def is_empty(self):
+        # True when neither this item nor any descendant holds data
+        if self._data:
+            return False
+        return all(child.is_empty for child in self.children)
+
+    @property
+    def root(self):
+        # Topmost ancestor of this item
+        node = self
+        while node.parent is not None:
+            node = node.parent
+        return node
+
+    @property
+    def name(self):
+        return self.node.data
+
+    def has_key(self, key):
+        # Keys are looked up on this item first, then on the root
+        for node in [self, self.root]:
+            if key in node._data:
+                return True
+        return False
+
+    def get(self, key, run_info=None):
+        """Return the first value for *key* whose condition matches
+        run_info, checking this item and then the root.
+
+        Raises KeyError if no value matches."""
+        if run_info is None:
+            run_info = {}
+
+        for node in [self, self.root]:
+            if key in node._data:
+                for cond_value in node._data[key]:
+                    try:
+                        matches = cond_value(run_info)
+                    except KeyError:
+                        # Condition referencing a property missing from
+                        # run_info simply doesn't match
+                        matches = False
+                    if matches:
+                        return cond_value.value
+        raise KeyError
+
+    def set(self, key, value, condition=None):
+        """Set the value for *key* under *condition* (a condition AST
+        node, or None for an unconditional value), updating both the
+        underlying AST and the cached ConditionalValue list."""
+        # First try to update the existing value
+        if key in self._data:
+            cond_values = self._data[key]
+            for cond_value in cond_values:
+                if cond_value.condition_node == condition:
+                    cond_value.value = value
+                    return
+            # If there isn't a conditional match reuse the existing KeyValueNode as the
+            # parent
+            node = None
+            for child in self.node.children:
+                if child.data == key:
+                    node = child
+                    break
+            assert node is not None
+
+        else:
+            node = KeyValueNode(key)
+            self.node.append(node)
+
+        value_node = ValueNode(value)
+        if condition is not None:
+            conditional_node = ConditionalNode()
+            conditional_node.append(condition)
+            conditional_node.append(value_node)
+            node.append(conditional_node)
+            cond_value = Compiler().compile_condition(conditional_node)
+        else:
+            node.append(value_node)
+            cond_value = ConditionalValue(value_node, lambda x: True)
+
+        # Update the cache of child values. This is pretty annoying and maybe
+        # it should just work directly on the tree
+        if key not in self._data:
+            self._data[key] = []
+        # Keep any unconditional value at the end of the list, since
+        # get() returns the first match
+        if self._data[key] and self._data[key][-1].condition_node is None:
+            self._data[key].insert(len(self._data[key]) - 1, cond_value)
+        else:
+            self._data[key].append(cond_value)
+
+    def _add_key_value(self, node, values):
+        """Called during construction to set a key-value node"""
+        self._data[node.data] = values
+
+    def append(self, child):
+        self.children.append(child)
+        child.parent = self
+        # Keep the AST in sync with the output tree
+        if child.node.parent != self.node:
+            self.node.append(child.node)
+        return child
+
+    def remove(self):
+        if self.parent:
+            self.parent._remove_child(self)
+
+    def _remove_child(self, child):
+        self.children.remove(child)
+        child.parent = None
+
+    def iterchildren(self, name=None):
+        # name=None yields all children
+        for item in self.children:
+            if item.name == name or name is None:
+                yield item
+
+    def _flatten(self):
+        # Merge this item's data with the root's, local values winning
+        rv = {}
+        for node in [self, self.root]:
+            for name, value in node._data.iteritems():
+                if name not in rv:
+                    rv[name] = value
+        return rv
+
+    def iteritems(self):
+        for item in self._flatten().iteritems():
+            yield item
+
+    def iterkeys(self):
+        for item in self._flatten().iterkeys():
+            yield item
+
+    def remove_value(self, key, value):
+        """Remove a specific ConditionalValue for *key*, dropping the
+        key entirely when no values remain."""
+        self._data[key].remove(value)
+        if not self._data[key]:
+            del self._data[key]
+        value.remove()
+
+
+def compile_ast(ast, data_cls_getter=None, **kwargs):
+    """Compile an already-parsed AST; see Compiler.compile."""
+    return Compiler().compile(ast, data_cls_getter=data_cls_getter, **kwargs)
+
+
+def compile(stream, data_cls_getter=None, **kwargs):
+    """Parse *stream* and compile it into a ManifestItem tree with
+    runtime-evaluatable conditional values."""
+    return compile_ast(parse(stream),
+                       data_cls_getter=data_cls_getter,
+                       **kwargs)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/backends/static.py
@@ -0,0 +1,218 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import operator
+
+from ..node import NodeVisitor
+from ..parser import parse
+
+
+class Compiler(NodeVisitor):
+    """Compiler backend that evaluates conditional expressions
+    to give static output"""
+
+    def compile(self, tree, expr_data, data_cls_getter=None, **kwargs):
+        """Compile a raw AST into a form with conditional expressions
+        evaluated.
+
+        tree - The root node of the wptmanifest AST to compile
+
+        expr_data - A dictionary of key / value pairs to use when
+                    evaluating conditional expressions
+
+        data_cls_getter - A function taking two parameters; the previous
+                          output node and the current ast node and returning
+                          the class of the output node to use for the current
+                          ast node
+        """
+
+        self._kwargs = kwargs
+        self.expr_data = expr_data
+
+        if data_cls_getter is None:
+            self.data_cls_getter = lambda x, y: ManifestItem
+        else:
+            self.data_cls_getter = data_cls_getter
+
+        self.output_node = None
+        self.visit(tree)
+        return self.output_node
+
+    def visit_DataNode(self, node):
+        output_parent = self.output_node
+        if self.output_node is None:
+            # Root of the output tree; gets the extra compile kwargs
+            assert node.parent is None
+            self.output_node = self.data_cls_getter(None, None)(None, **self._kwargs)
+        else:
+            self.output_node = self.data_cls_getter(self.output_node, node)(node.data)
+
+        for child in node.children:
+            self.visit(child)
+
+        if output_parent is not None:
+            # Attach to the parent after all child data is processed
+            output_parent.append(self.output_node)
+            self.output_node = self.output_node.parent
+
+    def visit_KeyValueNode(self, node):
+        # The first child whose condition evaluates true (or the
+        # unconditional default) supplies the key's static value
+        key_name = node.data
+        key_value = None
+        for child in node.children:
+            value = self.visit(child)
+            if value is not None:
+                key_value = value
+                break
+        if key_value is not None:
+            self.output_node.set(key_name, key_value)
+
+    def visit_ValueNode(self, node):
+        return node.data
+
+    def visit_ConditionalNode(self, node):
+        # Returns None (and is thus skipped) when the condition is false
+        assert len(node.children) == 2
+        if self.visit(node.children[0]):
+            return self.visit(node.children[1])
+
+    def visit_StringNode(self, node):
+        # Children are index operations applied to the literal string
+        value = node.data
+        for child in node.children:
+            value = self.visit(child)(value)
+        return value
+
+    def visit_NumberNode(self, node):
+        # Numbers are stored as strings; a "." marks a float literal
+        if "." in node.data:
+            return float(node.data)
+        else:
+            return int(node.data)
+
+    def visit_VariableNode(self, node):
+        # Resolve the variable against expr_data, then apply indexes
+        value = self.expr_data[node.data]
+        for child in node.children:
+            value = self.visit(child)(value)
+        return value
+
+    def visit_IndexNode(self, node):
+        assert len(node.children) == 1
+        index = self.visit(node.children[0])
+        return lambda x: x[index]
+
+    def visit_UnaryExpressionNode(self, node):
+        assert len(node.children) == 2
+        operator = self.visit(node.children[0])
+        operand = self.visit(node.children[1])
+
+        return operator(operand)
+
+    def visit_BinaryExpressionNode(self, node):
+        assert len(node.children) == 3
+        operator = self.visit(node.children[0])
+        operand_0 = self.visit(node.children[1])
+        operand_1 = self.visit(node.children[2])
+
+        return operator(operand_0, operand_1)
+
+    def visit_UnaryOperatorNode(self, node):
+        return {"not": operator.not_}[node.data]
+
+    def visit_BinaryOperatorNode(self, node):
+        return {"and": operator.and_,
+                "or": operator.or_,
+                "==": operator.eq,
+                "!=": operator.ne}[node.data]
+
+
+class ManifestItem(object):
+    """Output node type for the static backend: a tree of items with
+    plain key/value data, all conditions already evaluated away."""
+
+    def __init__(self, name, **kwargs):
+        self.parent = None
+        self.name = name
+        self.children = []
+        # Plain {key: value} mapping
+        self._data = {}
+
+    def __repr__(self):
+        return "<ManifestItem %s>" % (self.name)
+
+    def __str__(self):
+        rv = [repr(self)]
+        for item in self.children:
+            rv.extend("  %s" % line for line in str(item).split("\n"))
+        return "\n".join(rv)
+
+    @property
+    def is_empty(self):
+        # True when neither this item nor any descendant holds data
+        if self._data:
+            return False
+        return all(child.is_empty for child in self.children)
+
+    @property
+    def root(self):
+        # Topmost ancestor of this item
+        node = self
+        while node.parent is not None:
+            node = node.parent
+        return node
+
+    def has_key(self, key):
+        # Keys are looked up on this item first, then on the root
+        for node in [self, self.root]:
+            if key in node._data:
+                return True
+        return False
+
+    def get(self, key):
+        """Return the value for *key* from this item or the root;
+        raises KeyError when absent from both."""
+        for node in [self, self.root]:
+            if key in node._data:
+                return node._data[key]
+        raise KeyError
+
+    def set(self, name, value):
+        self._data[name] = value
+
+    def remove(self):
+        if self.parent:
+            self.parent._remove_child(self)
+
+    def _remove_child(self, child):
+        self.children.remove(child)
+        child.parent = None
+
+    def iterchildren(self, name=None):
+        # name=None yields all children
+        for item in self.children:
+            if item.name == name or name is None:
+                yield item
+
+    def _flatten(self):
+        # Merge this item's data with the root's, local values winning
+        rv = {}
+        for node in [self, self.root]:
+            for name, value in node._data.iteritems():
+                if name not in rv:
+                    rv[name] = value
+        return rv
+
+    def iteritems(self):
+        for item in self._flatten().iteritems():
+            yield item
+
+    def iterkeys(self):
+        for item in self._flatten().iterkeys():
+            yield item
+
+    def itervalues(self):
+        for item in self._flatten().itervalues():
+            yield item
+
+    def append(self, child):
+        child.parent = self
+        self.children.append(child)
+        return child
+
+
+def compile_ast(ast, expr_data, data_cls_getter=None, **kwargs):
+    """Compile an already-parsed AST statically; see Compiler.compile."""
+    return Compiler().compile(ast,
+                              expr_data,
+                              data_cls_getter=data_cls_getter,
+                              **kwargs)
+
+
+def compile(stream, expr_data, data_cls_getter=None, **kwargs):
+    """Parse *stream* and compile it into a static ManifestItem tree,
+    evaluating all conditions against expr_data."""
+    return compile_ast(parse(stream),
+                       expr_data,
+                       data_cls_getter=data_cls_getter,
+                       **kwargs)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/node.py
@@ -0,0 +1,151 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+class NodeVisitor(object):
+    """Base visitor that dispatches on node class name: visiting a
+    FooNode calls self.visit_FooNode(node)."""
+    def visit(self, node):
+        # This is ugly as hell, but we don't have multimethods and
+        # they aren't trivial to fake without access to the class
+        # object from the class body
+        func = getattr(self, "visit_%s" % (node.__class__.__name__))
+        return func(node)
+
+
+class Node(object):
+    def __init__(self, data=None):
+        self.data = data
+        self.parent = None
+        self.children = []
+
+    def append(self, other):
+        other.parent = self
+        self.children.append(other)
+
+    def remove(self):
+        self.parent.children.remove(self)
+
+    def __repr__(self):
+        return "<%s %s>" % (self.__class__.__name__, self.data)
+
+    def __str__(self):
+        rv = [repr(self)]
+        for item in self.children:
+            rv.extend("  %s" % line for line in str(item).split("\n"))
+        return "\n".join(rv)
+
+    def __eq__(self, other):
+        if not (self.__class__ == other.__class__ and
+                self.data == other.data and
+                len(self.children) == len(other.children)):
+            return False
+        for child, other_child in zip(self.children, other.children):
+            if not child == other_child:
+                return False
+        return True
+
+    def copy(self):
+        new = self.__class__(self.data)
+        for item in self.children:
+            new.append(item.copy())
+        return new
+
+
+class DataNode(Node):
+    """AST node for a data section; presumably a [name] block in the
+    manifest -- keeps data children after all other child types."""
+
+    def append(self, other):
+        # Append that retains the invariant that child data nodes
+        # come after child nodes of other types
+        other.parent = self
+        if isinstance(other, DataNode):
+            self.children.append(other)
+        else:
+            index = len(self.children)
+            while index > 0 and isinstance(self.children[index - 1], DataNode):
+                index -= 1
+            # A new non-data child must not duplicate the data of an
+            # existing non-data child
+            for i in xrange(index):
+                assert other.data != self.children[i].data
+            self.children.insert(index, other)
+
+
+class KeyValueNode(Node):
+    """AST node for a key; children are conditional values followed by
+    at most one unconditional ValueNode."""
+
+    def append(self, other):
+        # Append that retains the invariant that conditional nodes
+        # come before unconditional nodes
+        other.parent = self
+        if isinstance(other, ValueNode):
+            if self.children:
+                # Only one unconditional value per key is allowed
+                assert not isinstance(self.children[-1], ValueNode)
+            self.children.append(other)
+        else:
+            if self.children and isinstance(self.children[-1], ValueNode):
+                self.children.insert(len(self.children) - 1, other)
+            else:
+                self.children.append(other)
+
+
+class ValueNode(Node):
+    """Leaf node holding a literal value; may not have children."""
+    def append(self, other):
+        raise TypeError
+
+
+class ConditionalNode(Node):
+    # Children are [condition expression, value]
+    pass
+
+
+class UnaryExpressionNode(Node):
+    """Expression node with exactly two children: [operator, operand]."""
+    def __init__(self, operator, operand):
+        Node.__init__(self)
+        self.append(operator)
+        self.append(operand)
+
+    def append(self, other):
+        Node.append(self, other)
+        assert len(self.children) <= 2
+
+    def copy(self):
+        # Reconstruct via __init__ since it takes the children directly
+        new = self.__class__(self.children[0].copy(),
+                             self.children[1].copy())
+        return new
+
+
+class BinaryExpressionNode(Node):
+    """Expression node with exactly three children:
+    [operator, operand_0, operand_1]."""
+    def __init__(self, operator, operand_0, operand_1):
+        Node.__init__(self)
+        self.append(operator)
+        self.append(operand_0)
+        self.append(operand_1)
+
+    def append(self, other):
+        Node.append(self, other)
+        assert len(self.children) <= 3
+
+    def copy(self):
+        # Reconstruct via __init__ since it takes the children directly
+        new = self.__class__(self.children[0].copy(),
+                             self.children[1].copy(),
+                             self.children[2].copy())
+        return new
+
+
+class UnaryOperatorNode(Node):
+    """Leaf node naming a unary operator (e.g. "not")."""
+    def append(self, other):
+        raise TypeError
+
+
+class BinaryOperatorNode(Node):
+    """Leaf node naming a binary operator (e.g. "==", "and")."""
+    def append(self, other):
+        raise TypeError
+
+
+class IndexNode(Node):
+    # Indexing operation; the single child is the index expression
+    pass
+
+
+class VariableNode(Node):
+    # Variable reference; data is the name looked up in the expression
+    # data / run_info at evaluation time
+    pass
+
+
+class StringNode(Node):
+    # String literal; children are index operations applied to it
+    pass
+
+
+class NumberNode(ValueNode):
+    # Numeric literal; data holds the string form (int or float)
+    pass
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/parser.py
@@ -0,0 +1,587 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#default_value:foo
+#include: other.manifest
+#
+#[test_name.js]
+#  expected: ERROR
+#
+#  [subtest 1]
+#    expected:
+#      os == win: FAIL #This is a comment
+#      PASS
+#
+
+# TODO: keep comments in the tree
+
+import types
+from cStringIO import StringIO
+
+from node import *
+
+
class ParseError(Exception):
    """Raised for any tokenizer or parser error in a manifest."""
    pass
+
# Sentinel returned by Tokenizer.char at the end of a line. Use unique
# instances (``object()``) rather than the ``object`` type itself, so the
# sentinels are distinct values instead of all aliasing the same class.
eol = object()
group_start = object()
group_end = object()

digits = "0123456789"
open_parens = "[("
close_parens = "])"
parens = open_parens + close_parens
# Characters that may appear in a symbolic operator token ("==", "!=")
operator_chars = "=!"

unary_operators = ["not"]
binary_operators = ["==", "!=", "and", "or"]

# All operators, ordered from highest to lowest precedence; precedence()
# derives a numeric level from the position in this list.
operators = ["==", "!=", "not", "and", "or"]
+
+
def decode(byte_str):
    # Python 2 only: interpret backslash escapes in the byte string, then
    # decode UTF-8 bytes to a unicode string. The "string_escape" codec
    # does not exist on Python 3.
    return byte_str.decode("string_escape").decode("utf8")
+
+
def precedence(operator_node):
    """Return the numeric precedence of an operator node; higher binds tighter.

    Derived from the operator's position in the module-level ``operators``
    list, which is ordered from highest to lowest precedence.
    """
    return len(operators) - operators.index(operator_node.data)
+
+
class TokenTypes(object):
    """Namespace whose attributes name each token type.

    Each attribute's value is its own name, e.g. ``token_types.ident ==
    "ident"``, so tokens compare as plain strings.
    """
    def __init__(self):
        names = ("group_start", "group_end", "paren", "separator",
                 "ident", "string", "number", "eof")
        for name in names:
            setattr(self, name, name)

token_types = TokenTypes()
+
+
class Tokenizer(object):
    """State-machine tokenizer for the manifest format.

    ``self.state`` holds the method implementing the current tokenizer
    state; each state method may yield ``(token type, token value)``
    tuples and sets ``self.state`` to the next state. Indentation is
    significant and is converted into group_start/group_end tokens.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Return the tokenizer to its initial state."""
        # Stack of indentation offsets; the last entry is the current level
        self.indent_levels = [0]
        self.state = self.line_start_state
        # State to enter after the indentation handling at line start
        self.next_state = self.data_line_state
        self.line_number = 0

    def tokenize(self, stream):
        """Yield (token type, value) tuples for the given input.

        ``stream`` is either a string or an iterable of lines. After the
        input is exhausted, eof tokens are yielded indefinitely.
        """
        self.reset()
        # Python 2 only: types.StringTypes does not exist on Python 3
        if type(stream) in types.StringTypes:
            stream = StringIO(stream)

        for i, line in enumerate(stream):
            self.state = self.line_start_state
            self.line_number = i + 1
            self.index = 0
            # Trailing whitespace (including the newline) is insignificant
            self.line = line.rstrip()
            if self.line:
                while self.state != self.eol_state:
                    tokens = self.state()
                    if tokens:
                        for token in tokens:
                            yield token
        while True:
            yield (token_types.eof, None)

    def char(self):
        """Return the character at the current index, or ``eol`` at line end."""
        if self.index == len(self.line):
            return eol
        return self.line[self.index]

    def consume(self):
        """Advance past the current character (no-op at end of line)."""
        if self.index < len(self.line):
            self.index += 1

    def peek(self, length):
        """Return up to ``length`` upcoming characters without consuming them."""
        return self.line[self.index:self.index + length]

    def skip_whitespace(self):
        while self.char() == " ":
            self.consume()

    def eol_state(self):
        # Sentinel state: reaching it terminates the per-line token loop
        pass

    def line_start_state(self):
        """Handle indentation, emitting group_start/group_end tokens."""
        self.skip_whitespace()
        assert self.char() != eol
        if self.index > self.indent_levels[-1]:
            self.indent_levels.append(self.index)
            yield (token_types.group_start, None)
        else:
            while self.index < self.indent_levels[-1]:
                self.indent_levels.pop()
                yield (token_types.group_end, None)
                # This is terrible; if we were parsing an expression
                # then the next_state will be expr_or_value but when we deindent
                # it must always be a heading or key next so we go back to data_line_state
                self.next_state = self.data_line_state
            if self.index != self.indent_levels[-1]:
                raise ParseError("Unexpected indent")

        self.state = self.next_state

    def data_line_state(self):
        """Dispatch to heading ([...]) or key parsing for a data line."""
        if self.char() == "[":
            yield (token_types.paren, self.char())
            self.consume()
            self.state = self.heading_state
        else:
            self.state = self.key_state

    def heading_state(self):
        """Read a [heading] name; ``\\]`` escapes a literal ] in the name."""
        index_0 = self.index
        # Positions of backslashes to drop from the emitted string
        skip_indexes = []
        while True:
            c = self.char()
            if c == "\\":
                self.consume()
                c = self.char()
                if c == eol:
                    raise ParseError("Unexpected EOL in heading")
                elif c == "]":
                    skip_indexes.append(self.index - 1)
                self.consume()
            elif c == "]":
                break
            elif c == eol:
                raise ParseError("EOL in heading")
            else:
                self.consume()

        # Stitch together the heading text, omitting escape backslashes
        index_1 = self.index
        parts = []
        min_index = index_0
        for index in skip_indexes:
            parts.append(self.line[min_index:index])
            min_index = index + 1
        parts.append(self.line[min_index:index_1])
        yield (token_types.string, decode("".join(parts)))
        yield (token_types.paren, "]")
        self.consume()
        self.state = self.line_end_state
        self.next_state = self.data_line_state

    def key_state(self):
        """Read a key name up to the ':' separator."""
        index_0 = self.index
        while True:
            c = self.char()
            if c == " ":
                index_1 = self.index
                self.skip_whitespace()
                if self.char() != ":":
                    raise ParseError("Space in key name")
                break
            elif c == ":":
                index_1 = self.index
                break
            elif c == eol:
                raise ParseError("EOL in key name (missing ':'?)")
            else:
                self.consume()
        yield (token_types.string, decode(self.line[index_0:index_1]))
        yield (token_types.separator, ":")
        self.consume()
        self.state = self.after_key_state

    def after_key_state(self):
        """After 'key:' decide between an inline value and an indented block."""
        self.skip_whitespace()
        c = self.char()
        if c == "#":
            self.next_state = self.expr_or_value_state
            self.state = self.comment_state
        elif c == eol:
            self.next_state = self.expr_or_value_state
            self.state = self.eol_state
        else:
            self.state = self.value_state

    def value_state(self):
        """Read a (possibly quoted) value, stopping at a comment or EOL."""
        self.skip_whitespace()
        index_0 = self.index
        if self.char() in ("'", '"'):
            quote_char = self.char()
            self.consume()
            yield (token_types.string, decode(self.read_string(quote_char)))
        else:
            # index_1 tracks the last non-whitespace character seen
            index_1 = self.index
            while True:
                c = self.char()
                if c == "\\":
                    self.consume()
                    if self.char() == eol:
                        raise ParseError("EOL in character escape")
                elif c == "#":
                    self.state = self.comment_state
                    break
                elif c == " ":
                    # prevent whitespace before comments from being included in the value
                    pass
                elif c == eol:
                    break
                else:
                    index_1 = self.index
                self.consume()
            yield (token_types.string, decode(self.line[index_0:index_1 + 1]))
        self.state = self.line_end_state

    def comment_state(self):
        # Comments run to the end of the line and produce no tokens
        self.state = self.eol_state

    def line_end_state(self):
        """Allow only whitespace or a comment after the line's content."""
        self.skip_whitespace()
        c = self.char()
        if c == "#":
            self.state = self.comment_state
        elif c == eol:
            self.state = self.eol_state
        else:
            raise ParseError("Junk before EOL")

    def read_string(self, quote_char):
        """Consume a quoted string and return its raw (still-escaped) text."""
        index_0 = self.index
        while True:
            c = self.char()
            if c == "\\":
                self.consume()
                # Bug fix: the original compared the *method* (self.char)
                # against eol, which was always False, so an escape at the
                # end of the line was never caught here.
                if self.char() == eol:
                    raise ParseError("EOL following quote")
                self.consume()
            elif c == quote_char:
                break
            elif c == eol:
                raise ParseError("EOL in quoted string")
            else:
                self.consume()
        rv = self.line[index_0:self.index]
        self.consume()
        return rv

    def expr_or_value_state(self):
        """In an indented block, 'if ' starts a conditional expression."""
        if self.peek(3) == "if ":
            self.state = self.expr_state
        else:
            self.state = self.value_state

    def expr_state(self):
        """Dispatch within an expression on the next character."""
        self.skip_whitespace()
        c = self.char()
        if c == eol:
            raise ParseError("EOL in expression")
        elif c in "'\"":
            self.consume()
            yield (token_types.string, decode(self.read_string(c)))
        elif c == "#":
            raise ParseError("Comment before end of expression")
        elif c == ":":
            yield (token_types.separator, c)
            self.consume()
            self.state = self.value_state
        elif c in parens:
            self.consume()
            yield (token_types.paren, c)
        elif c in ("!", "="):
            self.state = self.operator_state
        elif c in digits:
            self.state = self.digit_state
        else:
            self.state = self.ident_state

    def operator_state(self):
        # Only symbolic operators
        index_0 = self.index
        while True:
            c = self.char()
            if c == eol:
                break
            elif c in operator_chars:
                self.consume()
            else:
                self.state = self.expr_state
                break
        yield (token_types.ident, self.line[index_0:self.index])

    def digit_state(self):
        """Read an integer or decimal number token."""
        index_0 = self.index
        seen_dot = False
        while True:
            c = self.char()
            if c == eol:
                break
            elif c in digits:
                self.consume()
            elif c == ".":
                if seen_dot:
                    raise ParseError("Invalid number")
                self.consume()
                seen_dot = True
            elif c in parens:
                break
            elif c in operator_chars:
                break
            elif c == " ":
                break
            elif c == ":":
                break
            else:
                raise ParseError("Invalid character in number")

        self.state = self.expr_state
        yield (token_types.number, self.line[index_0:self.index])

    def ident_state(self):
        """Read an identifier (variable name or word operator) token."""
        index_0 = self.index
        while True:
            c = self.char()
            if c == eol:
                break
            elif c == ".":
                break
            elif c in parens:
                break
            elif c in operator_chars:
                break
            elif c == " ":
                break
            elif c == ":":
                break
            else:
                self.consume()
        self.state = self.expr_state
        yield (token_types.ident, self.line[index_0:self.index])
+
+
class Parser(object):
    """Recursive-descent parser producing a tree of Node objects.

    Tokens are pulled one at a time from a Tokenizer; ``self.token`` is
    the single lookahead (token type, value) tuple.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Reset all parser state, ready to parse a new input."""
        self.token = None
        # NOTE(review): these two instance attributes appear unused; the
        # expr methods consult the module-level unary_operators and
        # binary_operators lists instead — confirm before relying on them
        self.unary_operators = "!"
        self.binary_operators = frozenset(["&&", "||", "=="])
        self.tokenizer = Tokenizer()
        self.token_generator = None
        self.tree = Treebuilder(DataNode(None))
        self.expr_builder = None
        # Stack of builders supporting nested expressions (e.g. in indexes)
        self.expr_builders = []

    def parse(self, input):
        """Parse ``input`` (string or file-like) and return the root DataNode."""
        self.reset()
        self.token_generator = self.tokenizer.tokenize(input)
        self.consume()
        self.manifest()
        return self.tree.node

    def consume(self):
        # Advance the lookahead (Python 2 generator protocol)
        self.token = self.token_generator.next()

    def expect(self, type, value=None):
        """Consume the current token if it matches, else raise ParseError."""
        if self.token[0] != type:
            raise ParseError
        if value is not None:
            if self.token[1] != value:
                raise ParseError

        self.consume()

    def manifest(self):
        """manifest := data_block EOF"""
        self.data_block()
        self.expect(token_types.eof)

    def data_block(self):
        """Parse key/value entries followed by any [section] subtrees."""
        while self.token[0] == token_types.string:
            self.tree.append(KeyValueNode(self.token[1]))
            self.consume()
            self.expect(token_types.separator)
            self.value_block()
            self.tree.pop()

        while self.token == (token_types.paren, "["):
            self.consume()
            if self.token[0] != token_types.string:
                raise ParseError
            self.tree.append(DataNode(self.token[1]))
            self.consume()
            self.expect(token_types.paren, "]")
            # A section body is optional; it only exists if indented
            if self.token[0] == token_types.group_start:
                self.consume()
                self.data_block()
                self.eof_or_end_group()
            self.tree.pop()

    def eof_or_end_group(self):
        # At EOF the tokenizer never emits the closing group_end tokens
        if self.token[0] != token_types.eof:
            self.expect(token_types.group_end)

    def value_block(self):
        """Parse an inline value, or an indented block of conditionals
        optionally followed by a default value."""
        if self.token[0] == token_types.string:
            self.value()
        elif self.token[0] == token_types.group_start:
            self.consume()
            self.expression_values()
            if self.token[0] == token_types.string:
                self.value()
            self.eof_or_end_group()
        else:
            raise ParseError

    def expression_values(self):
        """Parse zero or more ``if expr: value`` lines."""
        while self.token == (token_types.ident, "if"):
            self.consume()
            self.tree.append(ConditionalNode())
            self.expr_start()
            self.expect(token_types.separator)
            if self.token[0] == token_types.string:
                self.value()
            else:
                raise ParseError
            self.tree.pop()

    def value(self):
        """Append a leaf ValueNode for the current string token."""
        self.tree.append(ValueNode(self.token[1]))
        self.consume()
        self.tree.pop()

    def expr_start(self):
        """Parse a complete expression with a fresh ExpressionBuilder.

        Nested calls (for index expressions) attach their result to the
        pending operand of the enclosing builder; the outermost call
        attaches the finished expression to the tree.
        """
        self.expr_builder = ExpressionBuilder()
        self.expr_builders.append(self.expr_builder)
        self.expr()
        expression = self.expr_builder.finish()
        self.expr_builders.pop()
        self.expr_builder = self.expr_builders[-1] if self.expr_builders else None
        if self.expr_builder:
            self.expr_builder.operands[-1].children[-1].append(expression)
        else:
            self.tree.append(expression)
            self.tree.pop()

    def expr(self):
        """expr := operand (binary_op operand)*"""
        self.expr_operand()
        while (self.token[0] == token_types.ident and self.token[1] in binary_operators):
            self.expr_bin_op()
            self.expr_operand()

    def expr_operand(self):
        """operand := '(' expr ')' | unary_op operand | value | number"""
        if self.token == (token_types.paren, "("):
            self.consume()
            self.expr_builder.left_paren()
            self.expr()
            self.expect(token_types.paren, ")")
            self.expr_builder.right_paren()
        elif self.token[0] == token_types.ident and self.token[1] in unary_operators:
            self.expr_unary_op()
            self.expr_operand()
        elif self.token[0] in [token_types.string, token_types.ident]:
            self.expr_value()
        elif self.token[0] == token_types.number:
            self.expr_number()
        else:
            raise ParseError

    def expr_unary_op(self):
        """Push a unary operator (e.g. "not") onto the builder."""
        if self.token[1] in unary_operators:
            self.expr_builder.push_operator(UnaryOperatorNode(self.token[1]))
            self.consume()
        else:
            raise ParseError()

    def expr_bin_op(self):
        """Push a binary operator (e.g. "==", "and") onto the builder."""
        if self.token[1] in binary_operators:
            self.expr_builder.push_operator(BinaryOperatorNode(self.token[1]))
            self.consume()
        else:
            raise ParseError()

    def expr_value(self):
        """Push a string or variable operand, with an optional [index]."""
        node_type = {token_types.string: StringNode,
                     token_types.ident: VariableNode}[self.token[0]]
        self.expr_builder.push_operand(node_type(self.token[1]))
        self.consume()
        if self.token == (token_types.paren, "["):
            self.consume()
            self.expr_builder.operands[-1].append(IndexNode())
            # The index is itself a full expression (nested builder)
            self.expr_start()
            self.expect(token_types.paren, "]")

    def expr_number(self):
        """Push a numeric literal operand."""
        self.expr_builder.push_operand(NumberNode(self.token[1]))
        self.consume()
+
+
class Treebuilder(object):
    """Tracks the current insertion point while building a node tree."""

    def __init__(self, root):
        self.root = root
        # The node new children are appended to
        self.node = root

    def append(self, node):
        """Append ``node`` to the current node and descend into it."""
        self.node.append(node)
        self.node = node
        return node

    def pop(self):
        """Move the insertion point back to the parent; return the old node."""
        popped = self.node
        self.node = popped.parent
        return popped
+
+
class ExpressionBuilder(object):
    """Builds an expression tree from operators and operands using
    operator-precedence (shunting-yard style) reduction.

    ``operators`` is a stack of operator nodes; a ``None`` entry marks the
    stack bottom or an open parenthesis. ``operands`` is a stack of
    completed sub-expression nodes.
    """

    def __init__(self):
        self.operands = []
        self.operators = [None]

    def finish(self):
        """Reduce all remaining operators and return the final expression."""
        while self.operators[-1] is not None:
            self.pop_operator()
        rv = self.pop_operand()
        assert self.is_empty()
        return rv

    def left_paren(self):
        # None shields lower operators from reduction until right_paren
        self.operators.append(None)

    def right_paren(self):
        """Reduce back to, and discard, the matching left_paren marker."""
        while self.operators[-1] is not None:
            self.pop_operator()
            if not self.operators:
                raise ParseError("Unbalanced parens")

        assert self.operators.pop() is None

    def push_operator(self, operator):
        """Stack an operator, reducing higher-precedence operators first."""
        assert operator is not None
        while self.precedence(self.operators[-1]) > self.precedence(operator):
            self.pop_operator()

        self.operators.append(operator)

    def pop_operator(self):
        """Pop one operator and combine it with its operand(s) into a node."""
        operator = self.operators.pop()
        if isinstance(operator, BinaryOperatorNode):
            # Operands pop in reverse order: rhs first, then lhs
            operand_1 = self.operands.pop()
            operand_0 = self.operands.pop()
            self.operands.append(BinaryExpressionNode(operator, operand_0, operand_1))
        else:
            operand_0 = self.operands.pop()
            self.operands.append(UnaryExpressionNode(operator, operand_0))

    def push_operand(self, node):
        self.operands.append(node)

    def pop_operand(self):
        return self.operands.pop()

    def is_empty(self):
        """True when no operands remain and only None markers are stacked."""
        return len(self.operands) == 0 and all(item is None for item in self.operators)

    def precedence(self, operator):
        # The bottom/paren marker binds less tightly than any real operator
        if operator is None:
            return 0
        return precedence(operator)
+
+
def parse(stream):
    """Parse ``stream`` with a fresh Parser and return the root node."""
    return Parser().parse(stream)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/serializer.py
@@ -0,0 +1,111 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from node import NodeVisitor, ValueNode, BinaryExpressionNode
+from parser import precedence
+
+
def escape(string, extras=""):
    # Python 2 only: encode a unicode string to UTF-8 bytes and apply
    # backslash-escaping (the "string_escape" codec does not exist on
    # Python 3). Each character in ``extras`` is additionally escaped.
    rv = string.encode("utf8").encode("string_escape")
    for extra in extras:
        rv = rv.replace(extra, "\\" + extra)
    return rv
+
+
class ManifestSerializer(NodeVisitor):
    """Serialize a manifest node tree back to its text representation.

    Each visit_* method returns a list of output lines for that subtree.

    :param skip_empty_data: if True, omit sections that have no children.
    """

    def __init__(self, skip_empty_data=False):
        self.skip_empty_data = skip_empty_data

    def serialize(self, root):
        """Return the serialized manifest text, always newline-terminated."""
        self.indent = 2
        rv = "\n".join(self.visit(root))
        # Bug fix: guard against empty output (e.g. an empty root with
        # skip_empty_data set), where rv[-1] raised IndexError.
        if not rv.endswith("\n"):
            rv = rv + "\n"
        return rv

    def visit_DataNode(self, node):
        rv = []
        if not self.skip_empty_data or node.children:
            if node.data:
                rv.append("[%s]" % escape(node.data, extras="]"))
                indent = self.indent * " "
            else:
                # The root section has no heading and adds no indent
                indent = ""

            for child in node.children:
                rv.extend("%s%s" % (indent if item else "", item) for item in self.visit(child))

            if node.parent:
                # Blank line between sibling sections
                rv.append("")

        return rv

    def visit_KeyValueNode(self, node):
        rv = [node.data + ":"]
        indent = " " * self.indent

        # A single unconditional value is emitted inline after the key
        if len(node.children) == 1 and isinstance(node.children[0], ValueNode):
            rv[0] += " %s" % escape(self.visit(node.children[0])[0])
        else:
            for child in node.children:
                rv.append(indent + self.visit(child)[0])

        return rv

    def visit_ValueNode(self, node):
        return [escape(node.data)]

    def visit_ConditionalNode(self, node):
        return ["if %s: %s" % tuple(self.visit(item)[0] for item in node.children)]

    def visit_StringNode(self, node):
        rv = ["\"%s\"" % node.data]
        for child in node.children:
            # Index expressions attached to the literal, e.g. "abc"[1]
            rv[0] += self.visit(child)[0]
        return rv

    def visit_NumberNode(self, node):
        return [node.data]

    def visit_VariableNode(self, node):
        rv = node.data
        for child in node.children:
            # Bug fix: visit() returns a list of lines; the original did
            # ``rv += self.visit(child)`` which is str += list, a TypeError
            # whenever a variable carries an index child (e.g. a[1]).
            rv += self.visit(child)[0]
        return [rv]

    def visit_IndexNode(self, node):
        assert len(node.children) == 1
        return ["[%s]" % self.visit(node.children[0])[0]]

    def visit_UnaryExpressionNode(self, node):
        children = []
        for child in node.children:
            child_str = self.visit(child)[0]
            # Parenthesize binary sub-expressions so precedence survives
            # a parse/serialize round trip
            if isinstance(child, BinaryExpressionNode):
                child_str = "(%s)" % child_str
            children.append(child_str)
        return [" ".join(children)]

    def visit_BinaryExpressionNode(self, node):
        assert len(node.children) == 3
        children = []
        # Children are stored [operator, lhs, rhs]; emit as "lhs op rhs"
        for child_index in [1, 0, 2]:
            child = node.children[child_index]
            child_str = self.visit(child)[0]
            if (isinstance(child, BinaryExpressionNode) and
                precedence(node.children[0]) < precedence(child.children[0])):
                child_str = "(%s)" % child_str
            children.append(child_str)
        return [" ".join(children)]

    def visit_UnaryOperatorNode(self, node):
        return [node.data]

    def visit_BinaryOperatorNode(self, node):
        return [node.data]
+
+
def serialize(tree, *args, **kwargs):
    """Serialize ``tree`` with a ManifestSerializer constructed from the
    given arguments; returns the manifest text."""
    return ManifestSerializer(*args, **kwargs).serialize(tree)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/tests/__init__.py
@@ -0,0 +1,3 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/tests/test_conditional.py
@@ -0,0 +1,150 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unittest
+
+from cStringIO import StringIO
+
+from ..backends import conditional
+from ..node import BinaryExpressionNode, BinaryOperatorNode, VariableNode, NumberNode
+
+
class TestConditional(unittest.TestCase):
    """Tests for the conditional manifest backend's get/set API."""

    def parse(self, input_str):
        # NOTE(review): this helper is unused and would raise
        # AttributeError if called — self.parser is never assigned
        # (there is no setUp); compare test_parser.TestExpression
        return self.parser.parse(StringIO(input_str))

    def compile(self, input_text):
        return conditional.compile(input_text)

    def test_get_0(self):
        # Conditions are evaluated against the run-info dict passed to get
        data = """
key: value

[Heading 1]
  other_key:
    if a == 1: value_1
    if a == 2: value_2
    value_3
"""

        manifest = self.compile(data)

        self.assertEquals(manifest.get("key"), "value")
        children = list(item for item in manifest.iterchildren())
        self.assertEquals(len(children), 1)
        section = children[0]
        self.assertEquals(section.name, "Heading 1")

        self.assertEquals(section.get("other_key", {"a": 1}), "value_1")
        self.assertEquals(section.get("other_key", {"a": 2}), "value_2")
        self.assertEquals(section.get("other_key", {"a": 7}), "value_3")
        # Keys not set in the section fall back to the parent
        self.assertEquals(section.get("key"), "value")

    def test_get_1(self):
        # Strings and numbers do not compare equal in conditions
        data = """
key: value

[Heading 1]
  other_key:
    if a == "1": value_1
    if a == 2: value_2
    value_3
"""

        manifest = self.compile(data)

        children = list(item for item in manifest.iterchildren())
        section = children[0]

        self.assertEquals(section.get("other_key", {"a": "1"}), "value_1")
        self.assertEquals(section.get("other_key", {"a": 1}), "value_3")

    def test_get_2(self):
        # Index expressions on variables, e.g. a[1]
        data = """
key:
  if a[1] == "b": value_1
  if a[1] == 2: value_2
  value_3
"""

        manifest = self.compile(data)

        self.assertEquals(manifest.get("key", {"a": "ab"}), "value_1")
        self.assertEquals(manifest.get("key", {"a": [1, 2]}), "value_2")

    def test_get_3(self):
        # Index expressions on string literals, e.g. "ab"[1]
        data = """
key:
  if a[1] == "ab"[1]: value_1
  if a[1] == 2: value_2
  value_3
"""

        manifest = self.compile(data)

        self.assertEquals(manifest.get("key", {"a": "ab"}), "value_1")
        self.assertEquals(manifest.get("key", {"a": [1, 2]}), "value_2")

    def test_set_0(self):
        data = """
key:
  if a == "a": value_1
  if a == "b": value_2
  value_3
"""
        manifest = self.compile(data)

        manifest.set("new_key", "value_new")

        self.assertEquals(manifest.get("new_key"), "value_new")

    def test_set_1(self):
        # Setting without a condition replaces only the default value
        data = """
key:
  if a == "a": value_1
  if a == "b": value_2
  value_3
"""

        manifest = self.compile(data)

        manifest.set("key", "value_new")

        self.assertEquals(manifest.get("key"), "value_new")
        self.assertEquals(manifest.get("key", {"a": "a"}), "value_1")

    def test_set_2(self):
        # Setting with an explicit condition expression node
        data = """
key:
  if a == "a": value_1
  if a == "b": value_2
  value_3
"""

        manifest = self.compile(data)

        expr = BinaryExpressionNode(BinaryOperatorNode("=="),
                                    VariableNode("a"),
                                    NumberNode("1"))

        manifest.set("key", "value_new", expr)

        self.assertEquals(manifest.get("key", {"a": 1}), "value_new")
        self.assertEquals(manifest.get("key", {"a": "a"}), "value_1")

    def test_api_0(self):
        data = """
key:
  if a == 1.5: value_1
  value_2
key_1: other_value
"""
        manifest = self.compile(data)

        self.assertFalse(manifest.is_empty)
        self.assertEquals(manifest.root, manifest)
        self.assertTrue(manifest.has_key("key_1"))
        self.assertFalse(manifest.has_key("key_2"))

        self.assertEquals(set(manifest.iterkeys()), set(["key", "key_1"]))
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/tests/test_parser.py
@@ -0,0 +1,71 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unittest
+
+from cStringIO import StringIO
+
+from .. import parser
+
+# There aren't many tests here because it turns out to be way more convenient to
+# use test_serializer for the majority of cases
+
+
class TestExpression(unittest.TestCase):
    """Tests that conditional expressions parse into the expected tree.

    Expected trees are [class name, node data, children] triples, matched
    recursively against the parsed Node objects.
    """

    def setUp(self):
        self.parser = parser.Parser()

    def parse(self, input_str):
        return self.parser.parse(StringIO(input_str))

    def compare(self, input_text, expected):
        actual = self.parse(input_text)
        self.match(expected, actual)

    def match(self, expected_node, actual_node):
        self.assertEquals(expected_node[0], actual_node.__class__.__name__)
        self.assertEquals(expected_node[1], actual_node.data)
        self.assertEquals(len(expected_node[2]), len(actual_node.children))
        for expected_child, actual_child in zip(expected_node[2], actual_node.children):
            self.match(expected_child, actual_child)

    def test_expr_0(self):
        # Simple binary comparison: children are [operator, lhs, rhs]
        self.compare(
            """
key:
  if x == 1 : value""",
            ["DataNode", None,
             [["KeyValueNode", "key",
               [["ConditionalNode", None,
                 [["BinaryExpressionNode", None,
                   [["BinaryOperatorNode", "==", []],
                    ["VariableNode", "x", []],
                       ["NumberNode", "1", []]
                    ]],
                     ["ValueNode", "value", []],
                  ]]]]]]
        )

    def test_expr_1(self):
        # "not" binds tighter than "and", so it applies only to x
        self.compare(
            """
key:
  if not x and y : value""",
            ["DataNode", None,
             [["KeyValueNode", "key",
               [["ConditionalNode", None,
                 [["BinaryExpressionNode", None,
                   [["BinaryOperatorNode", "and", []],
                    ["UnaryExpressionNode", None,
                       [["UnaryOperatorNode", "not", []],
                        ["VariableNode", "x", []]
                        ]],
                       ["VariableNode", "y", []]
                    ]],
                     ["ValueNode", "value", []],
                  ]]]]]]
        )
+
+if __name__ == "__main__":
+    unittest.main()
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/tests/test_serializer.py
@@ -0,0 +1,124 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unittest
+
+from cStringIO import StringIO
+
+from .. import parser, serializer
+
+
class TokenizerTest(unittest.TestCase):
    """Round-trip tests: parse then serialize, comparing to the input.

    NOTE(review): the class name is misleading — these exercise the
    parser + serializer round trip, not the tokenizer directly.
    """

    def setUp(self):
        self.serializer = serializer.ManifestSerializer()
        self.parser = parser.Parser()

    def serialize(self, input_str):
        return self.serializer.serialize(self.parser.parse(StringIO(input_str)))

    def compare(self, input_str, expected=None):
        # With no explicit expectation the round trip must be lossless
        if expected is None:
            expected = input_str

        actual = self.serialize(input_str)
        self.assertEquals(actual, expected)

    def test_0(self):
        self.compare("""key: value
[Heading 1]
  other_key: other_value
""")

    def test_1(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a or b: other_value
""")

    def test_2(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a or b: other_value
    fallback_value
""")

    def test_3(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a == 1: other_value
    fallback_value
""")

    def test_4(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a == "1": other_value
    fallback_value
""")

    def test_5(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a == "abc"[1]: other_value
    fallback_value
""")

    def test_6(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if a == "abc"[c]: other_value
    fallback_value
""")

    def test_7(self):
        # Redundant parens around the higher-precedence operand are dropped
        self.compare("""key: value
[Heading 1]
  other_key:
    if (a or b) and c: other_value
    fallback_value
""",
"""key: value
[Heading 1]
  other_key:
    if a or b and c: other_value
    fallback_value
""")

    def test_8(self):
        # Parens that change precedence are preserved
        self.compare("""key: value
[Heading 1]
  other_key:
    if a or (b and c): other_value
    fallback_value
""")

    def test_9(self):
        self.compare("""key: value
[Heading 1]
  other_key:
    if not (a and b): other_value
    fallback_value
""")

    def test_10(self):
        self.compare("""key: value
[Heading 1]
  some_key: some_value

[Heading 2]
  other_key: other_value
"""
                     )

    def test_11(self):
        self.compare("""key:
  if not a and b and c and d: true
"""
                     )
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/tests/test_static.py
@@ -0,0 +1,105 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import unittest
+
+from cStringIO import StringIO
+
+from ..backends import static
+
+# There aren't many tests here because it turns out to be way more convenient to
+# use test_serializer for the majority of cases
+
+
class TestStatic(unittest.TestCase):
    """Tests for the static manifest backend, which resolves conditional
    values at compile time against a fixed dict of run-time properties.

    (The broken ``parse`` helper, which referenced a ``self.parser``
    attribute that was never assigned and so could only raise
    AttributeError, has been removed; it had no callers.)
    """

    def compile(self, input_text, input_data):
        # Convenience wrapper; input_data supplies the values the
        # conditionals are evaluated against.
        return static.compile(input_text, input_data)

    def test_get_0(self):
        data = """
key: value

[Heading 1]
  other_key:
    if a == 1: value_1
    if a == 2: value_2
    value_3
"""

        manifest = self.compile(data, {"a": 2})

        self.assertEqual(manifest.get("key"), "value")
        children = list(manifest.iterchildren())
        self.assertEqual(len(children), 1)
        section = children[0]
        self.assertEqual(section.name, "Heading 1")

        self.assertEqual(section.get("other_key"), "value_2")
        # Keys not set on the section fall back to the parent manifest.
        self.assertEqual(section.get("key"), "value")

    def test_get_1(self):
        data = """
key: value

[Heading 1]
  other_key:
    if a == 1: value_1
    if a == 2: value_2
    value_3
"""
        # No condition matches, so the unconditional default is used.
        manifest = self.compile(data, {"a": 3})

        children = list(manifest.iterchildren())
        section = children[0]
        self.assertEqual(section.get("other_key"), "value_3")

    def test_get_3(self):
        data = """key:
  if a == "1": value_1
  if a[0] == "ab"[0]: value_2
"""
        manifest = self.compile(data, {"a": "1"})
        self.assertEqual(manifest.get("key"), "value_1")

        manifest = self.compile(data, {"a": "ac"})
        self.assertEqual(manifest.get("key"), "value_2")

    def test_get_4(self):
        data = """key:
  if not a: value_1
  value_2
"""
        manifest = self.compile(data, {"a": True})
        self.assertEqual(manifest.get("key"), "value_2")

        manifest = self.compile(data, {"a": False})
        self.assertEqual(manifest.get("key"), "value_1")

    def test_api(self):
        data = """key:
  if a == 1.5: value_1
  value_2
key_1: other_value
"""
        manifest = self.compile(data, {"a": 1.5})

        self.assertFalse(manifest.is_empty)
        self.assertEqual(manifest.root, manifest)
        self.assertTrue(manifest.has_key("key_1"))
        self.assertFalse(manifest.has_key("key_2"))

        self.assertEqual(set(manifest.iterkeys()), set(["key", "key_1"]))
        self.assertEqual(set(manifest.itervalues()), set(["value_1", "other_value"]))

    def test_is_empty_1(self):
        # Sections that contain no keys anywhere count as empty.
        data = """
[Section]
  [Subsection]
"""
        manifest = self.compile(data, {})

        self.assertTrue(manifest.is_empty)
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptmanifest/tests/test_tokenizer.py
@@ -0,0 +1,291 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import sys
+import os
+import unittest
+
+sys.path.insert(0, os.path.abspath(".."))
+from cStringIO import StringIO
+
+from .. import parser
+from ..parser import token_types
+
+
class TokenizerTest(unittest.TestCase):
    """Tests for parser.Tokenizer, covering headings, key/value pairs and
    conditional expressions, including quoting, escaping and comments."""

    def setUp(self):
        self.tokenizer = parser.Tokenizer()

    def tokenize(self, input_str):
        # Collect tokens, stopping at (and including) the first eof token.
        rv = []
        for item in self.tokenizer.tokenize(StringIO(input_str)):
            rv.append(item)
            if item[0] == token_types.eof:
                break
        return rv

    def compare(self, input_text, expected):
        # Every token stream ends with a single eof token, so append it
        # here rather than repeating it in every test.
        expected = expected + [(token_types.eof, None)]
        actual = self.tokenize(input_text)
        self.assertEquals(actual, expected)

    def test_heading_0(self):
        self.compare("""[Heading text]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading text"),
                      (token_types.paren, "]")])

    def test_heading_1(self):
        # A backslash escapes "]" inside a heading name.
        self.compare("""[Heading [text\]]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading [text]"),
                      (token_types.paren, "]")])

    def test_heading_2(self):
        # "#" inside a heading is part of the name, not a comment.
        self.compare("""[Heading #text]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading #text"),
                      (token_types.paren, "]")])

    def test_heading_3(self):
        self.compare("""[Heading [\]text]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading []text"),
                      (token_types.paren, "]")])

    def test_heading_4(self):
        # An unterminated heading is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("[Heading")

    def test_heading_5(self):
        # A trailing comment after a heading is ignored.
        self.compare("""[Heading [\]text] #comment""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading []text"),
                      (token_types.paren, "]")])

    def test_heading_6(self):
        # A backslash-t escape in the input becomes a literal tab.
        self.compare("""[Heading \\ttext]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading \ttext"),
                      (token_types.paren, "]")])

    def test_key_0(self):
        self.compare("""key:value""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_1(self):
        # Whitespace around the separator is not significant.
        self.compare("""key  :  value""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_2(self):
        # Internal whitespace in a value is preserved.
        self.compare("""key  :  val ue""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "val ue")])

    def test_key_3(self):
        # A comment terminates an unquoted value.
        self.compare("""key: value#comment""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_4(self):
        # Whitespace inside a key is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""ke y: value""")

    def test_key_5(self):
        # A key without a separator is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key""")

    def test_key_6(self):
        self.compare("""key: "value\"""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_7(self):
        self.compare("""key: 'value'""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_8(self):
        # "#" inside a quoted value is not a comment delimiter.
        self.compare("""key: "#value\"""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "#value")])

    def test_key_9(self):
        self.compare("""key: '#value\'""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "#value")])

    def test_key_10(self):
        # An unterminated double-quoted value is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: "value""")

    def test_key_11(self):
        # An unterminated single-quoted value is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: 'value""")

    def test_key_12(self):
        # NOTE(review): duplicate of test_key_11.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: 'value""")

    def test_key_13(self):
        # Trailing junk after a quoted value is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: 'value' abc""")

    def test_expr_0(self):
        # An indented conditional block begins with a group_start token.
        self.compare(
"""
key:
  if cond == 1: value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_1(self):
        self.compare(
"""
key:
  if cond == 1: value1
  value2""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1"),
             (token_types.separator, ":"),
             (token_types.string, "value1"),
             (token_types.string, "value2")])

    def test_expr_2(self):
        self.compare(
"""
key:
  if cond=="1": value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.string, "1"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_3(self):
        self.compare(
"""
key:
  if cond==1.1: value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1.1"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_4(self):
        self.compare(
            """
key:
  if cond==1.1 and cond2 == "a": value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1.1"),
             (token_types.ident, "and"),
             (token_types.ident, "cond2"),
             (token_types.ident, "=="),
             (token_types.string, "a"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_5(self):
        self.compare(
"""
key:
  if (cond==1.1 ): value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.paren, "("),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1.1"),
             (token_types.paren, ")"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_6(self):
        self.compare(
"""
key:
  if "\\ttest": value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.string, "\ttest"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_7(self):
        # An identifier may not start with a digit.
        with self.assertRaises(parser.ParseError):
            self.tokenize(
"""
key:
  if 1A: value""")

    def test_expr_8(self):
        with self.assertRaises(parser.ParseError):
            self.tokenize(
"""
key:
  if 1a: value""")

    def test_expr_9(self):
        # A number may contain at most one decimal point.
        with self.assertRaises(parser.ParseError):
            self.tokenize(
"""
key:
  if 1.1.1: value""")

    def test_expr_10(self):
        # A trailing decimal point is accepted as part of the number.
        self.compare(
"""
key:
  if 1.: value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.number, "1."),
             (token_types.separator, ":"),
             (token_types.string, "value")])
+
if __name__ == "__main__":
    # Support running this test file directly from the command line.
    unittest.main()
new file mode 100644
--- /dev/null
+++ b/testing/web-platform/harness/wptrunner/wptrunner.py
@@ -0,0 +1,657 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import unicode_literals
+
+import json
+import logging
+import os
+import shutil
+import socket
+import sys
+import threading
+import time
+from StringIO import StringIO
+from collections import defaultdict, OrderedDict
+from multiprocessing import Queue
+
+from mozlog.structured import commandline, stdadapter
+
+import manifestexpected
+import manifestinclude
+import products
+import wptcommandline
+import wpttest
+from testrunner import ManagerGroup
+
+here = os.path.split(__file__)[0]
+
+
+"""Runner for web-platform-tests
+
+The runner has several design goals:
+
+* Tests should run with no modification from upstream.
+
+* Tests should be regarded as "untrusted" so that errors, timeouts and even
+  crashes in the tests can be handled without failing the entire test run.
+
+* For performance, tests can be run in multiple browsers in parallel.
+
+The upstream repository has the facility for creating a test manifest in JSON
+format. This manifest is used directly to determine which tests exist. Local
+metadata files are used to store the expected test results.
+
+"""
+
+logger = None
+
+
def setup_logging(args, defaults):
    """Set up structured logging and consume the logging arguments.

    :param args: dict of command-line arguments; all ``log_*`` entries are
                 removed after being passed to the logging setup.
    :param defaults: default logging configuration passed through to
                     mozlog's commandline setup.
    :return: the configured structured logger (also stored in the
             module-level ``logger``).
    """
    global logger
    logger = commandline.setup_logging("web-platform-tests", args, defaults)
    setup_stdlib_logger()

    # Iterate over a snapshot of the keys: popping from a dict while
    # iterating its live key view breaks on Python 3.
    for name in list(args.keys()):
        if name.startswith("log_"):
            args.pop(name)

    return logger
+
+
def setup_stdlib_logger():
    # Drop any handlers already attached to the stdlib root logger and wrap
    # it so records emitted through the stdlib logging module are forwarded
    # to the structured (mozlog) logger.
    logging.root.handlers = []
    logging.root = stdadapter.std_logging_adapter(logging.root)
+
+
def do_test_relative_imports(test_root):
    """Import the ``serve`` and ``manifest`` modules from the tests tree.

    The web-platform-tests checkout ships these as plain modules, so the
    test root and its tools/scripts directory are prepended to sys.path
    before importing.  Exits the process if either import fails.

    :param test_root: path to the web-platform-tests checkout
    """
    global serve, manifest

    # os.path.join with a single argument was a no-op; use the path directly.
    sys.path.insert(0, test_root)
    sys.path.insert(0, os.path.join(test_root, "tools", "scripts"))

    # Collect every failed import so the error message is complete even
    # when both modules are missing (previously only the last was named).
    failed = []
    try:
        import serve
    except ImportError:
        failed.append("serve")
    try:
        import manifest
    except ImportError:
        failed.append("manifest")

    if failed:
        logger.critical(
            "Failed to import %s. Ensure that tests path %s contains web-platform-tests" %
            (", ".join(failed), test_root))
        sys.exit(1)
+
class TestEnvironmentError(Exception):
    """Raised when the test environment cannot be set up or torn down."""
    pass
+
+
class TestEnvironment(object):
    """Context manager that owns the test environment i.e. the http and
    websockets servers"""

    def __init__(self, test_path, options):
        """
        :param test_path: Path to the web-platform-tests checkout.
        :param options: Optional dict of environment options.  The keys
            ``test_server_port`` and ``required_files`` are consumed here;
            the remainder is interpolated into the local server config.
        """
        self.test_path = test_path
        self.server = None
        self.config = None
        # Normalise options to a dict *before* popping from it; popping
        # first would raise AttributeError whenever options is None,
        # defeating the None guard entirely.
        self.options = options if options is not None else {}
        # Whether to verify each server's port accepts connections on startup.
        self.test_server_port = self.options.pop("test_server_port", True)
        # List of (source, destination, copy_if_exists) tuples to place
        # into the served tree before starting.
        self.required_files = self.options.pop("required_files", [])
        self.files_to_restore = []

    def __enter__(self):
        self.copy_required_files()

        config = self.load_config()
        serve.set_computed_defaults(config)

        serve.logger = serve.default_logger("info")
        self.config, self.servers = serve.start(config)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore any overwritten files and shut down every server,
        # regardless of whether the tests ran cleanly.
        self.restore_files()
        for scheme, servers in self.servers.iteritems():
            for port, server in servers:
                server.kill()

    def load_config(self):
        """Merge the upstream default server config with the local one,
        interpolating self.options into the local config template."""
        default_config_path = os.path.join(self.test_path, "config.default.json")
        local_config_path = os.path.join(here, "config.json")

        with open(default_config_path) as f:
            default_config = json.load(f)

        with open(local_config_path) as f:
            data = f.read()
            local_config = json.loads(data % self.options)

        return serve.merge_json(default_config, local_config)

    def copy_required_files(self):
        """Copy each required file into the served tree, backing up any
        existing destination so it can be restored on exit."""
        logger.info("Placing required files in server environment.")
        for source, destination, copy_if_exists in self.required_files:
            source_path = os.path.join(here, source)
            dest_path = os.path.join(self.test_path, destination, os.path.split(source)[1])
            dest_exists = os.path.exists(dest_path)
            if not dest_exists or copy_if_exists:
                if dest_exists:
                    backup_path = dest_path + ".orig"
                    logger.info("Backing up %s to %s" % (dest_path, backup_path))
                    self.files_to_restore.append(dest_path)
                    shutil.copy2(dest_path, backup_path)
                logger.info("Copying %s to %s" % (source_path, dest_path))
                shutil.copy2(source_path, dest_path)

    def ensure_started(self):
        """Check that every server came up, raising if any did not.

        NOTE(review): this raises the builtin EnvironmentError rather than
        the TestEnvironmentError defined in this module -- confirm whether
        that is intentional.
        """
        # Pause for a while to ensure that the server has a chance to start
        time.sleep(2)
        for scheme, servers in self.servers.iteritems():
            for port, server in servers:
                if self.test_server_port:
                    s = socket.socket()
                    try:
                        s.connect((self.config["host"], port))
                    except socket.error:
                        raise EnvironmentError(
                            "%s server on port %d failed to start" % (scheme, port))
                    finally:
                        s.close()

                if not server.is_alive():
                    raise EnvironmentError("%s server on port %d failed to start" % (scheme, port))

    def restore_files(self):
        """Put back the original copy of every file that was backed up."""
        for path in self.files_to_restore:
            os.unlink(path)
            if os.path.exists(path + ".orig"):
                os.rename(path + ".orig", path)
+
+
class TestChunker(object):
    """Base class for strategies that split a test manifest into chunks.

    Subclasses implement __call__, taking the manifest and yielding only
    the items that belong to this process's chunk.
    """

    def __init__(self, total_chunks, chunk_number):
        # chunk_number is 1-based, so it may be at most total_chunks.
        assert chunk_number <= total_chunks
        self.total_chunks = total_chunks
        self.chunk_number = chunk_number

    def __call__(self, manifest):
        raise NotImplementedError
+
+
class Unchunked(TestChunker):
    """Trivial chunker for the single-chunk case: yields every test."""

    def __init__(self, *args, **kwargs):
        TestChunker.__init__(self, *args, **kwargs)
        # With one chunk there is nothing to partition.
        assert self.total_chunks == 1

    def __call__(self, manifest):
        for entry in manifest:
            yield entry
+
+
class HashChunker(TestChunker):
    """Chunker that assigns each test path to a chunk by hashing the path."""

    def __call__(self, manifest):
        # Bug fix: the original signature omitted the ``manifest``
        # parameter and iterated a module-level name of the same name (the
        # imported ``manifest`` module), which could never work.  Accept
        # the manifest argument per the TestChunker.__call__ contract.
        chunk_index = self.chunk_number - 1
        for test_path, tests in manifest:
            if hash(test_path) % self.total_chunks == chunk_index:
                yield test_path, tests
+
+
+class EqualTimeChunker(TestChunker):
+    """Chunker that uses the test timeout as a proxy for the running time of the test"""
+
+    def _get_chunk(self, manifest_items):
+        # For each directory containing tests, calculate the maximum execution time after running all
+        # the tests in that directory. Then work out the index into the manifest corresponding to the
+        # directories at fractions of m/N of the running time where m=1..N-1 and N is the total number
+        # of chunks. Return an array of these indices
+
+        total_time = 0
+        by_dir = OrderedDict()
+
+        class PathData(object):
+            def __init__(self, path):
+                self.path = path
+                self.time = 0
+                self.tests = []
+
+        class Chunk(object):
+            def __init__(self):
+                self.paths = []
+                self.tests = []
+                self.time = 0
+
+            def append(self, path_data):
+                self.paths.append(path_data.path)
+                self.tests.extend(path_data.tests)
+                self.time += path_data.time
+
+        class ChunkList(object):
+            def __init__(self, total_time, n_chunks):
+                self.total_time = total_time
+                self.n_chunks = n_chunks
+
+                self.remaining_chunks = n_chunks
+
+                self.chunks = []
+
+                self.update_time_per_chunk()
+
+            def __iter__(self):
+                for item in self.chunks:
+                    yield item
+
+            def __getitem__(self, i):
+                return self.chunks[i]
+
+            def sort_chunks(self):
+                self.chunks = sorted(self.chunks, key=lambda x:x.paths[0])
+
+            def get_tests(self, chunk_number):
+                return self[chunk_number - 1].tests
+
+            def append(self, chunk):
+                if len(self.chunks) == self.n_chunks:
+                    raise ValueError("Tried to create more than %n chunks" % self.n_chunks)
+                self.chunks.append(chunk)
+                self.remaining_chunks -= 1
+
+            @property
+            def current_chunk(self):
+                if self.chunks:
+                    return self.chunks[-1]