Backed out 2 changesets (bug 1195299) for l10n bustages a=backout
author Bogdan Tara <btara@mozilla.com>
Fri, 05 Apr 2019 14:11:45 +0300
changeset 468113 93075ec49df3982c26873b822d762bd3d8863fad
parent 468111 61540ab18c9b1def917e2799b4a468c12abd4f38
child 468114 0909d7351f7ba3aea64ca4a32ba0ffa60226a4d6
child 468136 1acaa57cfbe8f73c0a7083a37458a95d68835cbb
push id 112683
push user btara@mozilla.com
push date Fri, 05 Apr 2019 11:14:44 +0000
treeherder mozilla-inbound@0909d7351f7b
reviewers backout
bugs 1195299
milestone 68.0a1
backs out 9645ac1a985159be748c02f38a539d87ea4d0ab6
4de7f94119fd3703dcf7846847084012105ac3a1
first release with nightly (linux32, linux64, mac, win32, win64): 93075ec49df3 / 68.0a1 / 20190405111221
last release without: none listed
Backed out 2 changesets (bug 1195299) for l10n bustages a=backout
Backed out changeset 9645ac1a9851 (bug 1195299)
Backed out changeset 4de7f94119fd (bug 1195299)
python/mozbuild/mozbuild/action/test_archive.py
taskcluster/scripts/builder/build-linux.sh
taskcluster/scripts/tester/test-linux.sh
taskcluster/taskgraph/transforms/job/mozharness.py
testing/mozharness/manifestparser/__init__.py
testing/mozharness/manifestparser/cli.py
testing/mozharness/manifestparser/expression.py
testing/mozharness/manifestparser/filters.py
testing/mozharness/manifestparser/ini.py
testing/mozharness/manifestparser/manifestparser.py
testing/mozharness/mozfile/__init__.py
testing/mozharness/mozfile/mozfile.py
testing/mozharness/mozinfo/__init__.py
testing/mozharness/mozinfo/mozinfo.py
testing/mozharness/mozinfo/string_version.py
testing/mozharness/mozprocess/__init__.py
testing/mozharness/mozprocess/processhandler.py
testing/mozharness/mozprocess/qijo.py
testing/mozharness/mozprocess/winprocess.py
testing/mozharness/tox.ini
--- a/python/mozbuild/mozbuild/action/test_archive.py
+++ b/python/mozbuild/mozbuild/action/test_archive.py
@@ -399,46 +399,16 @@ ARCHIVE_FILES = {
         },
     ],
     'mozharness': [
         {
             'source': buildconfig.topsrcdir,
             'base': 'testing',
             'pattern': 'mozharness/**',
         },
-        {
-            'source': buildconfig.topsrcdir,
-            'base': 'testing/mozbase/manifestparser',
-            'pattern': 'manifestparser/**',
-            'dest': 'mozharness',
-        },
-        {
-            'source': buildconfig.topsrcdir,
-            'base': 'testing/mozbase/mozfile',
-            'pattern': 'mozfile/**',
-            'dest': 'mozharness',
-        },
-        {
-            'source': buildconfig.topsrcdir,
-            'base': 'testing/mozbase/mozinfo',
-            'pattern': 'mozinfo/**',
-            'dest': 'mozharness',
-        },
-        {
-            'source': buildconfig.topsrcdir,
-            'base': 'testing/mozbase/mozprocess',
-            'pattern': 'mozprocess/**',
-            'dest': 'mozharness',
-        },
-        {
-            'source': buildconfig.topsrcdir,
-            'base': 'third_party/python/six',
-            'pattern': 'six.py',
-            'dest': 'mozharness',
-        },
     ],
     'reftest': [
         {
             'source': buildconfig.topobjdir,
             'base': '_tests',
             'pattern': 'reftest/**',
         },
         {
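For orientation: each ARCHIVE_FILES entry is a copy rule for the test archive. The entries removed here had copied in-tree mozbase packages (plus six.py) into the mozharness archive; with them backed out, the vendored copies restored below ship instead. A minimal sketch of one rule's shape (the source path is an assumption, not the real packaging code):

    # Illustrative only: one of the backed-out copy rules.
    entry = {
        'source': '/checkout',                     # stands in for buildconfig.topsrcdir
        'base': 'testing/mozbase/manifestparser',  # look under this subtree
        'pattern': 'manifestparser/**',            # glob relative to base
        'dest': 'mozharness',                      # archive-relative destination
    }
    # i.e. /checkout/testing/mozbase/manifestparser/manifestparser/**
    #      -> <archive>/mozharness/manifestparser/**
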
--- a/taskcluster/scripts/builder/build-linux.sh
+++ b/taskcluster/scripts/builder/build-linux.sh
@@ -83,17 +83,17 @@ fi
 
 # $TOOLTOOL_CACHE bypasses mozharness completely and is read by tooltool_wrapper.sh to set the
 # cache.  However, only some mozharness scripts use tooltool_wrapper.sh, so this may not be
 # entirely effective.
 export TOOLTOOL_CACHE
 
 config_path_cmds=""
 for path in ${MOZHARNESS_CONFIG_PATHS}; do
-    config_path_cmds="${config_path_cmds} --extra-config-path ${GECKO_PATH}/${path}"
+    config_path_cmds="${config_path_cmds} --extra-config-path ${WORKSPACE}/build/src/${path}"
 done
 
 # support multiple, space delimited, config files
 config_cmds=""
 for cfg in $MOZHARNESS_CONFIG; do
   config_cmds="${config_cmds} --config ${cfg}"
 done
 
@@ -112,17 +112,17 @@ if [ -n "$MOZHARNESS_OPTIONS" ]; then
     options=""
     for option in $MOZHARNESS_OPTIONS; do
         options="$options --$option"
     done
 fi
 
 cd /builds/worker
 
-$GECKO_PATH/mach python $GECKO_PATH/testing/${MOZHARNESS_SCRIPT} \
+python2.7 $WORKSPACE/build/src/testing/${MOZHARNESS_SCRIPT} \
   ${config_path_cmds} \
   ${config_cmds} \
   $debug_flag \
   $custom_build_variant_cfg_flag \
   $actions \
   $options \
   --log-level=debug \
   --scm-level=$MOZ_SCM_LEVEL \
--- a/taskcluster/scripts/tester/test-linux.sh
+++ b/taskcluster/scripts/tester/test-linux.sh
@@ -14,17 +14,16 @@ elif [ "${DISTRIB_RELEASE}" == "16.04" ]
 fi
 
 ####
 # Taskcluster friendly wrapper for performing fx desktop tests via mozharness.
 ####
 
 # Inputs, with defaults
 
-: GECKO_PATH                    ${GECKO_PATH}
 : MOZHARNESS_PATH               ${MOZHARNESS_PATH}
 : MOZHARNESS_URL                ${MOZHARNESS_URL}
 : MOZHARNESS_SCRIPT             ${MOZHARNESS_SCRIPT}
 : MOZHARNESS_CONFIG             ${MOZHARNESS_CONFIG}
 : MOZHARNESS_OPTIONS            ${MOZHARNESS_OPTIONS}
 : NEED_XVFB                     ${NEED_XVFB:=true}
 : NEED_WINDOW_MANAGER           ${NEED_WINDOW_MANAGER:=false}
 : NEED_PULSEAUDIO               ${NEED_PULSEAUDIO:=false}
@@ -179,30 +178,26 @@ done
 
 if [ -n "$MOZHARNESS_OPTIONS" ]; then
     options=""
     for option in $MOZHARNESS_OPTIONS; do
         options="$options --$option"
     done
 fi
 
-# Use |mach python| if a source checkout exists so in-tree packages are
-# available.
-[[ -d "${GECKO_PATH}" ]] && python="${GECKO_PATH}/mach python" || python="python2.7"
-
-# Save the computed mozharness command to a binary which is useful for
-# interactive mode.
 mozharness_bin="$HOME/bin/run-mozharness"
 mkdir -p $(dirname $mozharness_bin)
 
+# Save the computed mozharness command to a binary which is useful
+# for interactive mode.
 echo -e "#!/usr/bin/env bash
 # Some mozharness scripts assume base_work_dir is in
 # the current working directory, see bug 1279237
 cd "$WORKSPACE"
-cmd=\"${python} ${MOZHARNESS_PATH}/scripts/${MOZHARNESS_SCRIPT} ${config_cmds} ${options} ${@} \${@}\"
+cmd=\"python2.7 ${MOZHARNESS_PATH}/scripts/${MOZHARNESS_SCRIPT} ${config_cmds} ${options} ${@} \${@}\"
 echo \"Running: \${cmd}\"
 exec \${cmd}" > ${mozharness_bin}
 chmod +x ${mozharness_bin}
 
 # In interactive mode, the user will be prompted with options for what to do.
 if ! $TASKCLUSTER_INTERACTIVE; then
   # run the given mozharness script and configs, but pass the rest of the
   # arguments in from our own invocation
--- a/taskcluster/taskgraph/transforms/job/mozharness.py
+++ b/taskcluster/taskgraph/transforms/job/mozharness.py
@@ -295,29 +295,26 @@ def mozharness_on_generic_worker(config,
     if config.params.is_try():
         env['TRY_COMMIT_MSG'] = config.params['message'] or 'no commit message'
 
     if not job['attributes']['build_platform'].startswith('win'):
         raise Exception(
             "Task generation for mozharness build jobs currently only supported on Windows"
         )
 
-    gecko = env['GECKO_PATH'].replace('.', '%cd%')
-    mh_command = [
-            'c:/mozilla-build/python/python.exe',
-            '{}/mach'.format(gecko), 'python',
-    ]
-    mh_command.append('/'.join([gecko, 'testing', run['script']]))
+    mh_command = [r'c:\mozilla-build\python\python.exe']
+    mh_command.append('\\'.join([r'.\build\src\testing', run['script'].replace('/', '\\')]))
 
     if 'config-paths' in run:
         for path in run['config-paths']:
-            mh_command.append('--extra-config-path {}/{}'.format(gecko, path))
+            mh_command.append(r'--extra-config-path '
+                              r'.\build\src\{}'.format(path.replace('/', '\\')))
 
     for cfg in run['config']:
-        mh_command.append('--config ' + cfg)
+        mh_command.append('--config ' + cfg.replace('/', '\\'))
     if run['use-magic-mh-args']:
         mh_command.append('--branch ' + config.params['project'])
     mh_command.append(r'--work-dir %cd:Z:=z:%\build')
     for action in run.get('actions', []):
         mh_command.append('--' + action)
 
     for option in run.get('options', []):
         mh_command.append('--' + option)
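For context, the restored code above builds the Windows mozharness command directly against the checkout at .\build\src instead of going through mach. A minimal sketch of the resulting command, using an illustrative run dict (the script and config names are assumptions, not taken from this patch):

    run = {'script': 'mozharness/scripts/fx_desktop_build.py',  # illustrative
           'config': ['builds/taskcluster_base_windows.py']}    # illustrative

    mh_command = [r'c:\mozilla-build\python\python.exe']
    mh_command.append('\\'.join([r'.\build\src\testing',
                                 run['script'].replace('/', '\\')]))
    for cfg in run['config']:
        mh_command.append('--config ' + cfg.replace('/', '\\'))

    print(' '.join(mh_command))
    # c:\mozilla-build\python\python.exe .\build\src\testing\mozharness\scripts\fx_desktop_build.py --config builds\taskcluster_base_windows.py
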
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/manifestparser/__init__.py
@@ -0,0 +1,8 @@
+# flake8: noqa
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from .manifestparser import *
+from .expression import *
+from .ini import *
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/manifestparser/cli.py
@@ -0,0 +1,247 @@
+#!/usr/bin/env python
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Mozilla universal manifest parser
+"""
+
+from optparse import OptionParser
+import os
+import sys
+
+from .manifestparser import (
+    convert,
+    ManifestParser,
+)
+
+
+class ParserError(Exception):
+    """error for exceptions while parsing the command line"""
+
+
+def parse_args(_args):
+    """
+    parse and return:
+    --keys=value (or --key value)
+    -tags
+    args
+    """
+
+    # return values
+    _dict = {}
+    tags = []
+    args = []
+
+    # parse the arguments
+    key = None
+    for arg in _args:
+        if arg.startswith('---'):
+            raise ParserError("arguments should start with '-' or '--' only")
+        elif arg.startswith('--'):
+            if key:
+                raise ParserError("Key %s still open" % key)
+            key = arg[2:]
+            if '=' in key:
+                key, value = key.split('=', 1)
+                _dict[key] = value
+                key = None
+                continue
+        elif arg.startswith('-'):
+            if key:
+                raise ParserError("Key %s still open" % key)
+            tags.append(arg[1:])
+            continue
+        else:
+            if key:
+                _dict[key] = arg
+                continue
+            args.append(arg)
+
+    # return values
+    return (_dict, tags, args)
+
+
+class CLICommand(object):
+    usage = '%prog [options] command'
+
+    def __init__(self, parser):
+        self._parser = parser  # master parser
+
+    def parser(self):
+        return OptionParser(usage=self.usage, description=self.__doc__,
+                            add_help_option=False)
+
+
+class Copy(CLICommand):
+    usage = '%prog [options] copy manifest directory -tag1 -tag2 --key1=value1 --key2=value2 ...'
+
+    def __call__(self, options, args):
+        # parse the arguments
+        try:
+            kwargs, tags, args = parse_args(args)
+        except ParserError, e:
+            self._parser.error(e.message)
+
+        # make sure we have some manifests, otherwise it will
+        # be quite boring
+        if not len(args) == 2:
+            HelpCLI(self._parser)(options, ['copy'])
+            return
+
+        # read the manifests
+        # TODO: should probably ensure these exist here
+        manifests = ManifestParser()
+        manifests.read(args[0])
+
+        # print the resultant query
+        manifests.copy(args[1], None, *tags, **kwargs)
+
+
+class CreateCLI(CLICommand):
+    """
+    create a manifest from a list of directories
+    """
+    usage = '%prog [options] create directory <directory> <...>'
+
+    def parser(self):
+        parser = CLICommand.parser(self)
+        parser.add_option('-p', '--pattern', dest='pattern',
+                          help="glob pattern for files")
+        parser.add_option('-i', '--ignore', dest='ignore',
+                          default=[], action='append',
+                          help='directories to ignore')
+        parser.add_option('-w', '--in-place', dest='in_place',
+                          help='Write .ini files in place; filename to write to')
+        return parser
+
+    def __call__(self, _options, args):
+        parser = self.parser()
+        options, args = parser.parse_args(args)
+
+        # need some directories
+        if not len(args):
+            parser.print_usage()
+            return
+
+        # add the directories to the manifest
+        for arg in args:
+            assert os.path.exists(arg)
+            assert os.path.isdir(arg)
+            manifest = convert(args, pattern=options.pattern, ignore=options.ignore,
+                               write=options.in_place)
+        if manifest:
+            print manifest
+
+
+class WriteCLI(CLICommand):
+    """
+    write a manifest based on a query
+    """
+    usage = '%prog [options] write manifest <manifest> -tag1 -tag2 --key1=value1 --key2=value2 ...'
+
+    def __call__(self, options, args):
+
+        # parse the arguments
+        try:
+            kwargs, tags, args = parse_args(args)
+        except ParserError, e:
+            self._parser.error(e.message)
+
+        # make sure we have some manifests, otherwise it will
+        # be quite boring
+        if not args:
+            HelpCLI(self._parser)(options, ['write'])
+            return
+
+        # read the manifests
+        # TODO: should probably ensure these exist here
+        manifests = ManifestParser()
+        manifests.read(*args)
+
+        # print the resultant query
+        manifests.write(global_tags=tags, global_kwargs=kwargs)
+
+
+class HelpCLI(CLICommand):
+    """
+    get help on a command
+    """
+    usage = '%prog [options] help [command]'
+
+    def __call__(self, options, args):
+        if len(args) == 1 and args[0] in commands:
+            commands[args[0]](self._parser).parser().print_help()
+        else:
+            self._parser.print_help()
+            print '\nCommands:'
+            for command in sorted(commands):
+                print '  %s : %s' % (command, commands[command].__doc__.strip())
+
+
+class UpdateCLI(CLICommand):
+    """
+    update the tests as listed in a manifest from a directory
+    """
+    usage = '%prog [options] update manifest directory -tag1 -tag2 --key1=value1 --key2=value2 ...'
+
+    def __call__(self, options, args):
+        # parse the arguments
+        try:
+            kwargs, tags, args = parse_args(args)
+        except ParserError, e:
+            self._parser.error(e.message)
+
+        # make sure we have some manifests, otherwise it will
+        # be quite boring
+        if not len(args) == 2:
+            HelpCLI(self._parser)(options, ['update'])
+            return
+
+        # read the manifests
+        # TODO: should probably ensure these exist here
+        manifests = ManifestParser()
+        manifests.read(args[0])
+
+        # print the resultant query
+        manifests.update(args[1], None, *tags, **kwargs)
+
+
+# command -> class mapping
+commands = {'create': CreateCLI,
+            'help': HelpCLI,
+            'update': UpdateCLI,
+            'write': WriteCLI}
+
+
+def main(args=sys.argv[1:]):
+    """console_script entry point"""
+
+    # set up an option parser
+    usage = '%prog [options] [command] ...'
+    description = "%s. Use `help` to display commands" % __doc__.strip()
+    parser = OptionParser(usage=usage, description=description)
+    parser.add_option('-s', '--strict', dest='strict',
+                      action='store_true', default=False,
+                      help='adhere strictly to errors')
+    parser.disable_interspersed_args()
+
+    options, args = parser.parse_args(args)
+
+    if not args:
+        HelpCLI(parser)(options, args)
+        parser.exit()
+
+    # get the command
+    command = args[0]
+    if command not in commands:
+        parser.error("Command must be one of %s (you gave '%s')" %
+                     (', '.join(sorted(commands.keys())), command))
+
+    handler = commands[command](parser)
+    handler(options, args[1:])
+
+
+if __name__ == '__main__':
+    main()
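A quick illustration of the argument convention parse_args() implements: --key=value (or --key value) fills a dict, -tag collects tags, and anything else is positional. Hypothetical invocation, assuming the vendored package is importable as manifestparser:

    from manifestparser.cli import parse_args

    kwargs, tags, args = parse_args(['--os=linux', '-slow', 'manifest.ini'])
    assert kwargs == {'os': 'linux'}
    assert tags == ['slow']
    assert args == ['manifest.ini']
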
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/manifestparser/expression.py
@@ -0,0 +1,325 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+import sys
+import traceback
+
+__all__ = ['parse', 'ParseError', 'ExpressionParser']
+
+# expr.py
+# from:
+# http://k0s.org/mozilla/hg/expressionparser
+# http://hg.mozilla.org/users/tmielczarek_mozilla.com/expressionparser
+
+# Implements a top-down parser/evaluator for simple boolean expressions.
+# ideas taken from http://effbot.org/zone/simple-top-down-parsing.htm
+#
+# Rough grammar:
+# expr := literal
+#       | '(' expr ')'
+#       | expr '&&' expr
+#       | expr '||' expr
+#       | expr '==' expr
+#       | expr '!=' expr
+#       | expr '<' expr
+#       | expr '>' expr
+#       | expr '<=' expr
+#       | expr '>=' expr
+# literal := BOOL
+#          | INT
+#          | STRING
+#          | IDENT
+# BOOL   := true|false
+# INT    := [0-9]+
+# STRING := "[^"]*"
+# IDENT  := [A-Za-z_]\w*
+
+# Identifiers take their values from a mapping dictionary passed as the second
+# argument.
+
+# Glossary (see above URL for details):
+# - nud: null denotation
+# - led: left denotation
+# - lbp: left binding power
+# - rbp: right binding power
+
+
+class ident_token(object):
+
+    def __init__(self, scanner, value):
+        self.value = value
+
+    def nud(self, parser):
+        # identifiers take their value from the value mappings passed
+        # to the parser
+        return parser.value(self.value)
+
+
+class literal_token(object):
+
+    def __init__(self, scanner, value):
+        self.value = value
+
+    def nud(self, parser):
+        return self.value
+
+
+class eq_op_token(object):
+    "=="
+
+    def led(self, parser, left):
+        return left == parser.expression(self.lbp)
+
+
+class neq_op_token(object):
+    "!="
+
+    def led(self, parser, left):
+        return left != parser.expression(self.lbp)
+
+
+class lt_op_token(object):
+    "<"
+
+    def led(self, parser, left):
+        return left < parser.expression(self.lbp)
+
+
+class gt_op_token(object):
+    ">"
+
+    def led(self, parser, left):
+        return left > parser.expression(self.lbp)
+
+
+class le_op_token(object):
+    "<="
+
+    def led(self, parser, left):
+        return left <= parser.expression(self.lbp)
+
+
+class ge_op_token(object):
+    ">="
+
+    def led(self, parser, left):
+        return left >= parser.expression(self.lbp)
+
+
+class not_op_token(object):
+    "!"
+
+    def nud(self, parser):
+        return not parser.expression(100)
+
+
+class and_op_token(object):
+    "&&"
+
+    def led(self, parser, left):
+        right = parser.expression(self.lbp)
+        return left and right
+
+
+class or_op_token(object):
+    "||"
+
+    def led(self, parser, left):
+        right = parser.expression(self.lbp)
+        return left or right
+
+
+class lparen_token(object):
+    "("
+
+    def nud(self, parser):
+        expr = parser.expression()
+        parser.advance(rparen_token)
+        return expr
+
+
+class rparen_token(object):
+    ")"
+
+
+class end_token(object):
+    """always ends parsing"""
+
+# derived literal tokens
+
+
+class bool_token(literal_token):
+
+    def __init__(self, scanner, value):
+        value = {'true': True, 'false': False}[value]
+        literal_token.__init__(self, scanner, value)
+
+
+class int_token(literal_token):
+
+    def __init__(self, scanner, value):
+        literal_token.__init__(self, scanner, int(value))
+
+
+class string_token(literal_token):
+
+    def __init__(self, scanner, value):
+        literal_token.__init__(self, scanner, value[1:-1])
+
+
+precedence = [(end_token, rparen_token),
+              (or_op_token,),
+              (and_op_token,),
+              (lt_op_token, gt_op_token, le_op_token, ge_op_token,
+               eq_op_token, neq_op_token),
+              (lparen_token,),
+              ]
+for index, rank in enumerate(precedence):
+    for token in rank:
+        token.lbp = index  # lbp = lowest left binding power
+
+
+class ParseError(Exception):
+    """error parsing conditional expression"""
+
+
+class ExpressionParser(object):
+    """
+    A parser for a simple expression language.
+
+    The expression language can be described as follows::
+
+        EXPRESSION ::= LITERAL | '(' EXPRESSION ')' | '!' EXPRESSION | EXPRESSION OP EXPRESSION
+        OP ::= '==' | '!=' | '<' | '>' | '<=' | '>=' | '&&' | '||'
+        LITERAL ::= BOOL | INT | IDENT | STRING
+        BOOL ::= 'true' | 'false'
+        INT ::= [0-9]+
+        IDENT ::= [a-zA-Z_]\w*
+        STRING ::= '"' [^\"] '"' | ''' [^\'] '''
+
+    At its core, expressions consist of booleans, integers, identifiers and
+    strings. Booleans are one of *true* or *false*. Integers are a series
+    of digits. Identifiers are a series of English letters and underscores.
+    Strings are a pair of matching quote characters (single or double) with
+    zero or more characters inside.
+
+    Expressions can be combined with operators: the equals (==) and not
+    equals (!=) operators compare two expressions and produce a boolean. The
+    and (&&) and or (||) operators take two expressions and produce the logical
+    AND or OR value of them, respectively. An expression can also be prefixed
+    with the not (!) operator, which produces its logical negation.
+
+    Finally, any expression may be contained within parentheses for grouping.
+
+    Identifiers take their values from the mapping provided.
+    """
+
+    scanner = None
+
+    def __init__(self, text, valuemapping, strict=False):
+        """
+        Initialize the parser
+        :param text: The expression to parse as a string.
+        :param valuemapping: A dict mapping identifier names to values.
+        :param strict: If true, referencing an identifier that was not
+                       provided in :valuemapping: will raise an error.
+        """
+        self.text = text
+        self.valuemapping = valuemapping
+        self.strict = strict
+
+    def _tokenize(self):
+        """
+        Lex the input text into tokens and yield them in sequence.
+        """
+        if not ExpressionParser.scanner:
+            ExpressionParser.scanner = re.Scanner([
+                # Note: keep these in sync with the class docstring above.
+                (r"true|false", bool_token),
+                (r"[a-zA-Z_]\w*", ident_token),
+                (r"[0-9]+", int_token),
+                (r'("[^"]*")|(\'[^\']*\')', string_token),
+                (r"==", eq_op_token()),
+                (r"!=", neq_op_token()),
+                (r"<=", le_op_token()),
+                (r">=", ge_op_token()),
+                (r"<", lt_op_token()),
+                (r">", gt_op_token()),
+                (r"\|\|", or_op_token()),
+                (r"!", not_op_token()),
+                (r"&&", and_op_token()),
+                (r"\(", lparen_token()),
+                (r"\)", rparen_token()),
+                (r"\s+", None),  # skip whitespace
+            ])
+        tokens, remainder = ExpressionParser.scanner.scan(self.text)
+        for t in tokens:
+            yield t
+        yield end_token()
+
+    def value(self, ident):
+        """
+        Look up the value of |ident| in the value mapping passed in the
+        constructor.
+        """
+        if self.strict:
+            return self.valuemapping[ident]
+        else:
+            return self.valuemapping.get(ident, None)
+
+    def advance(self, expected):
+        """
+        Assert that the next token is an instance of |expected|, and advance
+        to the next token.
+        """
+        if not isinstance(self.token, expected):
+            raise Exception("Unexpected token!")
+        self.token = self.iter.next()
+
+    def expression(self, rbp=0):
+        """
+        Parse and return the value of an expression until a token with
+        right binding power greater than rbp is encountered.
+        """
+        t = self.token
+        self.token = self.iter.next()
+        left = t.nud(self)
+        while rbp < self.token.lbp:
+            t = self.token
+            self.token = self.iter.next()
+            left = t.led(self, left)
+        return left
+
+    def parse(self):
+        """
+        Parse and return the value of the expression in the text
+        passed to the constructor. Raises a ParseError if the expression
+        could not be parsed.
+        """
+        try:
+            self.iter = self._tokenize()
+            self.token = self.iter.next()
+            return self.expression()
+        except:
+            extype, ex, tb = sys.exc_info()
+            formatted = ''.join(traceback.format_exception_only(extype, ex))
+            raise ParseError("could not parse: "
+                             "%s\nexception: %svariables: %s" % (self.text,
+                                                                 formatted,
+                                                                 self.valuemapping)), None, tb
+
+    __call__ = parse
+
+
+def parse(text, **values):
+    """
+    Parse and evaluate a boolean expression.
+    :param text: The expression to parse, as a string.
+    :param values: A dict containing a name to value mapping for identifiers
+                   referenced in *text*.
+    :rtype: the final value of the expression.
+    :raises: :py:exc::ParseError: will be raised if parsing fails.
+    """
+    return ExpressionParser(text, values).parse()
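A short example of the expression language this module evaluates, as used by skip-if/run-if conditions (hypothetical values; assumes the vendored package is importable as manifestparser):

    from manifestparser.expression import parse

    # Identifiers resolve against the keyword mapping; in non-strict mode
    # unknown identifiers evaluate to None.
    assert parse('os == "linux" && debug', os='linux', debug=True)
    assert parse('!e10s', e10s=False)
    assert parse('bits >= 64', bits=64)
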
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/manifestparser/filters.py
@@ -0,0 +1,421 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+A filter is a callable that accepts an iterable of test objects and a
+dictionary of values, and returns a new iterable of test objects. It is
+possible to define custom filters if the built-in ones are not enough.
+"""
+
+from collections import defaultdict, MutableSequence
+import itertools
+import os
+
+from .expression import (
+    parse,
+    ParseError,
+)
+
+
+# built-in filters
+
+def skip_if(tests, values):
+    """
+    Sets disabled on all tests containing the `skip-if` tag and whose condition
+    is True. This filter is added by default.
+    """
+    tag = 'skip-if'
+    for test in tests:
+        if tag in test and parse(test[tag], **values):
+            test.setdefault('disabled', '{}: {}'.format(tag, test[tag]))
+        yield test
+
+
+def run_if(tests, values):
+    """
+    Sets disabled on all tests containing the `run-if` tag and whose condition
+    is False. This filter is added by default.
+    """
+    tag = 'run-if'
+    for test in tests:
+        if tag in test and not parse(test[tag], **values):
+            test.setdefault('disabled', '{}: {}'.format(tag, test[tag]))
+        yield test
+
+
+def fail_if(tests, values):
+    """
+    Sets expected to 'fail' on all tests containing the `fail-if` tag and whose
+    condition is True. This filter is added by default.
+    """
+    tag = 'fail-if'
+    for test in tests:
+        if tag in test and parse(test[tag], **values):
+            test['expected'] = 'fail'
+        yield test
+
+
+def enabled(tests, values):
+    """
+    Removes all tests containing the `disabled` key. This filter can be
+    added by passing `disabled=False` into `active_tests`.
+    """
+    for test in tests:
+        if 'disabled' not in test:
+            yield test
+
+
+def exists(tests, values):
+    """
+    Removes all tests that do not exist on the file system. This filter is
+    added by default, but can be removed by passing `exists=False` into
+    `active_tests`.
+    """
+    for test in tests:
+        if os.path.exists(test['path']):
+            yield test
+
+
+# built-in instance filters
+
+class InstanceFilter(object):
+    """
+    Generally only one instance of a class filter should be applied at a time.
+    Two instances of `InstanceFilter` are considered equal if they have the
+    same class name. This ensures only a single instance is ever added to
+    `filterlist`. This class also formats filters' __str__ method for easier
+    debugging.
+    """
+    unique = True
+
+    def __init__(self, *args, **kwargs):
+        self.fmt_args = ', '.join(itertools.chain(
+            [str(a) for a in args],
+            ['{}={}'.format(k, v) for k, v in kwargs.iteritems()]))
+
+    def __eq__(self, other):
+        if self.unique:
+            return self.__class__ == other.__class__
+        return self.__hash__() == other.__hash__()
+
+    def __str__(self):
+        return "{}({})".format(self.__class__.__name__, self.fmt_args)
+
+
+class subsuite(InstanceFilter):
+    """
+    If `name` is None, removes all tests that have a `subsuite` key.
+    Otherwise removes all tests that do not have a subsuite matching `name`.
+
+    It is possible to specify conditional subsuite keys using:
+       subsuite = foo,condition
+
+    where 'foo' is the subsuite name, and 'condition' is the same type of
+    condition used for skip-if.  If the condition doesn't evaluate to true,
+    the subsuite designation will be removed from the test.
+
+    :param name: The name of the subsuite to run (default None)
+    """
+
+    def __init__(self, name=None):
+        InstanceFilter.__init__(self, name=name)
+        self.name = name
+
+    def __call__(self, tests, values):
+        # Look for conditional subsuites, and replace them with the subsuite
+        # itself (if the condition is true), or nothing.
+        for test in tests:
+            subsuite = test.get('subsuite', '')
+            if ',' in subsuite:
+                try:
+                    subsuite, cond = subsuite.split(',')
+                except ValueError:
+                    raise ParseError("subsuite condition can't contain commas")
+                matched = parse(cond, **values)
+                if matched:
+                    test['subsuite'] = subsuite
+                else:
+                    test['subsuite'] = ''
+
+            # Filter on current subsuite
+            if self.name is None:
+                if not test.get('subsuite'):
+                    yield test
+            else:
+                if test.get('subsuite', '') == self.name:
+                    yield test
+
+
+class chunk_by_slice(InstanceFilter):
+    """
+    Basic chunking algorithm that splits tests evenly across total chunks.
+
+    :param this_chunk: the current chunk, 1 <= this_chunk <= total_chunks
+    :param total_chunks: the total number of chunks
+    :param disabled: Whether to include disabled tests in the chunking
+                     algorithm. If False, each chunk contains an equal number
+                     of non-disabled tests. If True, each chunk contains an
+                     equal number of tests (default False)
+    """
+
+    def __init__(self, this_chunk, total_chunks, disabled=False):
+        assert 1 <= this_chunk <= total_chunks
+        InstanceFilter.__init__(self, this_chunk, total_chunks,
+                                disabled=disabled)
+        self.this_chunk = this_chunk
+        self.total_chunks = total_chunks
+        self.disabled = disabled
+
+    def __call__(self, tests, values):
+        tests = list(tests)
+        if self.disabled:
+            chunk_tests = tests[:]
+        else:
+            chunk_tests = [t for t in tests if 'disabled' not in t]
+
+        tests_per_chunk = float(len(chunk_tests)) / self.total_chunks
+        start = int(round((self.this_chunk - 1) * tests_per_chunk))
+        end = int(round(self.this_chunk * tests_per_chunk))
+
+        if not self.disabled:
+            # map start and end back onto original list of tests. Disabled
+            # tests will still be included in the returned list, but each
+            # chunk will contain an equal number of enabled tests.
+            if self.this_chunk == 1:
+                start = 0
+            elif start < len(chunk_tests):
+                start = tests.index(chunk_tests[start])
+
+            if self.this_chunk == self.total_chunks:
+                end = len(tests)
+            elif end < len(chunk_tests):
+                end = tests.index(chunk_tests[end])
+        return (t for t in tests[start:end])
+
+
+class chunk_by_dir(InstanceFilter):
+    """
+    Basic chunking algorithm that splits directories of tests evenly at a
+    given depth.
+
+    For example, a depth of 2 means all test directories two path nodes away
+    from the base are gathered, then split evenly across the total number of
+    chunks. The number of tests in each of the directories is not taken into
+    account (so chunks will not contain an even number of tests). All test
+    paths must be relative to the same root (typically the root of the source
+    repository).
+
+    :param this_chunk: the current chunk, 1 <= this_chunk <= total_chunks
+    :param total_chunks: the total number of chunks
+    :param depth: the minimum depth of a subdirectory before it will be
+                  considered unique
+    """
+
+    def __init__(self, this_chunk, total_chunks, depth):
+        InstanceFilter.__init__(self, this_chunk, total_chunks, depth)
+        self.this_chunk = this_chunk
+        self.total_chunks = total_chunks
+        self.depth = depth
+
+    def __call__(self, tests, values):
+        tests_by_dir = defaultdict(list)
+        ordered_dirs = []
+        for test in tests:
+            path = test['relpath']
+
+            if path.startswith(os.sep):
+                path = path[1:]
+
+            dirs = path.split(os.sep)
+            dirs = dirs[:min(self.depth, len(dirs) - 1)]
+            path = os.sep.join(dirs)
+
+            # don't count directories that only have disabled tests in them,
+            # but still yield disabled tests that are alongside enabled tests
+            if path not in ordered_dirs and 'disabled' not in test:
+                ordered_dirs.append(path)
+            tests_by_dir[path].append(test)
+
+        tests_per_chunk = float(len(ordered_dirs)) / self.total_chunks
+        start = int(round((self.this_chunk - 1) * tests_per_chunk))
+        end = int(round(self.this_chunk * tests_per_chunk))
+
+        for i in range(start, end):
+            for test in tests_by_dir.pop(ordered_dirs[i]):
+                yield test
+
+        # find directories that only contain disabled tests. They still need to
+        # be yielded for reporting purposes. Put them all in chunk 1 for
+        # simplicity.
+        if self.this_chunk == 1:
+            disabled_dirs = [v for k, v in tests_by_dir.iteritems()
+                             if k not in ordered_dirs]
+            for disabled_test in itertools.chain(*disabled_dirs):
+                yield disabled_test
+
+
+class chunk_by_runtime(InstanceFilter):
+    """
+    Chunking algorithm that attempts to group tests into chunks based on their
+    average runtimes. It keeps manifests of tests together and pairs slow
+    running manifests with fast ones.
+
+    :param this_chunk: the current chunk, 1 <= this_chunk <= total_chunks
+    :param total_chunks: the total number of chunks
+    :param runtimes: dictionary of test runtime data, of the form
+                     {<test path>: <average runtime>}
+    :param default_runtime: value in seconds to assign tests that don't exist
+                            in the runtimes file
+    """
+
+    def __init__(self, this_chunk, total_chunks, runtimes, default_runtime=0):
+        InstanceFilter.__init__(self, this_chunk, total_chunks, runtimes,
+                                default_runtime=default_runtime)
+        self.this_chunk = this_chunk
+        self.total_chunks = total_chunks
+
+        # defaultdict(lambda:<int>) assigns all non-existent keys the value of
+        # <int>. This means all tests we encounter that don't exist in the
+        # runtimes file will be assigned `default_runtime`.
+        self.runtimes = defaultdict(lambda: default_runtime)
+        self.runtimes.update(runtimes)
+
+    def __call__(self, tests, values):
+        tests = list(tests)
+        manifests = set(t['manifest'] for t in tests)
+
+        def total_runtime(tests):
+            return sum(self.runtimes[t['relpath']] for t in tests
+                       if 'disabled' not in t)
+
+        tests_by_manifest = []
+        for manifest in manifests:
+            mtests = [t for t in tests if t['manifest'] == manifest]
+            tests_by_manifest.append((total_runtime(mtests), mtests))
+        tests_by_manifest.sort(reverse=True)
+
+        tests_by_chunk = [[0, []] for i in range(self.total_chunks)]
+        for runtime, batch in tests_by_manifest:
+            # sort first by runtime, then by number of tests in case of a tie.
+            # This guarantees the chunk with the fastest runtime will always
+            # get the next batch of tests.
+            tests_by_chunk.sort(key=lambda x: (x[0], len(x[1])))
+            tests_by_chunk[0][0] += runtime
+            tests_by_chunk[0][1].extend(batch)
+
+        return (t for t in tests_by_chunk[self.this_chunk - 1][1])
+
+
+class tags(InstanceFilter):
+    """
+    Removes tests that don't contain any of the given tags. This overrides
+    InstanceFilter's __eq__ method, so multiple instances can be added.
+    Multiple tag filters are equivalent to joining tags with the AND operator.
+
+    To define a tag in a manifest, add a `tags` attribute to a test or DEFAULT
+    section. Tests can have multiple tags, in which case they should be
+    whitespace delimited. For example:
+
+    [test_foobar.html]
+    tags = foo bar
+
+    :param tags: A tag or list of tags to filter tests on
+    """
+    unique = False
+
+    def __init__(self, tags):
+        InstanceFilter.__init__(self, tags)
+        if isinstance(tags, basestring):
+            tags = [tags]
+        self.tags = tags
+
+    def __call__(self, tests, values):
+        for test in tests:
+            if 'tags' not in test:
+                continue
+
+            test_tags = [t.strip() for t in test['tags'].split()]
+            if any(t in self.tags for t in test_tags):
+                yield test
+
+
+class pathprefix(InstanceFilter):
+    """
+    Removes tests that don't start with any of the given test paths.
+
+    :param paths: A list of test paths to filter on
+    """
+
+    def __init__(self, paths):
+        InstanceFilter.__init__(self, paths)
+        if isinstance(paths, basestring):
+            paths = [paths]
+        self.paths = paths
+
+    def __call__(self, tests, values):
+        for test in tests:
+            for tp in self.paths:
+                tp = os.path.normpath(tp)
+
+                path = test['relpath']
+                if os.path.isabs(tp):
+                    path = test['path']
+
+                if not os.path.normpath(path).startswith(tp):
+                    continue
+
+                # any test path that points to a single file will be run no
+                # matter what, even if it's disabled
+                if 'disabled' in test and os.path.normpath(test['relpath']) == tp:
+                    del test['disabled']
+                yield test
+                break
+
+
+# filter container
+
+DEFAULT_FILTERS = (
+    skip_if,
+    run_if,
+    fail_if,
+)
+"""
+By default :func:`~.active_tests` will run the :func:`~.skip_if`,
+:func:`~.run_if` and :func:`~.fail_if` filters.
+"""
+
+
+class filterlist(MutableSequence):
+    """
+    A MutableSequence that raises TypeError when adding a non-callable and
+    ValueError if the item is already added.
+    """
+
+    def __init__(self, items=None):
+        self.items = []
+        if items:
+            self.items = list(items)
+
+    def _validate(self, item):
+        if not callable(item):
+            raise TypeError("Filters must be callable!")
+        if item in self:
+            raise ValueError("Filter {} is already applied!".format(item))
+
+    def __getitem__(self, key):
+        return self.items[key]
+
+    def __setitem__(self, key, value):
+        self._validate(value)
+        self.items[key] = value
+
+    def __delitem__(self, key):
+        del self.items[key]
+
+    def __len__(self):
+        return len(self.items)
+
+    def insert(self, index, value):
+        self._validate(value)
+        self.items.insert(index, value)
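A sketch of how these filters compose: each filter is a callable over (tests, values), so a chunking filter can be applied directly to a list of test dicts (the dicts below are minimal illustrative stubs):

    from manifestparser.filters import chunk_by_slice

    tests = [{'path': '/t/a.js'}, {'path': '/t/b.js'}, {'path': '/t/c.js'}]
    # Three enabled tests split across two chunks: two in the first, one in the second.
    assert len(list(chunk_by_slice(1, 2)(tests, {}))) == 2
    assert len(list(chunk_by_slice(2, 2)(tests, {}))) == 1
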
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/manifestparser/ini.py
@@ -0,0 +1,171 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import sys
+
+__all__ = ['read_ini', 'combine_fields']
+
+
+class IniParseError(Exception):
+    def __init__(self, fp, linenum, msg):
+        if isinstance(fp, basestring):
+            path = fp
+        elif hasattr(fp, 'name'):
+            path = fp.name
+        else:
+            path = getattr(fp, 'path', 'unknown')
+        msg = "Error parsing manifest file '{}', line {}: {}".format(path, linenum, msg)
+        super(IniParseError, self).__init__(msg)
+
+
+def read_ini(fp, variables=None, default='DEFAULT', defaults_only=False,
+             comments=None, separators=None, strict=True, handle_defaults=True):
+    """
+    read an .ini file and return a list of [(section, values)]
+    - fp : file pointer or path to read
+    - variables : default set of variables
+    - default : name of the section for the default section
+    - defaults_only : if True, return the default section only
+    - comments : characters that if they start a line denote a comment
+    - separators : strings that denote key, value separation in order
+    - strict : whether to be strict about parsing
+    - handle_defaults : whether to incorporate defaults into each section
+    """
+
+    # variables
+    variables = variables or {}
+    comments = comments or ('#',)
+    separators = separators or ('=', ':')
+    sections = []
+    key = value = None
+    section_names = set()
+    if isinstance(fp, basestring):
+        fp = file(fp)
+
+    # read the lines
+    for (linenum, line) in enumerate(fp.read().splitlines(), start=1):
+
+        stripped = line.strip()
+
+        # ignore blank lines
+        if not stripped:
+            # reset key and value to avoid continuation lines
+            key = value = None
+            continue
+
+        # ignore comment lines
+        if any(stripped.startswith(c) for c in comments):
+            continue
+
+        # strip inline comments (borrowed from configparser)
+        comment_start = sys.maxsize
+        inline_prefixes = {p: -1 for p in comments}
+        while comment_start == sys.maxsize and inline_prefixes:
+            next_prefixes = {}
+            for prefix, index in inline_prefixes.items():
+                index = line.find(prefix, index+1)
+                if index == -1:
+                    continue
+                next_prefixes[prefix] = index
+                if index == 0 or (index > 0 and line[index-1].isspace()):
+                    comment_start = min(comment_start, index)
+            inline_prefixes = next_prefixes
+
+        if comment_start != sys.maxsize:
+            stripped = stripped[:comment_start].rstrip()
+
+        # check for a new section
+        if len(stripped) > 2 and stripped[0] == '[' and stripped[-1] == ']':
+            section = stripped[1:-1].strip()
+            key = value = key_indent = None
+
+            # deal with DEFAULT section
+            if section.lower() == default.lower():
+                if strict:
+                    assert default not in section_names
+                section_names.add(default)
+                current_section = variables
+                continue
+
+            if strict:
+                # make sure this section doesn't already exist
+                assert section not in section_names, "Section '%s' already found in '%s'" % (
+                    section, section_names)
+
+            section_names.add(section)
+            current_section = {}
+            sections.append((section, current_section))
+            continue
+
+        # if there aren't any sections yet, something bad happened
+        if not section_names:
+            raise IniParseError(fp, linenum, "Expected a comment or section, "
+                                             "instead found '{}'".format(stripped))
+
+        # continuation line ?
+        line_indent = len(line) - len(line.lstrip(' '))
+        if key and line_indent > key_indent:
+            value = '%s%s%s' % (value, os.linesep, stripped)
+            current_section[key] = value
+            continue
+
+        # (key, value) pair
+        for separator in separators:
+            if separator in stripped:
+                key, value = stripped.split(separator, 1)
+                key = key.strip()
+                value = value.strip()
+                key_indent = line_indent
+
+                if strict:
+                    # make sure this key isn't already in the section or empty
+                    assert key
+                    if current_section is not variables:
+                        assert key not in current_section
+
+                current_section[key] = value
+                break
+        else:
+            # something bad happened!
+            raise IniParseError(fp, linenum, "Unexpected line '{}'".format(stripped))
+
+    # server-root is a special os path declared relative to the manifest file.
+    # inheritance demands we expand it as absolute
+    if 'server-root' in variables:
+        root = os.path.join(os.path.dirname(fp.name),
+                            variables['server-root'])
+        variables['server-root'] = os.path.abspath(root)
+
+    # return the default section only if requested
+    if defaults_only:
+        return [(default, variables)]
+
+    global_vars = variables if handle_defaults else {}
+    sections = [(i, combine_fields(global_vars, j)) for i, j in sections]
+    return sections
+
+
+def combine_fields(global_vars, local_vars):
+    """
+    Combine the given manifest entries according to the semantics of specific fields.
+    This is used to combine manifest level defaults with a per-test definition.
+    """
+    if not global_vars:
+        return local_vars
+    if not local_vars:
+        return global_vars
+    field_patterns = {
+        'skip-if': '(%s) || (%s)',
+        'support-files': '%s %s',
+    }
+    final_mapping = global_vars.copy()
+    for field_name, value in local_vars.items():
+        if field_name not in field_patterns or field_name not in global_vars:
+            final_mapping[field_name] = value
+            continue
+        global_value = global_vars[field_name]
+        pattern = field_patterns[field_name]
+        final_mapping[field_name] = pattern % (global_value, value)
+    return final_mapping
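combine_fields() gives certain keys merge semantics when manifest defaults meet a per-test section: skip-if conditions are OR-ed together and support-files are concatenated, while every other key is simply overridden. For example (illustrative values):

    from manifestparser.ini import combine_fields

    defaults = {'skip-if': 'os == "win"', 'support-files': 'head.js'}
    section = {'skip-if': 'debug', 'support-files': 'data.json'}
    merged = combine_fields(defaults, section)
    assert merged['skip-if'] == '(os == "win") || (debug)'
    assert merged['support-files'] == 'head.js data.json'
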
new file mode 100755
--- /dev/null
+++ b/testing/mozharness/manifestparser/manifestparser.py
@@ -0,0 +1,807 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from StringIO import StringIO
+import json
+import fnmatch
+import os
+import shutil
+import sys
+import types
+
+from .ini import read_ini
+from .filters import (
+    DEFAULT_FILTERS,
+    enabled,
+    exists as _exists,
+    filterlist,
+)
+
+__all__ = ['ManifestParser', 'TestManifest', 'convert']
+
+relpath = os.path.relpath
+string = (basestring,)
+
+
+# path normalization
+
+def normalize_path(path):
+    """normalize a relative path"""
+    if sys.platform.startswith('win'):
+        return path.replace('/', os.path.sep)
+    return path
+
+
+def denormalize_path(path):
+    """denormalize a relative path"""
+    if sys.platform.startswith('win'):
+        return path.replace(os.path.sep, '/')
+    return path
+
+
+# objects for parsing manifests
+
+class ManifestParser(object):
+    """read .ini manifests"""
+
+    def __init__(self, manifests=(), defaults=None, strict=True, rootdir=None,
+                 finder=None, handle_defaults=True):
+        """Creates a ManifestParser from the given manifest files.
+
+        :param manifests: An iterable of file paths or file objects corresponding
+                          to manifests. If a file path refers to a manifest file that
+                          does not exist, an IOError is raised.
+        :param defaults: Variables to pre-define in the environment for evaluating
+                         expressions in manifests.
+        :param strict: If False, the provided manifests may contain references to
+                       listed (test) files that do not exist without raising an
+                       IOError during reading, and certain errors in manifests
+                       are not considered fatal. Those errors include duplicate
+                       section names, redefining variables, and defining empty
+                       variables.
+        :param rootdir: The directory used as the basis for conversion to and from
+                        relative paths during manifest reading.
+        :param finder: If provided, this finder object will be used for filesystem
+                       interactions. Finder objects are part of the mozpack package,
+                       documented at
+                       http://firefox-source-docs.mozilla.org/python/mozpack.html#module-mozpack.files
+        :param handle_defaults: If not set, do not propagate manifest defaults to individual
+                                test objects. Callers are expected to manage per-manifest
+                                defaults themselves via the manifest_defaults member
+                                variable in this case.
+        """
+        self._defaults = defaults or {}
+        self._ancestor_defaults = {}
+        self.tests = []
+        self.manifest_defaults = {}
+        self.source_files = set()
+        self.strict = strict
+        self.rootdir = rootdir
+        self.relativeRoot = None
+        self.finder = finder
+        self._handle_defaults = handle_defaults
+        if manifests:
+            self.read(*manifests)
+
+    def path_exists(self, path):
+        if self.finder:
+            return self.finder.get(path) is not None
+        return os.path.exists(path)
+
+    # methods for reading manifests
+
+    def _read(self, root, filename, defaults, defaults_only=False, parentmanifest=None):
+        """
+        Internal recursive method for reading and parsing manifests.
+        Stores all found tests in self.tests
+        :param root: The base path
+        :param filename: File object or string path for the base manifest file
+        :param defaults: Options that apply to all items
+        :param defaults_only: If True will only gather options, not include
+                              tests. Used for upstream parent includes
+                              (default False)
+        :param parentmanifest: Filename of the parent manifest (default None)
+        """
+        def read_file(type):
+            include_file = section.split(type, 1)[-1]
+            include_file = normalize_path(include_file)
+            if not os.path.isabs(include_file):
+                include_file = os.path.join(here, include_file)
+            if not self.path_exists(include_file):
+                message = "Included file '%s' does not exist" % include_file
+                if self.strict:
+                    raise IOError(message)
+                else:
+                    sys.stderr.write("%s\n" % message)
+                    return
+            return include_file
+
+        # get directory of this file if not file-like object
+        if isinstance(filename, string):
+            # If we're using mercurial as our filesystem via a finder
+            # during manifest reading, the getcwd() calls that happen
+            # with abspath calls will not be meaningful, so absolute
+            # paths are required.
+            if self.finder:
+                assert os.path.isabs(filename)
+            filename = os.path.abspath(filename)
+            self.source_files.add(filename)
+            if self.finder:
+                fp = self.finder.get(filename)
+            else:
+                fp = open(filename)
+            here = os.path.dirname(filename)
+        else:
+            fp = filename
+            filename = here = None
+        defaults['here'] = here
+
+        # Rootdir is needed for relative path calculation. Precompute it for
+        # the microoptimization used below.
+        if self.rootdir is None:
+            rootdir = ""
+        else:
+            assert os.path.isabs(self.rootdir)
+            rootdir = self.rootdir + os.path.sep
+
+        # read the configuration
+        sections = read_ini(fp=fp, variables=defaults, strict=self.strict,
+                            handle_defaults=self._handle_defaults)
+        self.manifest_defaults[filename] = defaults
+
+        parent_section_found = False
+
+        # get the tests
+        for section, data in sections:
+            # In case of defaults only, no other section than parent: has to
+            # be processed.
+            if defaults_only and not section.startswith('parent:'):
+                continue
+
+            # read the parent manifest if specified
+            if section.startswith('parent:'):
+                parent_section_found = True
+
+                include_file = read_file('parent:')
+                if include_file:
+                    self._read(root, include_file, {}, True)
+                continue
+
+            # a file to include
+            # TODO: keep track of included file structure:
+            # self.manifests = {'manifest.ini': 'relative/path.ini'}
+            if section.startswith('include:'):
+                include_file = read_file('include:')
+                if include_file:
+                    include_defaults = data.copy()
+                    self._read(root, include_file, include_defaults, parentmanifest=filename)
+                continue
+
+            # otherwise an item
+            # apply ancestor defaults, while maintaining current file priority
+            data = dict(self._ancestor_defaults.items() + data.items())
+
+            test = data
+            test['name'] = section
+
+            # Will be None if the manifest being read is a file-like object.
+            test['manifest'] = filename
+
+            # determine the path
+            path = test.get('path', section)
+            _relpath = path
+            if '://' not in path:  # don't futz with URLs
+                path = normalize_path(path)
+                if here and not os.path.isabs(path):
+                    # Profiling indicates 25% of manifest parsing is spent
+                    # in this call to normpath, but almost all calls return
+                    # their argument unmodified, so we avoid the call if
+                    # '..' is not present in the path.
+                    path = os.path.join(here, path)
+                    if '..' in path:
+                        path = os.path.normpath(path)
+
+                # Microoptimization, because relpath is quite expensive.
+                # We know that rootdir is an absolute path or empty. If path
+                # starts with rootdir, then path is also absolute and the tail
+                # of the path is the relative path (possibly non-normalized,
+                # when here is unknown).
+                # For this to work rootdir needs to be terminated with a path
+                # separator, so that references to sibling directories with
+                # a common prefix don't get miscomputed (e.g. /root and
+                # /rootbeer/file).
+                # When the rootdir is unknown, the relpath needs to be left
+                # unchanged. We use an empty string as rootdir in that case,
+                # which leaves relpath unchanged after slicing.
+                if path.startswith(rootdir):
+                    _relpath = path[len(rootdir):]
+                else:
+                    _relpath = relpath(path, rootdir)
+
+            test['path'] = path
+            test['relpath'] = _relpath
+
+            if parentmanifest is not None:
+                # If a test was included by a parent manifest we may need to
+                # indicate that in the test object for the sake of identifying
+                # a test, particularly in the case a test file is included by
+                # multiple manifests.
+                test['ancestor-manifest'] = parentmanifest
+
+            # append the item
+            self.tests.append(test)
+
+        # if no parent: section was found for defaults-only, only read the
+        # defaults section of the manifest without interpreting variables
+        if defaults_only and not parent_section_found:
+            sections = read_ini(fp=fp, variables=defaults, defaults_only=True,
+                                strict=self.strict)
+            (section, self._ancestor_defaults) = sections[0]
+
+    def read(self, *filenames, **defaults):
+        """
+        read and add manifests from file paths or file-like objects
+
+        filenames -- file paths or file-like objects to read as manifests
+        defaults -- default variables
+        """
+
+        # ensure all files exist
+        missing = [filename for filename in filenames
+                   if isinstance(filename, string) and not self.path_exists(filename)]
+        if missing:
+            raise IOError('Missing files: %s' % ', '.join(missing))
+
+        # default variables
+        _defaults = defaults.copy() or self._defaults.copy()
+        _defaults.setdefault('here', None)
+
+        # process each file
+        for filename in filenames:
+            # set the per file defaults
+            defaults = _defaults.copy()
+            here = None
+            if isinstance(filename, string):
+                here = os.path.dirname(os.path.abspath(filename))
+                defaults['here'] = here  # directory of master .ini file
+
+            if self.rootdir is None:
+                # set the root directory
+                # == the directory of the first manifest given
+                self.rootdir = here
+
+            self._read(here, filename, defaults)
+
+    # methods for querying manifests
+
+    def query(self, *checks, **kw):
+        """
+        general query function for tests
+        - checks : callable conditions to test if the test fulfills the query
+        """
+        tests = kw.get('tests', None)
+        if tests is None:
+            tests = self.tests
+        retval = []
+        for test in tests:
+            for check in checks:
+                if not check(test):
+                    break
+            else:
+                retval.append(test)
+        return retval
+
+    def get(self, _key=None, inverse=False, tags=None, tests=None, **kwargs):
+        # TODO: pass a dict instead of kwargs, since you might have
+        # e.g. 'inverse' as a key in the dict
+
+        # TODO: tags should just be part of kwargs with None values
+        # (None == any is kinda weird, but probably still better)
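+        # Illustrative calls (assuming manifests have already been read):
+        #     self.get('name')              # names of all tests
+        #     self.get(tags=['slow'])       # tests that define a 'slow' key
+        #     self.get('path', os='win')    # paths of tests with os = win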
+
+        # fix up tags
+        if tags:
+            tags = set(tags)
+        else:
+            tags = set()
+
+        # make some check functions
+        if inverse:
+            def has_tags(test):
+                return not tags.intersection(test.keys())
+
+            def dict_query(test):
+                for key, value in kwargs.items():
+                    if test.get(key) == value:
+                        return False
+                return True
+        else:
+            def has_tags(test):
+                return tags.issubset(test.keys())
+
+            def dict_query(test):
+                for key, value in kwargs.items():
+                    if test.get(key) != value:
+                        return False
+                return True
+
+        # query the tests
+        tests = self.query(has_tags, dict_query, tests=tests)
+
+        # if a key is given, return only a list of that key
+        # useful for keys like 'name' or 'path'
+        if _key:
+            return [test[_key] for test in tests]
+
+        # return the tests
+        return tests
+
+    def manifests(self, tests=None):
+        """
+        return manifests in order in which they appear in the tests
+        """
+        if tests is None:
+            # Make sure to return all the manifests, even ones without tests.
+            return self.manifest_defaults.keys()
+
+        manifests = []
+        for test in tests:
+            manifest = test.get('manifest')
+            if not manifest:
+                continue
+            if manifest not in manifests:
+                manifests.append(manifest)
+        return manifests
+
+    def paths(self):
+        return [i['path'] for i in self.tests]
+
+    # methods for auditing
+
+    def missing(self, tests=None):
+        """
+        return list of tests that do not exist on the filesystem
+        """
+        if tests is None:
+            tests = self.tests
+        existing = list(_exists(tests, {}))
+        return [t for t in tests if t not in existing]
+
+    def check_missing(self, tests=None):
+        missing = self.missing(tests=tests)
+        if missing:
+            missing_paths = [test['path'] for test in missing]
+            if self.strict:
+                raise IOError("Strict mode enabled, test paths must exist. "
+                              "The following test(s) are missing: %s" %
+                              json.dumps(missing_paths, indent=2))
+            print >> sys.stderr, "Warning: The following test(s) are missing: %s" % \
+                json.dumps(missing_paths, indent=2)
+        return missing
+
+    def verifyDirectory(self, directories, pattern=None, extensions=None):
+        """
+        checks what is on the filesystem vs what is in a manifest
+        returns a 2-tuple of sets:
+        (missing_from_filesystem, missing_from_manifest)
+        """
+
+        files = set([])
+        if isinstance(directories, basestring):
+            directories = [directories]
+
+        # get files in directories
+        for directory in directories:
+            for dirpath, dirnames, filenames in os.walk(directory, topdown=True):
+
+                # only add files that match a pattern
+                if pattern:
+                    filenames = fnmatch.filter(filenames, pattern)
+
+                # only add files that have one of the extensions
+                if extensions:
+                    filenames = [filename for filename in filenames
+                                 if os.path.splitext(filename)[-1] in extensions]
+
+                files.update([os.path.join(dirpath, filename) for filename in filenames])
+
+        paths = set(self.paths())
+        missing_from_filesystem = paths.difference(files)
+        missing_from_manifest = files.difference(paths)
+        return (missing_from_filesystem, missing_from_manifest)
+
+    # methods for output
+
+    def write(self, fp=sys.stdout, rootdir=None,
+              global_tags=None, global_kwargs=None,
+              local_tags=None, local_kwargs=None):
+        """
+        write a manifest given a query
+        global and local options will be munged to do the query
+        globals will be written to the top of the file
+        locals (if given) will be written per test
+        """
+
+        # open file if `fp` given as string
+        close = False
+        if isinstance(fp, string):
+            fp = file(fp, 'w')
+            close = True
+
+        # root directory
+        if rootdir is None:
+            rootdir = self.rootdir
+
+        # sanitize input
+        global_tags = global_tags or set()
+        local_tags = local_tags or set()
+        global_kwargs = global_kwargs or {}
+        local_kwargs = local_kwargs or {}
+
+        # create the query
+        tags = set([])
+        tags.update(global_tags)
+        tags.update(local_tags)
+        kwargs = {}
+        kwargs.update(global_kwargs)
+        kwargs.update(local_kwargs)
+
+        # get matching tests
+        tests = self.get(tags=tags, **kwargs)
+
+        # print the .ini manifest
+        if global_tags or global_kwargs:
+            print >> fp, '[DEFAULT]'
+            for tag in global_tags:
+                print >> fp, '%s =' % tag
+            for key, value in global_kwargs.items():
+                print >> fp, '%s = %s' % (key, value)
+            print >> fp
+
+        for test in tests:
+            test = test.copy()  # don't overwrite
+
+            path = test['name']
+            if not os.path.isabs(path):
+                path = test['path']
+                if self.rootdir:
+                    path = relpath(test['path'], self.rootdir)
+                path = denormalize_path(path)
+            print >> fp, '[%s]' % path
+
+            # reserved keywords:
+            reserved = ['path', 'name', 'here', 'manifest', 'relpath', 'ancestor-manifest']
+            for key in sorted(test.keys()):
+                if key in reserved:
+                    continue
+                if key in global_kwargs:
+                    continue
+                if key in global_tags and not test[key]:
+                    continue
+                print >> fp, '%s = %s' % (key, test[key])
+            print >> fp
+
+        if close:
+            # close the created file
+            fp.close()
+
+    def __str__(self):
+        fp = StringIO()
+        self.write(fp=fp)
+        value = fp.getvalue()
+        return value
+
+    def copy(self, directory, rootdir=None, *tags, **kwargs):
+        """
+        copy the manifests and associated tests
+        - directory : directory to copy to
+        - rootdir : root directory to copy to (if not given from manifests)
+        - tags : keywords the tests must have
+        - kwargs : key, values the tests must match
+        """
+        # XXX note that copy does *not* filter the tests out of the
+        # resulting manifest; it just stupidly copies them over.
+        # ideally, it would reread the manifests and filter out the
+        # tests that don't match *tags and **kwargs
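+        # e.g. (illustrative destination):
+        #     self.copy('/tmp/tests-copy', os='linux')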
+
+        # destination
+        if not os.path.exists(directory):
+            os.makedirs(directory)
+        else:
+            # sanity check
+            assert os.path.isdir(directory)
+
+        # tests to copy
+        tests = self.get(tags=tags, **kwargs)
+        if not tests:
+            return  # nothing to do!
+
+        # root directory
+        if rootdir is None:
+            rootdir = self.rootdir
+
+        # copy the manifests + tests
+        manifests = [relpath(manifest, rootdir) for manifest in self.manifests()]
+        for manifest in manifests:
+            destination = os.path.join(directory, manifest)
+            dirname = os.path.dirname(destination)
+            if not os.path.exists(dirname):
+                os.makedirs(dirname)
+            else:
+                # sanity check
+                assert os.path.isdir(dirname)
+            shutil.copy(os.path.join(rootdir, manifest), destination)
+
+        missing = self.check_missing(tests)
+        tests = [test for test in tests if test not in missing]
+        for test in tests:
+            if os.path.isabs(test['name']):
+                continue
+            source = test['path']
+            destination = os.path.join(directory, relpath(test['path'], rootdir))
+            shutil.copy(source, destination)
+            # TODO: ensure that all of the tests are below the from_dir
+
+    def update(self, from_dir, rootdir=None, *tags, **kwargs):
+        """
+        update the tests as listed in a manifest from a directory
+        - from_dir : directory where the tests live
+        - rootdir : root directory to copy to (if not given from manifests)
+        - tags : keys the tests must have
+        - kwargs : key, values the tests must match
+        """
+
+        # get the tests
+        tests = self.get(tags=tags, **kwargs)
+
+        # get the root directory
+        if not rootdir:
+            rootdir = self.rootdir
+
+        # copy them!
+        for test in tests:
+            if not os.path.isabs(test['name']):
+                _relpath = relpath(test['path'], rootdir)
+                source = os.path.join(from_dir, _relpath)
+                if not os.path.exists(source):
+                    message = "Missing test: '%s' does not exist!"
+                    if self.strict:
+                        raise IOError(message)
+                    print >> sys.stderr, message + " Skipping."
+                    continue
+                destination = os.path.join(rootdir, _relpath)
+                shutil.copy(source, destination)
+
+    # directory importers
+
+    @classmethod
+    def _walk_directories(cls, directories, callback, pattern=None, ignore=()):
+        """
+        internal function to import directories
+        """
+
+        if isinstance(pattern, basestring):
+            patterns = [pattern]
+        else:
+            patterns = pattern
+        ignore = set(ignore)
+
+        if not patterns:
+            def accept_filename(filename):
+                return True
+        else:
+            def accept_filename(filename):
+                for pattern in patterns:
+                    if fnmatch.fnmatch(filename, pattern):
+                        return True
+
+        if not ignore:
+            def accept_dirname(dirname):
+                return True
+        else:
+            def accept_dirname(dirname):
+                return dirname not in ignore
+
+        rootdirectories = directories[:]
+        seen_directories = set()
+        for rootdirectory in rootdirectories:
+            # let's recurse directories using list
+            directories = [os.path.realpath(rootdirectory)]
+            while directories:
+                directory = directories.pop(0)
+                if directory in seen_directories:
+                    # eliminate possible infinite recursion due to
+                    # symbolic links
+                    continue
+                seen_directories.add(directory)
+
+                files = []
+                subdirs = []
+                for name in sorted(os.listdir(directory)):
+                    path = os.path.join(directory, name)
+                    if os.path.isfile(path):
+                        # os.path.isfile follows symbolic links, so we don't
+                        # need to handle them here.
+                        if accept_filename(name):
+                            files.append(name)
+                        continue
+                    elif os.path.islink(path):
+                        # eliminate symbolic links
+                        path = os.path.realpath(path)
+
+                    # we must have a directory here
+                    if accept_dirname(name):
+                        subdirs.append(name)
+                        # this subdir is added for recursion
+                        directories.insert(0, path)
+
+                # here we got all subdirs and files filtered, we can
+                # call the callback function if directory is not empty
+                if subdirs or files:
+                    callback(rootdirectory, directory, subdirs, files)
+
+    @classmethod
+    def populate_directory_manifests(cls, directories, filename, pattern=None, ignore=(),
+                                     overwrite=False):
+        """
+        walks directories and writes manifests of name `filename` in-place;
+        returns `cls` instance populated with the given manifests
+
+        filename -- filename of manifests to write
+        pattern -- shell pattern (glob) or patterns of filenames to match
+        ignore -- directory names to ignore
+        overwrite -- whether to overwrite existing files of given name
+        """
+
+        manifest_dict = {}
+
+        if os.path.basename(filename) != filename:
+            raise IOError("filename should not include directory name")
+
+        # no need to hit directories more than once
+        _directories = directories
+        directories = []
+        for directory in _directories:
+            if directory not in directories:
+                directories.append(directory)
+
+        def callback(directory, dirpath, dirnames, filenames):
+            """write a manifest for each directory"""
+
+            manifest_path = os.path.join(dirpath, filename)
+            if (dirnames or filenames) and (overwrite or not os.path.exists(manifest_path)):
+                with file(manifest_path, 'w') as manifest:
+                    for dirname in dirnames:
+                        print >> manifest, '[include:%s]' % os.path.join(dirname, filename)
+                    for _filename in filenames:
+                        print >> manifest, '[%s]' % _filename
+
+                # add to list of manifests
+                manifest_dict.setdefault(directory, manifest_path)
+
+        # walk the directories to gather files
+        cls._walk_directories(directories, callback, pattern=pattern, ignore=ignore)
+        # get manifests
+        manifests = [manifest_dict[directory] for directory in _directories]
+
+        # create a `cls` instance with the manifests
+        return cls(manifests=manifests)
+
+    @classmethod
+    def from_directories(cls, directories, pattern=None, ignore=(), write=None, relative_to=None):
+        """
+        convert directories to a simple manifest; returns ManifestParser instance
+
+        pattern -- shell pattern (glob) or patterns of filenames to match
+        ignore -- directory names to ignore
+        write -- filename or file-like object of manifests to write;
+                 if `None` then a StringIO instance will be created
+        relative_to -- write paths relative to this path;
+                       if false then the paths are absolute
+        """
+
+        # determine output
+        opened_manifest_file = None  # name of opened manifest file
+        absolute = not relative_to  # whether to output absolute path names as names
+        if isinstance(write, string):
+            opened_manifest_file = write
+            write = file(write, 'w')
+        if write is None:
+            write = StringIO()
+
+        # walk the directories, generating manifests
+        def callback(directory, dirpath, dirnames, filenames):
+
+            # absolute paths
+            filenames = [os.path.join(dirpath, filename)
+                         for filename in filenames]
+            # ensure new manifest isn't added
+            filenames = [filename for filename in filenames
+                         if filename != opened_manifest_file]
+            # normalize paths
+            if not absolute and relative_to:
+                filenames = [relpath(filename, relative_to)
+                             for filename in filenames]
+
+            # write to manifest
+            print >> write, '\n'.join(['[%s]' % denormalize_path(filename)
+                                       for filename in filenames])
+
+        cls._walk_directories(directories, callback, pattern=pattern, ignore=ignore)
+
+        if opened_manifest_file:
+            # close file
+            write.close()
+            manifests = [opened_manifest_file]
+        else:
+            # manifests/write is a file-like object;
+            # rewind buffer
+            write.flush()
+            write.seek(0)
+            manifests = [write]
+
+        # make a ManifestParser instance
+        return cls(manifests=manifests)
+
+
+convert = ManifestParser.from_directories
+
+
+class TestManifest(ManifestParser):
+    """
+    apply logic to manifests; this is your integration layer :)
+    specific harnesses may subclass from this if they need more logic
+    """
+
+    def __init__(self, *args, **kwargs):
+        ManifestParser.__init__(self, *args, **kwargs)
+        self.filters = filterlist(DEFAULT_FILTERS)
+        self.last_used_filters = []
+
+    def active_tests(self, exists=True, disabled=True, filters=None, **values):
+        """
+        Run all applied filters on the set of tests.
+
+        :param exists: filter out non-existing tests (default True)
+        :param disabled: whether to return disabled tests (default True)
+        :param values: keys and values to filter on (e.g. `os = linux mac`)
+        :param filters: list of filters to apply to the tests
+        :returns: list of test objects that were not filtered out
+        """
+        tests = [i.copy() for i in self.tests]  # shallow copy
+
+        # mark all tests as passing
+        for test in tests:
+            test['expected'] = test.get('expected', 'pass')
+
+        # make a copy so original doesn't get modified
+        fltrs = self.filters[:]
+        if exists:
+            if self.strict:
+                self.check_missing(tests)
+            else:
+                fltrs.append(_exists)
+
+        if not disabled:
+            fltrs.append(enabled)
+
+        if filters:
+            fltrs += filters
+
+        self.last_used_filters = fltrs[:]
+        for fn in fltrs:
+            tests = fn(tests, values)
+        return list(tests)
+
+    def test_paths(self):
+        return [test['path'] for test in self.active_tests()]
+
+    def fmt_filters(self, filters=None):
+        filters = filters or self.last_used_filters
+        names = []
+        for f in filters:
+            if isinstance(f, types.FunctionType):
+                names.append(f.__name__)
+            else:
+                names.append(str(f))
+        return ', '.join(names)
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/mozfile/__init__.py
@@ -0,0 +1,5 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from mozfile import *  # noqa
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/mozfile/mozfile.py
@@ -0,0 +1,373 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from contextlib import contextmanager
+import os
+import shutil
+import stat
+import tarfile
+import tempfile
+import urlparse
+import urllib2
+import zipfile
+import time
+
+__all__ = ['extract_tarball',
+           'extract_zip',
+           'extract',
+           'is_url',
+           'load',
+           'remove',
+           'rmtree',
+           'tree',
+           'NamedTemporaryFile',
+           'TemporaryDirectory']
+
+try:
+    WindowsError
+except NameError:
+    WindowsError = None  # so we can unconditionally catch it later...
+
+
+# utilities for extracting archives
+
+def extract_tarball(src, dest):
+    """extract a .tar file"""
+
+    with tarfile.open(src) as bundle:
+        namelist = []
+
+        for m in bundle:
+            bundle.extract(m, path=dest)
+            namelist.append(m.name)
+
+    return namelist
+
+
+def extract_zip(src, dest):
+    """extract a zip file"""
+
+    if isinstance(src, zipfile.ZipFile):
+        bundle = src
+    else:
+        try:
+            bundle = zipfile.ZipFile(src)
+        except Exception:
+            print "src: %s" % src
+            raise
+
+    namelist = bundle.namelist()
+
+    for name in namelist:
+        filename = os.path.realpath(os.path.join(dest, name))
+        if name.endswith('/'):
+            if not os.path.isdir(filename):
+                os.makedirs(filename)
+        else:
+            path = os.path.dirname(filename)
+            if not os.path.isdir(path):
+                os.makedirs(path)
+            _dest = open(filename, 'wb')
+            _dest.write(bundle.read(name))
+            _dest.close()
+        mode = bundle.getinfo(name).external_attr >> 16 & 0x1FF
+        os.chmod(filename, mode)
+    bundle.close()
+    return namelist
+
+
+def extract(src, dest=None):
+    """
+    Takes in a tar or zip file and extracts it to dest
+
+    If dest is not specified, extracts to os.path.dirname(src)
+
+    Returns the list of top level files that were extracted
+    """
+
+    assert os.path.exists(src), "'%s' does not exist" % src
+
+    if dest is None:
+        dest = os.path.dirname(src)
+    elif not os.path.isdir(dest):
+        os.makedirs(dest)
+    assert not os.path.isfile(dest), "dest cannot be a file"
+
+    if zipfile.is_zipfile(src):
+        namelist = extract_zip(src, dest)
+    elif tarfile.is_tarfile(src):
+        namelist = extract_tarball(src, dest)
+    else:
+        raise Exception("mozfile.extract: no archive format found for '%s'" %
+                        src)
+
+    # namelist returns paths with forward slashes even on Windows
+    top_level_files = [os.path.join(dest, name.rstrip('/')) for name in namelist
+                       if len(name.rstrip('/').split('/')) == 1]
+
+    # namelist doesn't include folders, append these to the list
+    for name in namelist:
+        index = name.find('/')
+        if index != -1:
+            root = os.path.join(dest, name[:index])
+            if root not in top_level_files:
+                top_level_files.append(root)
+
+    return top_level_files
+
+
+# utilities for removal of files and directories
+
+def rmtree(dir):
+    """Deprecated wrapper method to remove a directory tree.
+
+    Ensure to update your code to use mozfile.remove() directly
+
+    :param dir: directory to be removed
+    """
+
+    return remove(dir)
+
+
+def remove(path):
+    """Removes the specified file, link, or directory tree
+
+    This is a replacement for shutil.rmtree that works better under
+    windows.
+
+    :param path: path to be removed
+    """
+
+    def _call_with_windows_retry(func, path, retry_max=5, retry_delay=0.5):
+        """
+        It's possible to see spurious errors on Windows due to various things
+        keeping a handle to the directory open (explorer, virus scanners, etc)
+        So we try a few times if it fails with a known error.
+        """
+        retry_count = 0
+        while True:
+            try:
+                func(path)
+                break
+            except WindowsError as e:
+                # Error   5 == Access is denied
+                # Error  32 == The process cannot access the file because it is
+                #              being used by another process
+                # Error 145 == The directory is not empty
+
+                if retry_count == retry_max or e.winerror not in [5, 32, 145]:
+                    raise
+                retry_count += 1
+
+                print 'Retrying to remove "%s" because it is in use.' % path
+                time.sleep(retry_delay)
+
+    if not os.path.exists(path):
+        return
+
+    path_stats = os.stat(path)
+
+    if os.path.isfile(path) or os.path.islink(path):
+        # Verify the file or link is read/write for the current user
+        os.chmod(path, path_stats.st_mode | stat.S_IRUSR | stat.S_IWUSR)
+        _call_with_windows_retry(os.remove, path)
+
+    elif os.path.isdir(path):
+        # Verify the directory is read/write/execute for the current user
+        os.chmod(path, path_stats.st_mode | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
+        _call_with_windows_retry(shutil.rmtree, path)
+
+
+def depth(directory):
+    """returns the integer depth of a directory or path relative to '/' """
+
+    directory = os.path.abspath(directory)
+    level = 0
+    while True:
+        directory, remainder = os.path.split(directory)
+        level += 1
+        if not remainder:
+            break
+    return level
+
+
+# ASCII delimiters
+ascii_delimiters = {
+    'vertical_line': '|',
+    'item_marker': '+',
+    'last_child': '\\'
+    }
+
+# unicode delimiters
+unicode_delimiters = {
+    'vertical_line': '│',
+    'item_marker': '├',
+    'last_child': '└'
+    }
+
+
+def tree(directory,
+         item_marker=unicode_delimiters['item_marker'],
+         vertical_line=unicode_delimiters['vertical_line'],
+         last_child=unicode_delimiters['last_child'],
+         sort_key=lambda x: x.lower()):
+    """
+    display tree directory structure for `directory`
+    """
+
+    retval = []
+    indent = []
+    last = {}
+    top = depth(directory)
+
+    for dirpath, dirnames, filenames in os.walk(directory, topdown=True):
+
+        abspath = os.path.abspath(dirpath)
+        basename = os.path.basename(abspath)
+        parent = os.path.dirname(abspath)
+        level = depth(abspath) - top
+
+        # sort articles of interest
+        for resource in (dirnames, filenames):
+            resource[:] = sorted(resource, key=sort_key)
+
+        if level > len(indent):
+            indent.append(vertical_line)
+        indent = indent[:level]
+
+        if dirnames:
+            files_end = item_marker
+            last[abspath] = dirnames[-1]
+        else:
+            files_end = last_child
+
+        if last.get(parent) == os.path.basename(abspath):
+            # last directory of parent
+            dirpath_mark = last_child
+            indent[-1] = ' '
+        elif not indent:
+            dirpath_mark = ''
+        else:
+            dirpath_mark = item_marker
+
+        # append the directory and its piece of the tree structure;
+        # for the top-level directory, print the path as passed in
+        retval.append('%s%s%s' % (''.join(indent[:-1]),
+                      dirpath_mark, basename if retval else directory))
+        # add the files
+        if filenames:
+            last_file = filenames[-1]
+            retval.extend([('%s%s%s' % (''.join(indent),
+                          files_end if filename == last_file else item_marker, filename))
+                          for filename in filenames])
+
+    return '\n'.join(retval)
+
+
+# utilities for temporary resources
+
+class NamedTemporaryFile(object):
+    """
+    Like tempfile.NamedTemporaryFile except it works on Windows
+    in the case where you open the created file a second time.
+
+    This behaves very similarly to tempfile.NamedTemporaryFile but may
+    not behave exactly the same. For example, this function does not
+    prevent fd inheritance by children.
+
+    Example usage:
+
+    with NamedTemporaryFile() as fh:
+        fh.write(b'foobar')
+
+        print('Filename: %s' % fh.name)
+
+    see https://bugzilla.mozilla.org/show_bug.cgi?id=821362
+    """
+    def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='tmp',
+                 dir=None, delete=True):
+
+        fd, path = tempfile.mkstemp(suffix, prefix, dir, 't' in mode)
+        os.close(fd)
+
+        self.file = open(path, mode)
+        self._path = path
+        self._delete = delete
+        self._unlinked = False
+
+    def __getattr__(self, k):
+        return getattr(self.__dict__['file'], k)
+
+    def __iter__(self):
+        return self.__dict__['file']
+
+    def __enter__(self):
+        self.file.__enter__()
+        return self
+
+    def __exit__(self, exc, value, tb):
+        self.file.__exit__(exc, value, tb)
+        if self.__dict__['_delete']:
+            os.unlink(self.__dict__['_path'])
+            self._unlinked = True
+
+    def __del__(self):
+        if self.__dict__['_unlinked']:
+            return
+        self.file.__exit__(None, None, None)
+        if self.__dict__['_delete']:
+            os.unlink(self.__dict__['_path'])
+
+
+@contextmanager
+def TemporaryDirectory():
+    """
+    create a temporary directory using tempfile.mkdtemp, and then clean it up.
+
+    Example usage:
+    with TemporaryDirectory() as tmp:
+       open(os.path.join(tmp, "a_temp_file"), "w").write("data")
+
+    """
+    tempdir = tempfile.mkdtemp()
+    try:
+        yield tempdir
+    finally:
+        shutil.rmtree(tempdir)
+
+
+# utilities dealing with URLs
+
+def is_url(thing):
+    """
+    Return True if thing looks like a URL.
+    """
+
+    # urlparse always exposes the scheme as the first tuple element; schemes
+    # shorter than two characters (e.g. Windows drive letters such as 'c:')
+    # are not treated as URLs.
+    return len(urlparse.urlparse(thing)[0]) >= 2
+
+
+def load(resource):
+    """
+    open a file or URL for reading.  If the passed resource string is not a URL,
+    or begins with 'file://', return a ``file``.  Otherwise, return the
+    result of urllib2.urlopen()
+    """
+
+    # handle file URLs separately due to python stdlib limitations
+    if resource.startswith('file://'):
+        resource = resource[len('file://'):]
+
+    if not is_url(resource):
+        # if no scheme is given, it is a file path
+        return file(resource)
+
+    return urllib2.urlopen(resource)
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/mozinfo/__init__.py
@@ -0,0 +1,57 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+interface to transform introspected system information to a format palatable to
+Mozilla
+
+Module variables:
+
+.. attribute:: bits
+
+   32 or 64
+
+.. attribute:: isBsd
+
+   Returns ``True`` if the operating system is BSD
+
+.. attribute:: isLinux
+
+   Returns ``True`` if the operating system is Linux
+
+.. attribute:: isMac
+
+   Returns ``True`` if the operating system is Mac
+
+.. attribute:: isWin
+
+   Returns ``True`` if the operating system is Windows
+
+.. attribute:: os
+
+   Operating system [``'win'``, ``'mac'``, ``'linux'``, ...]
+
+.. attribute:: processor
+
+   Processor architecture [``'x86'``, ``'x86_64'``, ``'ppc'``, ...]
+
+.. attribute:: version
+
+   Operating system version string. For windows, the service pack information is also included
+
+.. attribute:: info
+
+   Returns information identifying the current system.
+
+   * :attr:`bits`
+   * :attr:`os`
+   * :attr:`processor`
+   * :attr:`version`
+
+"""
+
+import mozinfo
+from mozinfo import *  # noqa
+
+__all__ = mozinfo.__all__
new file mode 100755
--- /dev/null
+++ b/testing/mozharness/mozinfo/mozinfo.py
@@ -0,0 +1,312 @@
+#!/usr/bin/env python
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# TODO: it might be a good idea to add a system name (e.g. 'Ubuntu' for
+# linux) to the information; I certainly wouldn't want anyone parsing this
+# information and having behaviour depend on it
+
+from __future__ import absolute_import, print_function
+
+import os
+import platform
+import re
+import sys
+from .string_version import StringVersion
+from ctypes.util import find_library
+
+# keep a copy of the os module since updating globals overrides this
+_os = os
+
+
+class unknown(object):
+    """marker class for unknown information"""
+
+    def __nonzero__(self):
+        return False
+
+    def __str__(self):
+        return 'UNKNOWN'
+
+
+unknown = unknown()  # singleton
+
+
+def get_windows_version():
+    import ctypes
+
+    class OSVERSIONINFOEXW(ctypes.Structure):
+        _fields_ = [('dwOSVersionInfoSize', ctypes.c_ulong),
+                    ('dwMajorVersion', ctypes.c_ulong),
+                    ('dwMinorVersion', ctypes.c_ulong),
+                    ('dwBuildNumber', ctypes.c_ulong),
+                    ('dwPlatformId', ctypes.c_ulong),
+                    ('szCSDVersion', ctypes.c_wchar * 128),
+                    ('wServicePackMajor', ctypes.c_ushort),
+                    ('wServicePackMinor', ctypes.c_ushort),
+                    ('wSuiteMask', ctypes.c_ushort),
+                    ('wProductType', ctypes.c_byte),
+                    ('wReserved', ctypes.c_byte)]
+
+    os_version = OSVERSIONINFOEXW()
+    os_version.dwOSVersionInfoSize = ctypes.sizeof(os_version)
+    retcode = ctypes.windll.Ntdll.RtlGetVersion(ctypes.byref(os_version))
+    if retcode != 0:
+        raise OSError
+
+    return os_version.dwMajorVersion, os_version.dwMinorVersion, os_version.dwBuildNumber
+
+
+# get system information
+info = {'os': unknown,
+        'processor': unknown,
+        'version': unknown,
+        'os_version': unknown,
+        'bits': unknown,
+        'has_sandbox': unknown,
+        'webrender': bool(os.environ.get("MOZ_WEBRENDER", False))}
+(system, node, release, version, machine, processor) = platform.uname()
+(bits, linkage) = platform.architecture()
+
+# get os information and related data
+if system in ["Microsoft", "Windows"]:
+    info['os'] = 'win'
+    # There is a Python bug on Windows to determine platform values
+    # http://bugs.python.org/issue7860
+    if "PROCESSOR_ARCHITEW6432" in os.environ:
+        processor = os.environ.get("PROCESSOR_ARCHITEW6432", processor)
+    else:
+        processor = os.environ.get('PROCESSOR_ARCHITECTURE', processor)
+    system = os.environ.get("OS", system).replace('_', ' ')
+    (major, minor, _, _, service_pack) = os.sys.getwindowsversion()
+    info['service_pack'] = service_pack
+    if (major, minor) >= (6, 2):
+        # On windows >= 8.1 the system call that getwindowsversion uses has
+        # been frozen to always return the same values. In this case we call
+        # the RtlGetVersion API directly, which still provides meaningful
+        # values, at least for now.
+        major, minor, build_number = get_windows_version()
+        version = "%d.%d.%d" % (major, minor, build_number)
+
+    os_version = "%d.%d" % (major, minor)
+elif system.startswith(('MINGW', 'MSYS_NT')):
+    # windows/mingw python build (msys)
+    info['os'] = 'win'
+    os_version = version = unknown
+elif system == "Linux":
+    if hasattr(platform, "linux_distribution"):
+        (distro, os_version, codename) = platform.linux_distribution()
+    else:
+        (distro, os_version, codename) = platform.dist()
+    if not processor:
+        processor = machine
+    version = "%s %s" % (distro, os_version)
+
+    # Bug in Python 2's `platform` library:
+    # It will return a triple of empty strings if the distribution is not supported.
+    # It works on Python 3. If we don't have an OS version,
+    # the unit tests fail to run.
+    if not distro and not os_version and not codename:
+        distro = 'lfs'
+        version = release
+        os_version = release
+
+    info['os'] = 'linux'
+    info['linux_distro'] = distro
+elif system in ['DragonFly', 'FreeBSD', 'NetBSD', 'OpenBSD']:
+    info['os'] = 'bsd'
+    version = os_version = sys.platform
+elif system == "Darwin":
+    (release, versioninfo, machine) = platform.mac_ver()
+    version = "OS X %s" % release
+    versionNums = release.split('.')[:2]
+    os_version = "%s.%s" % (versionNums[0], versionNums[1])
+    info['os'] = 'mac'
+elif sys.platform in ('solaris', 'sunos5'):
+    info['os'] = 'unix'
+    os_version = version = sys.platform
+else:
+    os_version = version = unknown
+
+info['version'] = version
+info['os_version'] = StringVersion(os_version)
+
+# processor type and bits
+if processor in ["i386", "i686"]:
+    if bits == "32bit":
+        processor = "x86"
+    elif bits == "64bit":
+        processor = "x86_64"
+elif processor.upper() == "AMD64":
+    bits = "64bit"
+    processor = "x86_64"
+elif processor == "Power Macintosh":
+    processor = "ppc"
+bits = re.search(r'(\d+)bit', bits).group(1)
+info.update({'processor': processor,
+             'bits': int(bits),
+             })
+
+if info['os'] == 'linux':
+    import ctypes
+    import errno
+    PR_SET_SECCOMP = 22
+    SECCOMP_MODE_FILTER = 2
+    ctypes.CDLL(find_library("c"), use_errno=True).prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, 0)
+    info['has_sandbox'] = ctypes.get_errno() == errno.EFAULT
+else:
+    info['has_sandbox'] = True
+
+# standard value of choices, for easy inspection
+choices = {'os': ['linux', 'bsd', 'win', 'mac', 'unix'],
+           'bits': [32, 64],
+           'processor': ['x86', 'x86_64', 'ppc']}
+
+
+def sanitize(info):
+    """Do some sanitization of input values, primarily
+    to handle universal Mac builds."""
+    if "processor" in info and info["processor"] == "universal-x86-x86_64":
+        # If we're running on OS X 10.6 or newer, assume 64-bit
+        if release[:4] >= "10.6":  # Note this is a string comparison
+            info["processor"] = "x86_64"
+            info["bits"] = 64
+        else:
+            info["processor"] = "x86"
+            info["bits"] = 32
+
+# method for updating information
+
+
+def update(new_info):
+    """
+    Update the info.
+
+    :param new_info: Either a dict containing the new info or a path/url
+                     to a json file containing the new info.
+    """
+
+    PY3 = sys.version_info[0] == 3
+    if PY3:
+        string_types = str,
+    else:
+        string_types = basestring,
+    if isinstance(new_info, string_types):
+        # lazy import
+        import mozfile
+        import json
+        f = mozfile.load(new_info)
+        new_info = json.loads(f.read())
+        f.close()
+
+    info.update(new_info)
+    sanitize(info)
+    globals().update(info)
+
+    # convenience data for os access
+    for os_name in choices['os']:
+        globals()['is' + os_name.title()] = info['os'] == os_name
+    # unix is special
+    if isLinux or isBsd:  # noqa
+        globals()['isUnix'] = True
+
+
+def find_and_update_from_json(*dirs):
+    """
+    Find a mozinfo.json file, load it, and update the info with the
+    contents.
+
+    :param dirs: Directories in which to look for the file. They will be
+                 searched after first looking in the root of the objdir
+                 if the current script is being run from a Mozilla objdir.
+
+    Returns the full path to mozinfo.json if it was found, or None otherwise.
+    """
+    # First, see if we're in an objdir
+    try:
+        from mozbuild.base import MozbuildObject, BuildEnvironmentNotFoundException
+        from mozbuild.mozconfig import MozconfigFindException
+        build = MozbuildObject.from_environment()
+        json_path = _os.path.join(build.topobjdir, "mozinfo.json")
+        if _os.path.isfile(json_path):
+            update(json_path)
+            return json_path
+    except ImportError:
+        pass
+    except (BuildEnvironmentNotFoundException, MozconfigFindException):
+        pass
+
+    for d in dirs:
+        d = _os.path.abspath(d)
+        json_path = _os.path.join(d, "mozinfo.json")
+        if _os.path.isfile(json_path):
+            update(json_path)
+            return json_path
+
+    return None
+
+
+def output_to_file(path):
+    import json
+    with open(path, 'w') as f:
+        f.write(json.dumps(info))
+
+
+update({})
+
+# exports
+__all__ = list(info.keys())
+__all__ += ['is' + os_name.title() for os_name in choices['os']]
+__all__ += [
+    'info',
+    'unknown',
+    'main',
+    'choices',
+    'update',
+    'find_and_update_from_json',
+    'output_to_file',
+    'StringVersion',
+]
+
+
+def main(args=None):
+
+    # parse the command line
+    from optparse import OptionParser
+    parser = OptionParser(description=__doc__)
+    for key in choices:
+        parser.add_option('--%s' % key, dest=key,
+                          action='store_true', default=False,
+                          help="display choices for %s" % key)
+    options, args = parser.parse_args()
+
+    # args are JSON blobs to override info
+    if args:
+        # lazy import
+        import json
+        for arg in args:
+            if _os.path.exists(arg):
+                string = open(arg).read()
+            else:
+                string = arg
+            update(json.loads(string))
+
+    # print out choices if requested
+    flag = False
+    for key, value in options.__dict__.items():
+        if value is True:
+            print('%s choices: %s' % (key, ' '.join([str(choice)
+                                                     for choice in choices[key]])))
+            flag = True
+    if flag:
+        return
+
+    # otherwise, print out all info
+    for key, value in info.items():
+        print('%s: %s' % (key, value))
+
+
+if __name__ == '__main__':
+    main()
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/mozinfo/string_version.py
@@ -0,0 +1,45 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import
+
+from distutils.version import LooseVersion
+
+
+class StringVersion(str):
+    """
+    A string version that can be compared with comparison operators.
+    """
+
+    def __init__(self, vstring):
+        str.__init__(self, vstring)
+        self.version = LooseVersion(vstring)
+
+    def __repr__(self):
+        return "StringVersion ('%s')" % self
+
+    def __to_version(self, other):
+        if not isinstance(other, StringVersion):
+            other = StringVersion(other)
+        return other.version
+
+    # rich comparison methods
+
+    def __lt__(self, other):
+        return self.version < self.__to_version(other)
+
+    def __le__(self, other):
+        return self.version <= self.__to_version(other)
+
+    def __eq__(self, other):
+        return self.version == self.__to_version(other)
+
+    def __ne__(self, other):
+        return self.version != self.__to_version(other)
+
+    def __gt__(self, other):
+        return self.version > self.__to_version(other)
+
+    def __ge__(self, other):
+        return self.version >= self.__to_version(other)
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/mozprocess/__init__.py
@@ -0,0 +1,8 @@
+# flake8: noqa
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import
+
+from .processhandler import *
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/mozprocess/processhandler.py
@@ -0,0 +1,1105 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import
+
+import errno
+import os
+import signal
+import subprocess
+import sys
+import threading
+import time
+import traceback
+from Queue import Queue, Empty
+from datetime import datetime
+
+__all__ = ['ProcessHandlerMixin', 'ProcessHandler', 'LogOutput',
+           'StoreOutput', 'StreamOutput']
+
+# Set the MOZPROCESS_DEBUG environment variable to 1 to see some debugging output
+MOZPROCESS_DEBUG = os.getenv("MOZPROCESS_DEBUG")
+
+# We don't use mozinfo because it is expensive to import, see bug 933558.
+isWin = os.name == "nt"
+isPosix = os.name == "posix"  # includes MacOS X
+
+if isWin:
+    from ctypes import sizeof, addressof, c_ulong, byref, WinError, c_longlong
+    from . import winprocess
+    from .qijo import JobObjectAssociateCompletionPortInformation,\
+        JOBOBJECT_ASSOCIATE_COMPLETION_PORT, JobObjectExtendedLimitInformation,\
+        JOBOBJECT_BASIC_LIMIT_INFORMATION, JOBOBJECT_EXTENDED_LIMIT_INFORMATION, IO_COUNTERS
+
+
+class ProcessHandlerMixin(object):
+    """
+    A class for launching and manipulating local processes.
+
+    :param cmd: command to run. May be a string or a list. If specified as a list, the first
+      element will be interpreted as the command, and all additional elements will be interpreted
+      as arguments to that command.
+    :param args: list of arguments to pass to the command (defaults to None). Must not be set when
+      `cmd` is specified as a list.
+    :param cwd: working directory for command (defaults to None).
+    :param env: the environment to use for the process (defaults to os.environ).
+    :param ignore_children: causes system to ignore child processes when True,
+      defaults to False (which tracks child processes).
+    :param kill_on_timeout: when True, the process will be killed when a timeout is reached.
+      When False, the caller is responsible for killing the process.
+      Failure to do so could cause a call to wait() to hang indefinitely. (Defaults to True.)
+    :param processOutputLine: function or list of functions to be called for
+        each line of output produced by the process (defaults to an empty
+        list).
+    :param processStderrLine: function or list of functions to be called
+        for each line of error output - stderr - produced by the process
+        (defaults to an empty list). If this is not specified, stderr lines
+        will be sent to the *processOutputLine* callbacks.
+    :param onTimeout: function or list of functions to be called when the process times out.
+    :param onFinish: function or list of functions to be called when the process terminates
+      normally without timing out.
+    :param kwargs: additional keyword args to pass directly into Popen.
+
+    NOTE: Child processes will be tracked by default.  If for any reason
+    we are unable to track child processes and ignore_children is set to False,
+    then we will fall back to only tracking the root process.  The fallback
+    will be logged.
+    """
+
+    class Process(subprocess.Popen):
+        """
+        Represents our view of a subprocess.
+        It adds a kill() method which allows it to be stopped explicitly.
+        """
+
+        MAX_IOCOMPLETION_PORT_NOTIFICATION_DELAY = 180
+        MAX_PROCESS_KILL_DELAY = 30
+        TIMEOUT_BEFORE_SIGKILL = 1.0
+
+        def __init__(self,
+                     args,
+                     bufsize=0,
+                     executable=None,
+                     stdin=None,
+                     stdout=None,
+                     stderr=None,
+                     preexec_fn=None,
+                     close_fds=False,
+                     shell=False,
+                     cwd=None,
+                     env=None,
+                     universal_newlines=False,
+                     startupinfo=None,
+                     creationflags=0,
+                     ignore_children=False):
+
+            # Parameter for whether or not we should attempt to track child processes
+            self._ignore_children = ignore_children
+
+            if not self._ignore_children and not isWin:
+                # Set the process group id for linux systems
+                # Sets process group id to the pid of the parent process
+                # NOTE: This prevents you from using preexec_fn and managing
+                #       child processes, TODO: Ideally, find a way around this
+                def setpgidfn():
+                    os.setpgid(0, 0)
+                preexec_fn = setpgidfn
+
+            try:
+                subprocess.Popen.__init__(self, args, bufsize, executable,
+                                          stdin, stdout, stderr,
+                                          preexec_fn, close_fds,
+                                          shell, cwd, env,
+                                          universal_newlines, startupinfo, creationflags)
+            except OSError:
+                print >> sys.stderr, args
+                raise
+
+        def debug(self, msg):
+            if not MOZPROCESS_DEBUG:
+                return
+            thread = threading.current_thread().name
+            print("DBG::MOZPROC PID:{} ({}) | {}".format(self.pid, thread, msg))
+
+        def __del__(self, _maxint=sys.maxint):
+            if isWin:
+                handle = getattr(self, '_handle', None)
+                if handle:
+                    if hasattr(self, '_internal_poll'):
+                        self._internal_poll(_deadstate=_maxint)
+                    else:
+                        self.poll(_deadstate=sys.maxint)
+                if handle or self._job or self._io_port:
+                    self._cleanup()
+            else:
+                subprocess.Popen.__del__(self)
+
+        def kill(self, sig=None):
+            if isWin:
+                if not self._ignore_children and self._handle and self._job:
+                    self.debug("calling TerminateJobObject")
+                    winprocess.TerminateJobObject(self._job, winprocess.ERROR_CONTROL_C_EXIT)
+                    self.returncode = winprocess.GetExitCodeProcess(self._handle)
+                elif self._handle:
+                    self.debug("calling TerminateProcess")
+                    try:
+                        winprocess.TerminateProcess(self._handle, winprocess.ERROR_CONTROL_C_EXIT)
+                    except Exception:
+                        traceback.print_exc()
+                        raise OSError("Could not terminate process")
+                    finally:
+                        winprocess.GetExitCodeProcess(self._handle)
+                        self._cleanup()
+            else:
+                def send_sig(sig, retries=0):
+                    pid = self.detached_pid or self.pid
+                    if not self._ignore_children:
+                        try:
+                            os.killpg(pid, sig)
+                        except BaseException as e:
+                            # On Mac OSX if the process group contains zombie
+                            # processes, killpg results in an EPERM.
+                            # In this case, zombie processes need to be reaped
+                            # before continuing
+                            # Note: A negative pid refers to the entire process
+                            # group
+                            if retries < 1 and getattr(e, "errno", None) == errno.EPERM:
+                                try:
+                                    os.waitpid(-pid, 0)
+                                finally:
+                                    return send_sig(sig, retries + 1)
+
+                            # ESRCH is a "no such process" failure, which is fine because the
+                            # application might already have been terminated itself. Any other
+                            # error would indicate a problem in killing the process.
+                            if getattr(e, "errno", None) != errno.ESRCH:
+                                print >> sys.stderr, "Could not terminate process: %s" % self.pid
+                                raise
+                    else:
+                        os.kill(pid, sig)
+
+                if sig is None and isPosix:
+                    # ask the process for termination and wait a bit
+                    send_sig(signal.SIGTERM)
+                    limit = time.time() + self.TIMEOUT_BEFORE_SIGKILL
+                    while time.time() <= limit:
+                        if self.poll() is not None:
+                            # process terminated nicely
+                            break
+                        time.sleep(0.02)
+                    else:
+                        # process did not terminate - send SIGKILL to force
+                        send_sig(signal.SIGKILL)
+                else:
+                    # a signal was explicitly set or not posix
+                    send_sig(sig or signal.SIGKILL)
+
+            self.returncode = self.wait()
+            self._cleanup()
+            return self.returncode
+
+        def poll(self):
+            """ Popen.poll
+                Check if child process has terminated. Set and return returncode attribute.
+            """
+            # If we have a handle, the process is alive
+            if isWin and getattr(self, '_handle', None):
+                return None
+
+            return subprocess.Popen.poll(self)
+
+        def wait(self):
+            """ Popen.wait
+                Called to wait for a running process to shut down and return
+                its exit code
+                Returns the main process's exit code
+            """
+            # This call will be different for each OS
+            self.returncode = self._wait()
+            self._cleanup()
+            return self.returncode
+
+        """ Private Members of Process class """
+
+        if isWin:
+            # Redefine the execute child so that we can track process groups
+            def _execute_child(self, *args_tuple):
+                # workaround for bug 950894
+                if sys.hexversion < 0x02070600:  # prior to 2.7.6
+                    (args, executable, preexec_fn, close_fds,
+                     cwd, env, universal_newlines, startupinfo,
+                     creationflags, shell,
+                     p2cread, p2cwrite,
+                     c2pread, c2pwrite,
+                     errread, errwrite) = args_tuple
+                    to_close = set()
+                else:  # 2.7.6 and later
+                    (args, executable, preexec_fn, close_fds,
+                     cwd, env, universal_newlines, startupinfo,
+                     creationflags, shell, to_close,
+                     p2cread, p2cwrite,
+                     c2pread, c2pwrite,
+                     errread, errwrite) = args_tuple
+                if not isinstance(args, basestring):
+                    args = subprocess.list2cmdline(args)
+
+                # Always OR in the CREATE_NEW_PROCESS_GROUP flag
+                creationflags |= winprocess.CREATE_NEW_PROCESS_GROUP
+
+                if startupinfo is None:
+                    startupinfo = winprocess.STARTUPINFO()
+
+                if None not in (p2cread, c2pwrite, errwrite):
+                    startupinfo.dwFlags |= winprocess.STARTF_USESTDHANDLES
+                    startupinfo.hStdInput = int(p2cread)
+                    startupinfo.hStdOutput = int(c2pwrite)
+                    startupinfo.hStdError = int(errwrite)
+                if shell:
+                    startupinfo.dwFlags |= winprocess.STARTF_USESHOWWINDOW
+                    startupinfo.wShowWindow = winprocess.SW_HIDE
+                    comspec = os.environ.get("COMSPEC", "cmd.exe")
+                    args = comspec + " /c " + args
+
+                # Determine if we can create a job or create nested jobs.
+                can_create_job = winprocess.CanCreateJobObject()
+                can_nest_jobs = self._can_nest_jobs()
+
+                # Ensure we write a warning message if we are falling back
+                if not (can_create_job or can_nest_jobs) and not self._ignore_children:
+                    # We can't create job objects and the user wanted us to
+                    # manage child processes; warn about this.
+                    print >> sys.stderr, \
+                        "ProcessManager UNABLE to use job objects to manage child processes"
+
+                # set process creation flags
+                creationflags |= winprocess.CREATE_SUSPENDED
+                creationflags |= winprocess.CREATE_UNICODE_ENVIRONMENT
+                if can_create_job:
+                    creationflags |= winprocess.CREATE_BREAKAWAY_FROM_JOB
+                if not (can_create_job or can_nest_jobs):
+                    # We've already warned above; just note here that child
+                    # processes will not be managed.
+                    print "ProcessManager NOT managing child processes"
+
+                # create the process
+                hp, ht, pid, tid = winprocess.CreateProcess(
+                    executable, args,
+                    None, None,  # No special security
+                    1,  # Must inherit handles!
+                    creationflags,
+                    winprocess.EnvironmentBlock(env),
+                    cwd, startupinfo)
+                self._child_created = True
+                self._handle = hp
+                self._thread = ht
+                self.pid = pid
+                self.tid = tid
+
+                if not self._ignore_children and (can_create_job or can_nest_jobs):
+                    try:
+                        # We create a new job for this process, so that we can kill
+                        # the process and any sub-processes
+                        # Create the IO Completion Port
+                        self._io_port = winprocess.CreateIoCompletionPort()
+                        self._job = winprocess.CreateJobObject()
+
+                        # Now associate the io comp port and the job object
+                        joacp = JOBOBJECT_ASSOCIATE_COMPLETION_PORT(winprocess.COMPKEY_JOBOBJECT,
+                                                                    self._io_port)
+                        winprocess.SetInformationJobObject(
+                            self._job,
+                            JobObjectAssociateCompletionPortInformation,
+                            addressof(joacp),
+                            sizeof(joacp)
+                        )
+
+                        # Allow subprocesses to break away from us - necessary for
+                        # flash with protected mode
+                        jbli = JOBOBJECT_BASIC_LIMIT_INFORMATION(
+                            c_longlong(0),  # per process time limit (ignored)
+                            c_longlong(0),  # per job user time limit (ignored)
+                            winprocess.JOB_OBJECT_LIMIT_BREAKAWAY_OK,
+                            0,  # min working set (ignored)
+                            0,  # max working set (ignored)
+                            0,  # active process limit (ignored)
+                            None,  # affinity (ignored)
+                            0,  # Priority class (ignored)
+                            0,  # Scheduling class (ignored)
+                        )
+
+                        iocntr = IO_COUNTERS()
+                        jeli = JOBOBJECT_EXTENDED_LIMIT_INFORMATION(
+                            jbli,  # basic limit info struct
+                            iocntr,    # io_counters (ignored)
+                            0,    # process mem limit (ignored)
+                            0,    # job mem limit (ignored)
+                            0,    # peak process limit (ignored)
+                            0)    # peak job limit (ignored)
+
+                        winprocess.SetInformationJobObject(self._job,
+                                                           JobObjectExtendedLimitInformation,
+                                                           addressof(jeli),
+                                                           sizeof(jeli)
+                                                           )
+
+                        # Assign the job object to the process
+                        winprocess.AssignProcessToJobObject(self._job, int(hp))
+
+                        # It's overkill, but we use Queue to signal between threads
+                        # because it handles errors more gracefully than event or condition.
+                        self._process_events = Queue()
+
+                        # Spin up our thread for managing the IO Completion Port
+                        self._procmgrthread = threading.Thread(target=self._procmgr)
+                    except:
+                        print >> sys.stderr, """Exception trying to use job objects;
+falling back to not using job objects for managing child processes"""
+                        tb = traceback.format_exc()
+                        print >> sys.stderr, tb
+                        # Ensure no dangling handles left behind
+                        self._cleanup_job_io_port()
+                else:
+                    self._job = None
+
+                winprocess.ResumeThread(int(ht))
+                if getattr(self, '_procmgrthread', None):
+                    self._procmgrthread.start()
+                ht.Close()
+
+                for i in (p2cread, c2pwrite, errwrite):
+                    if i is not None:
+                        i.Close()
+
+            # Per:
+            # https://msdn.microsoft.com/en-us/library/windows/desktop/hh448388%28v=vs.85%29.aspx
+            # Nesting jobs came in with Windows versions starting with 6.2
+            # (Windows 8), according to the table on this page:
+            # https://msdn.microsoft.com/en-us/library/ms724834%28v=vs.85%29.aspx
+            def _can_nest_jobs(self):
+                winver = sys.getwindowsversion()
+                return (winver.major > 6 or
+                        winver.major == 6 and winver.minor >= 2)
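+            # For example: Windows 7 reports (major=6, minor=1) -> False,
+            # Windows 8 reports (6, 2) -> True, and Windows 10 reports
+            # (10, 0) -> True.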
+
+            # Windows Process Manager - watches the IO Completion Port and
+            # keeps track of child processes
+            def _procmgr(self):
+                if not self._io_port or not self._job:
+                    return
+
+                try:
+                    self._poll_iocompletion_port()
+                except KeyboardInterrupt:
+                    raise
+
+            def _poll_iocompletion_port(self):
+                # Watch the IO Completion port for status
+                self._spawned_procs = {}
+                countdowntokill = 0
+
+                self.debug("start polling IO completion port")
+
+                while True:
+                    msgid = c_ulong(0)
+                    compkey = c_ulong(0)
+                    pid = c_ulong(0)
+                    portstatus = winprocess.GetQueuedCompletionStatus(self._io_port,
+                                                                      byref(msgid),
+                                                                      byref(compkey),
+                                                                      byref(pid),
+                                                                      5000)
+
+                    # If the countdowntokill has been activated, we need to check
+                    # if we should start killing the children or not.
+                    if countdowntokill != 0:
+                        diff = datetime.now() - countdowntokill
+                        # Arbitrarily wait 3 minutes for Windows to get its
+                        # act together. Windows sometimes takes a small nap
+                        # between notifying the IO Completion port and
+                        # actually killing the children, and we don't want to
+                        # mistake that situation for an unexpected parent
+                        # abort (which is what we're looking for here).
+                        if diff.seconds > self.MAX_IOCOMPLETION_PORT_NOTIFICATION_DELAY:
+                            print >> sys.stderr, \
+                                "WARNING | IO Completion Port failed to signal process shutdown"
+                            print >> sys.stderr, \
+                                "Parent process %s exited with children alive:" % self.pid
+                            print >> sys.stderr, \
+                                "PIDS: %s" % ', '.join([str(i) for i in self._spawned_procs])
+                            print >> sys.stderr, \
+                                "Attempting to kill them, but no guarantee of success"
+
+                            self.kill()
+                            self._process_events.put({self.pid: 'FINISHED'})
+                            break
+
+                    if not portstatus:
+                        # Check to see what happened
+                        errcode = winprocess.GetLastError()
+                        if errcode == winprocess.ERROR_ABANDONED_WAIT_0:
+                            # Then something has killed the port, break the loop
+                            print >> sys.stderr, "IO Completion Port unexpectedly closed"
+                            self._process_events.put({self.pid: 'FINISHED'})
+                            break
+                        elif errcode == winprocess.WAIT_TIMEOUT:
+                            # Timeouts are expected, just keep on polling
+                            continue
+                        else:
+                            print >> sys.stderr, \
+                                "Error Code %s trying to query IO Completion Port, " \
+                                "exiting" % errcode
+                            raise WinError(errcode)
+
+                    if compkey.value == winprocess.COMPKEY_TERMINATE.value:
+                        self.debug("compkeyterminate detected")
+                        # Then we're done
+                        break
+
+                    # Check the status of the IO Port and do things based on it
+                    if compkey.value == winprocess.COMPKEY_JOBOBJECT.value:
+                        if msgid.value == winprocess.JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO:
+                            # No processes left, time to shut down
+                            # Signal anyone waiting on us that it is safe to shut down
+                            self.debug("job object msg active processes zero")
+                            self._process_events.put({self.pid: 'FINISHED'})
+                            break
+                        elif msgid.value == winprocess.JOB_OBJECT_MSG_NEW_PROCESS:
+                            # New Process started
+                            # Add the child proc to our list in case our parent flakes out on us
+                            # without killing everything.
+                            if pid.value != self.pid:
+                                self._spawned_procs[pid.value] = 1
+                                self.debug("new process detected with pid value: %s" % pid.value)
+                        elif msgid.value == winprocess.JOB_OBJECT_MSG_EXIT_PROCESS:
+                            self.debug("process id %s exited normally" % pid.value)
+                            # One process exited normally
+                            if pid.value == self.pid and len(self._spawned_procs) > 0:
+                                # Parent process dying, start countdown timer
+                                countdowntokill = datetime.now()
+                            elif pid.value in self._spawned_procs:
+                                # Child Process died remove from list
+                                del self._spawned_procs[pid.value]
+                        elif msgid.value == winprocess.JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS:
+                            # One process exited abnormally
+                            self.debug("process id %s exited abnormally" % pid.value)
+                            if pid.value == self.pid and len(self._spawned_procs) > 0:
+                                # Parent process dying, start countdown timer
+                                countdowntokill = datetime.now()
+                            elif pid.value in self._spawned_procs:
+                                # Child Process died remove from list
+                                del self._spawned_procs[pid.value]
+                        else:
+                            # We don't care about anything else
+                            self.debug("We got a message %s" % msgid.value)
+
+            def _wait(self):
+                # First, check to see if the process is still running
+                if self._handle:
+                    self.returncode = winprocess.GetExitCodeProcess(self._handle)
+                else:
+                    # The process is already gone; return the stored returncode
+                    return self.returncode
+
+                threadalive = False
+                if hasattr(self, "_procmgrthread"):
+                    threadalive = self._procmgrthread.is_alive()
+                if self._job and threadalive and threading.current_thread() != self._procmgrthread:
+                    self.debug("waiting with IO completion port")
+                    # Then we are managing with IO Completion Ports
+                    # wait on a signal so we know when we have seen the last
+                    # process come through.
+                    # We use queues to synchronize between the thread and this
+                    # function because events just didn't have robust enough error
+                    # handling on pre-2.7 versions
+                    try:
+                        # timeout is the max amount of time the procmgr thread will wait for
+                        # child processes to shutdown before killing them with extreme prejudice.
+                        item = self._process_events.get(
+                            timeout=self.MAX_IOCOMPLETION_PORT_NOTIFICATION_DELAY +
+                            self.MAX_PROCESS_KILL_DELAY)
+                        if item[self.pid] == 'FINISHED':
+                            self.debug("received 'FINISHED' from _procmgrthread")
+                            self._process_events.task_done()
+                    except:
+                        traceback.print_exc()
+                        raise OSError("IO Completion Port failed to signal process shutdown")
+                    finally:
+                        if self._handle:
+                            self.returncode = winprocess.GetExitCodeProcess(self._handle)
+                        self._cleanup()
+
+                else:
+                    # Not managing with job objects, so all we can reasonably
+                    # do is call WaitForSingleObject and hope for the best
+                    self.debug("waiting without IO completion port")
+
+                    if not self._ignore_children:
+                        self.debug("NOT USING JOB OBJECTS!!!")
+                    # First, make sure we have not already ended
+                    if self.returncode != winprocess.STILL_ACTIVE:
+                        self._cleanup()
+                        return self.returncode
+
+                    rc = None
+                    if self._handle:
+                        rc = winprocess.WaitForSingleObject(self._handle, -1)
+
+                    if rc == winprocess.WAIT_TIMEOUT:
+                        # The process isn't dead, so kill it
+                        print "Timed out waiting for process to close, attempting TerminateProcess"
+                        self.kill()
+                    elif rc == winprocess.WAIT_OBJECT_0:
+                        # We caught WAIT_OBJECT_0, which indicates all is well
+                        print "Single process terminated successfully"
+                        self.returncode = winprocess.GetExitCodeProcess(self._handle)
+                    else:
+                        # An error occurred; raise it
+                        rc = winprocess.GetLastError()
+                        if rc:
+                            raise WinError(rc)
+
+                    self._cleanup()
+
+                return self.returncode
+
+            def _cleanup_job_io_port(self):
+                """ Do the job and IO port cleanup separately because there are
+                    cases where we want to clean these up without killing _handle
+                    (e.g. if we fail to create the job object in the first place)
+                """
+                if getattr(self, '_job', None) and self._job != winprocess.INVALID_HANDLE_VALUE:
+                    self._job.Close()
+                    self._job = None
+                else:
+                    # If Windows already freed our handle, just set it to None
+                    # (seen intermittently while testing)
+                    self._job = None
+
+                if getattr(self, '_io_port', None) and \
+                   self._io_port != winprocess.INVALID_HANDLE_VALUE:
+                    self._io_port.Close()
+                    self._io_port = None
+                else:
+                    self._io_port = None
+
+                if getattr(self, '_procmgrthread', None):
+                    self._procmgrthread = None
+
+            def _cleanup(self):
+                self._cleanup_job_io_port()
+                if self._thread and self._thread != winprocess.INVALID_HANDLE_VALUE:
+                    self._thread.Close()
+                    self._thread = None
+                else:
+                    self._thread = None
+
+                if self._handle and self._handle != winprocess.INVALID_HANDLE_VALUE:
+                    self._handle.Close()
+                    self._handle = None
+                else:
+                    self._handle = None
+
+        elif isPosix:
+
+            def _wait(self):
+                """ Haven't found any reason to differentiate between these platforms
+                    so they all use the same wait callback.  If it is necessary to
+                    craft different styles of wait, then a new _wait method
+                    could be easily implemented.
+                """
+
+                if not self._ignore_children:
+                    try:
+                        # os.waitpid return value:
+                        # > [...] a tuple containing its pid and exit status
+                        # > indication: a 16-bit number, whose low byte is the
+                        # > signal number that killed the process, and whose
+                        # > high byte is the exit status (if the signal number
+                        # > is zero)
+                        # - http://docs.python.org/2/library/os.html#os.wait
+                        status = os.waitpid(self.pid, 0)[1]
+
+                        # For consistency, format status the same as subprocess'
+                        # returncode attribute
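+                        # Worked example: a child calling exit(1) yields
+                        # status 0x0100 (256), so we return 256 >> 8 == 1;
+                        # a child killed by SIGKILL yields status 9, so we
+                        # return -9, matching subprocess conventions.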
+                        if status > 255:
+                            return status >> 8
+                        return -status
+                    except OSError as e:
+                        # Error 10 (ECHILD, "no child process") can indicate
+                        # a normal close; anything else is a real failure.
+                        if getattr(e, "errno", None) != 10:
+                            print >> sys.stderr, \
+                                "Encountered error waiting for pid to close: %s" % e
+                            raise
+
+                        return self.returncode
+
+                else:
+                    # For non-group wait, call base class
+                    subprocess.Popen.wait(self)
+                    return self.returncode
+
+            def _cleanup(self):
+                pass
+
+        else:
+            # An unrecognized platform; fall back to the base class for everything
+            print >> sys.stderr, \
+                "Unrecognized platform, process groups may not be managed properly"
+
+            def _wait(self):
+                self.returncode = subprocess.Popen.wait(self)
+                return self.returncode
+
+            def _cleanup(self):
+                pass
+
+    def __init__(self,
+                 cmd,
+                 args=None,
+                 cwd=None,
+                 env=None,
+                 ignore_children=False,
+                 kill_on_timeout=True,
+                 processOutputLine=(),
+                 processStderrLine=(),
+                 onTimeout=(),
+                 onFinish=(),
+                 **kwargs):
+        self.cmd = cmd
+        self.args = args
+        self.cwd = cwd
+        self.didTimeout = False
+        self.didOutputTimeout = False
+        self._ignore_children = ignore_children
+        self.keywordargs = kwargs
+        self.read_buffer = ''
+
+        if env is None:
+            env = os.environ.copy()
+        self.env = env
+
+        # handlers
+        def to_callable_list(arg):
+            if callable(arg):
+                arg = [arg]
+            return CallableList(arg)
+
+        processOutputLine = to_callable_list(processOutputLine)
+        processStderrLine = to_callable_list(processStderrLine)
+        onTimeout = to_callable_list(onTimeout)
+        onFinish = to_callable_list(onFinish)
+
+        def on_timeout():
+            self.didTimeout = True
+            self.didOutputTimeout = self.reader.didOutputTimeout
+            if kill_on_timeout:
+                self.kill()
+        onTimeout.insert(0, on_timeout)
+
+        self._stderr = subprocess.STDOUT
+        if processStderrLine:
+            self._stderr = subprocess.PIPE
+        self.reader = ProcessReader(stdout_callback=processOutputLine,
+                                    stderr_callback=processStderrLine,
+                                    finished_callback=onFinish,
+                                    timeout_callback=onTimeout)
+
+        # It is common for people to pass in the entire array with the cmd and
+        # the args together since this is how Popen uses it.  Allow for that.
+        if isinstance(self.cmd, list):
+            if self.args is not None:
+                raise TypeError("cmd and args must not both be lists")
+            (self.cmd, self.args) = (self.cmd[0], self.cmd[1:])
+        elif self.args is None:
+            self.args = []
+
+    @property
+    def timedOut(self):
+        """True if the process has timed out for any reason."""
+        return self.didTimeout
+
+    @property
+    def outputTimedOut(self):
+        """True if the process has timed out for no output."""
+        return self.didOutputTimeout
+
+    @property
+    def commandline(self):
+        """the string value of the command line (command + args)"""
+        return subprocess.list2cmdline([self.cmd] + self.args)
+
+    def run(self, timeout=None, outputTimeout=None):
+        """
+        Starts the process.
+
+        If timeout is not None, the process will be allowed to continue for
+        that number of seconds before being killed. If the process is killed
+        due to a timeout, the onTimeout handler will be called.
+
+        If outputTimeout is not None, the process will be allowed to continue
+        for that number of seconds without producing any output before
+        being killed.
+        """
+        self.didTimeout = False
+        self.didOutputTimeout = False
+
+        # default arguments
+        args = dict(stdout=subprocess.PIPE,
+                    stderr=self._stderr,
+                    cwd=self.cwd,
+                    env=self.env,
+                    ignore_children=self._ignore_children)
+
+        # build process arguments
+        args.update(self.keywordargs)
+
+        # launch the process
+        self.proc = self.Process([self.cmd] + self.args, **args)
+
+        if isPosix:
+            # Keep track of the initial process group in case the process detaches itself
+            self.proc.pgid = self._getpgid(self.proc.pid)
+            self.proc.detached_pid = None
+
+        self.processOutput(timeout=timeout, outputTimeout=outputTimeout)
+
+    def kill(self, sig=None):
+        """
+        Kills the managed process.
+
+        If you created the process with 'ignore_children=False' (the
+        default) then it will also kill all child processes spawned by
+        it. If you specified 'ignore_children=True' when creating the
+        process, only the root process will be killed.
+
+        Note that this does not manage any state or save any output;
+        it immediately kills the process.
+
+        :param sig: Signal used to kill the process, defaults to SIGKILL
+                    (has no effect on Windows)
+        """
+        if not hasattr(self, 'proc'):
+            raise RuntimeError("Calling kill() on a non started process is not"
+                               " allowed.")
+        self.proc.kill(sig=sig)
+
+        # When we kill the managed process, we also have to wait for the
+        # reader thread to finish. Otherwise consumers would have to assume
+        # that it still has not completely shut down.
+        return self.wait()
+
+    def poll(self):
+        """Check if child process has terminated
+
+        Returns the current returncode value:
+        - None if the process hasn't terminated yet
+        - A negative number if the process was killed by signal N (Unix only)
+        - 0 if the process ended without failures
+
+        """
+        # Ensure that we first check for the reader status. Otherwise
+        # we might mark the process as finished while output is still getting
+        # processed.
+        if not hasattr(self, 'proc'):
+            raise RuntimeError("Calling poll() on a non started process is not"
+                               " allowed.")
+        elif self.reader.is_alive():
+            return None
+        elif hasattr(self.proc, "returncode"):
+            return self.proc.returncode
+        else:
+            return self.proc.poll()
+
+    def processOutput(self, timeout=None, outputTimeout=None):
+        """
+        Handle process output until the process terminates or times out.
+
+        If timeout is not None, the process will be allowed to continue for
+        that number of seconds before being killed.
+
+        If outputTimeout is not None, the process will be allowed to continue
+        for that number of seconds without producing any output before
+        being killed.
+        """
+        # this method is kept for backward compatibility
+        if not hasattr(self, 'proc'):
+            self.run(timeout=timeout, outputTimeout=outputTimeout)
+            # self.run will call this again
+            return
+        if not self.reader.is_alive():
+            self.reader.timeout = timeout
+            self.reader.output_timeout = outputTimeout
+            self.reader.start(self.proc)
+
+    def wait(self, timeout=None):
+        """
+        Waits until all output has been read and the process is
+        terminated.
+
+        If timeout is not None, will return after timeout seconds.
+        This timeout only causes the wait function to return and
+        does not kill the process.
+
+        Returns the process exit code value:
+        - None if the process hasn't terminated yet
+        - A negative number if the process was killed by signal N (Unix only)
+        - 0 if the process ended without failures
+
+        """
+        if self.reader.thread and self.reader.thread is not threading.current_thread():
+            # Thread.join() blocks the main thread until the reader thread is finished
+            # wake up once a second in case a keyboard interrupt is sent
+            count = 0
+            while self.reader.is_alive():
+                self.reader.thread.join(timeout=1)
+                count += 1
+                if timeout and count > timeout:
+                    return None
+
+        self.returncode = self.proc.wait()
+        return self.returncode
+
+    # TODO Remove this method when consumers have been fixed
+    def waitForFinish(self, timeout=None):
+        print >> sys.stderr, "MOZPROCESS WARNING: ProcessHandler.waitForFinish() is deprecated, " \
+                             "use ProcessHandler.wait() instead"
+        return self.wait(timeout=timeout)
+
+    @property
+    def pid(self):
+        return self.proc.pid
+
+    @classmethod
+    def _getpgid(cls, pid):
+        try:
+            return os.getpgid(pid)
+        except OSError as e:
+            # Do not raise for "No such process"
+            if e.errno != errno.ESRCH:
+                raise
+
+    def check_for_detached(self, new_pid):
+        """Check if the current process has been detached and mark it appropriately.
+
+        In case of application restarts the process can spawn itself into a
+        new process group. From then on mozprocess can no longer track the
+        process and it has to be marked as detached. If the consumer of
+        mozprocess still knows the new process id, it can check for the
+        detached state.
+
+        new_pid is the new process id of the child process.
+        """
+        if not self.proc:
+            return
+
+        if isPosix:
+            new_pgid = self._getpgid(new_pid)
+
+            if new_pgid and new_pgid != self.proc.pgid:
+                self.proc.detached_pid = new_pid
+                print >> sys.stdout, \
+                    'Child process with id "%s" has been marked as detached because it is no ' \
+                    'longer in the managed process group. Keeping reference to the process id ' \
+                    '"%s" which is the new child process.' % (self.pid, new_pid)
+
+
+class CallableList(list):
+
+    def __call__(self, *args, **kwargs):
+        for e in self:
+            e(*args, **kwargs)
+
+    def __add__(self, lst):
+        return CallableList(list.__add__(self, lst))
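+
+    # Usage sketch (illustrative): a CallableList fans a single call out to
+    # every element, so a list of line handlers behaves like one callback:
+    #
+    #   handlers = CallableList([handler_a, handler_b])  # assumed callables
+    #   handlers("line")  # calls handler_a("line"), then handler_b("line")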
+
+
+class ProcessReader(object):
+
+    def __init__(self, stdout_callback=None, stderr_callback=None,
+                 finished_callback=None, timeout_callback=None,
+                 timeout=None, output_timeout=None):
+        self.stdout_callback = stdout_callback or (lambda line: True)
+        self.stderr_callback = stderr_callback or (lambda line: True)
+        self.finished_callback = finished_callback or (lambda: True)
+        self.timeout_callback = timeout_callback or (lambda: True)
+        self.timeout = timeout
+        self.output_timeout = output_timeout
+        self.didOutputTimeout = False
+        self.thread = None
+
+    def _create_stream_reader(self, name, stream, queue, callback):
+        thread = threading.Thread(name=name,
+                                  target=self._read_stream,
+                                  args=(stream, queue, callback))
+        thread.daemon = True
+        thread.start()
+        return thread
+
+    def _read_stream(self, stream, queue, callback):
+        while True:
+            line = stream.readline()
+            if not line:
+                break
+            queue.put((line, callback))
+        stream.close()
+
+    def start(self, proc):
+        queue = Queue()
+        stdout_reader = None
+        if proc.stdout:
+            stdout_reader = self._create_stream_reader('ProcessReaderStdout',
+                                                       proc.stdout,
+                                                       queue,
+                                                       self.stdout_callback)
+        stderr_reader = None
+        if proc.stderr and proc.stderr != proc.stdout:
+            stderr_reader = self._create_stream_reader('ProcessReaderStderr',
+                                                       proc.stderr,
+                                                       queue,
+                                                       self.stderr_callback)
+        self.thread = threading.Thread(name='ProcessReader',
+                                       target=self._read,
+                                       args=(stdout_reader,
+                                             stderr_reader,
+                                             queue))
+        self.thread.daemon = True
+        self.thread.start()
+
+    def _read(self, stdout_reader, stderr_reader, queue):
+        start_time = time.time()
+        timed_out = False
+        timeout = self.timeout
+        if timeout is not None:
+            timeout += start_time
+        output_timeout = self.output_timeout
+        if output_timeout is not None:
+            output_timeout += start_time
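+        # Example (illustrative): with timeout=60 the absolute deadline is
+        # start_time + 60, while output_timeout is re-armed to
+        # now + self.output_timeout each time a line arrives (see below).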
+
+        while (stdout_reader and stdout_reader.is_alive()) \
+                or (stderr_reader and stderr_reader.is_alive()):
+            has_line = True
+            try:
+                line, callback = queue.get(True, 0.02)
+            except Empty:
+                has_line = False
+            now = time.time()
+            if not has_line:
+                if output_timeout is not None and now > output_timeout:
+                    timed_out = True
+                    self.didOutputTimeout = True
+                    break
+            else:
+                if output_timeout is not None:
+                    output_timeout = now + self.output_timeout
+                callback(line.rstrip())
+            if timeout is not None and now > timeout:
+                timed_out = True
+                break
+        # process remaining lines to read
+        while not queue.empty():
+            line, callback = queue.get(False)
+            callback(line.rstrip())
+        if timed_out:
+            self.timeout_callback()
+        if stdout_reader:
+            stdout_reader.join()
+        if stderr_reader:
+            stderr_reader.join()
+        if not timed_out:
+            self.finished_callback()
+
+    def is_alive(self):
+        if self.thread:
+            return self.thread.is_alive()
+        return False
+
+# default output handlers
+# these should be callables that take the output line
+
+
+class StoreOutput(object):
+    """accumulate stdout"""
+
+    def __init__(self):
+        self.output = []
+
+    def __call__(self, line):
+        self.output.append(line)
+
+
+class StreamOutput(object):
+    """pass output to a stream and flush"""
+
+    def __init__(self, stream):
+        self.stream = stream
+
+    def __call__(self, line):
+        try:
+            self.stream.write(line + '\n')
+        except UnicodeDecodeError:
+            # TODO: Workaround for bug #991866 to make sure we can display
+            # output when normal UTF-8 display is failing
+            self.stream.write(line.decode('iso8859-1') + '\n')
+        self.stream.flush()
+
+
+class LogOutput(StreamOutput):
+    """pass output to a file"""
+
+    def __init__(self, filename):
+        self.file_obj = open(filename, 'a')
+        StreamOutput.__init__(self, self.file_obj)
+
+    def __del__(self):
+        if self.file_obj is not None:
+            self.file_obj.close()
+
+
+# front end class with the default handlers
+
+
+class ProcessHandler(ProcessHandlerMixin):
+    """
+    Convenience class for handling processes with default output handlers.
+
+    By default, all output is sent to stdout. This can be disabled by setting
+    the *stream* argument to None.
+
+    If the processOutputLine keyword argument is specified, the function or
+    list of functions it names will be called for each line of output; in
+    that case the output will not be written to stdout automatically, even
+    if stream is True (the default).
+
+    If storeOutput==True, the output produced by the process will be saved
+    as self.output.
+
+    If logfile is not None, the output produced by the process will be
+    appended to the given file.
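+
+    Illustrative example (assumes an 'ls' binary on PATH)::
+
+        p = ProcessHandler(['ls', '-l'])
+        p.run(timeout=10)
+        p.wait()
+        # p.output now holds the accumulated lines (storeOutput=True)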
+    """
+
+    def __init__(self, cmd, logfile=None, stream=True, storeOutput=True,
+                 **kwargs):
+        kwargs.setdefault('processOutputLine', [])
+        if callable(kwargs['processOutputLine']):
+            kwargs['processOutputLine'] = [kwargs['processOutputLine']]
+
+        if logfile:
+            logoutput = LogOutput(logfile)
+            kwargs['processOutputLine'].append(logoutput)
+
+        if stream is True:
+            # Print to standard output only if no outputline provided
+            if not kwargs['processOutputLine']:
+                kwargs['processOutputLine'].append(StreamOutput(sys.stdout))
+        elif stream:
+            streamoutput = StreamOutput(stream)
+            kwargs['processOutputLine'].append(streamoutput)
+
+        self.output = None
+        if storeOutput:
+            storeoutput = StoreOutput()
+            self.output = storeoutput.output
+            kwargs['processOutputLine'].append(storeoutput)
+
+        ProcessHandlerMixin.__init__(self, cmd, **kwargs)
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/mozprocess/qijo.py
@@ -0,0 +1,167 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import absolute_import
+
+from ctypes import (
+    c_void_p,
+    POINTER,
+    sizeof,
+    Structure,
+    windll,
+    WinError,
+    WINFUNCTYPE,
+    addressof,
+    c_size_t,
+    c_ulong
+)
+
+from ctypes.wintypes import BOOL, BYTE, DWORD, HANDLE, LARGE_INTEGER
+
+LPVOID = c_void_p
+LPDWORD = POINTER(DWORD)
+SIZE_T = c_size_t
+ULONG_PTR = POINTER(c_ulong)
+
+# A ULONGLONG is a 64-bit unsigned integer.
+# Thus there are 8 bytes in a ULONGLONG.
+# XXX why not import c_ulonglong ?
+ULONGLONG = BYTE * 8
+
+
+class IO_COUNTERS(Structure):
+    # The IO_COUNTERS struct is 6 ULONGLONGs.
+    # TODO: Replace with non-dummy fields.
+    _fields_ = [('dummy', ULONGLONG * 6)]
+
+
+class JOBOBJECT_BASIC_ACCOUNTING_INFORMATION(Structure):
+    _fields_ = [('TotalUserTime', LARGE_INTEGER),
+                ('TotalKernelTime', LARGE_INTEGER),
+                ('ThisPeriodTotalUserTime', LARGE_INTEGER),
+                ('ThisPeriodTotalKernelTime', LARGE_INTEGER),
+                ('TotalPageFaultCount', DWORD),
+                ('TotalProcesses', DWORD),
+                ('ActiveProcesses', DWORD),
+                ('TotalTerminatedProcesses', DWORD)]
+
+
+class JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION(Structure):
+    _fields_ = [('BasicInfo', JOBOBJECT_BASIC_ACCOUNTING_INFORMATION),
+                ('IoInfo', IO_COUNTERS)]
+
+
+# see http://msdn.microsoft.com/en-us/library/ms684147%28VS.85%29.aspx
+class JOBOBJECT_BASIC_LIMIT_INFORMATION(Structure):
+    _fields_ = [('PerProcessUserTimeLimit', LARGE_INTEGER),
+                ('PerJobUserTimeLimit', LARGE_INTEGER),
+                ('LimitFlags', DWORD),
+                ('MinimumWorkingSetSize', SIZE_T),
+                ('MaximumWorkingSetSize', SIZE_T),
+                ('ActiveProcessLimit', DWORD),
+                ('Affinity', ULONG_PTR),
+                ('PriorityClass', DWORD),
+                ('SchedulingClass', DWORD)
+                ]
+
+
+class JOBOBJECT_ASSOCIATE_COMPLETION_PORT(Structure):
+    _fields_ = [('CompletionKey', c_ulong),
+                ('CompletionPort', HANDLE)]
+
+
+# see http://msdn.microsoft.com/en-us/library/ms684156%28VS.85%29.aspx
+class JOBOBJECT_EXTENDED_LIMIT_INFORMATION(Structure):
+    _fields_ = [('BasicLimitInformation', JOBOBJECT_BASIC_LIMIT_INFORMATION),
+                ('IoInfo', IO_COUNTERS),
+                ('ProcessMemoryLimit', SIZE_T),
+                ('JobMemoryLimit', SIZE_T),
+                ('PeakProcessMemoryUsed', SIZE_T),
+                ('PeakJobMemoryUsed', SIZE_T)]
+
+
+# These numbers below come from:
+# http://msdn.microsoft.com/en-us/library/ms686216%28v=vs.85%29.aspx
+JobObjectAssociateCompletionPortInformation = 7
+JobObjectBasicAndIoAccountingInformation = 8
+JobObjectExtendedLimitInformation = 9
+
+
+class JobObjectInfo(object):
+    mapping = {'JobObjectBasicAndIoAccountingInformation': 8,
+               'JobObjectExtendedLimitInformation': 9,
+               'JobObjectAssociateCompletionPortInformation': 7}
+    structures = {
+        7: JOBOBJECT_ASSOCIATE_COMPLETION_PORT,
+        8: JOBOBJECT_BASIC_AND_IO_ACCOUNTING_INFORMATION,
+        9: JOBOBJECT_EXTENDED_LIMIT_INFORMATION
+    }
+
+    def __init__(self, _class):
+        if isinstance(_class, basestring):
+            assert _class in self.mapping, \
+                'Class should be one of %s; you gave %s' % (self.mapping, _class)
+            _class = self.mapping[_class]
+        assert _class in self.structures, \
+            'Class should be one of %s; you gave %s' % (self.structures, _class)
+        self.code = _class
+        self.info = self.structures[_class]()
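+        # e.g. (illustrative) JobObjectInfo('JobObjectExtendedLimitInformation')
+        # sets code == 9 and info to a zeroed
+        # JOBOBJECT_EXTENDED_LIMIT_INFORMATION instance.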
+
+
+QueryInformationJobObjectProto = WINFUNCTYPE(
+    BOOL,        # Return type
+    HANDLE,      # hJob
+    DWORD,       # JobObjectInfoClass
+    LPVOID,      # lpJobObjectInfo
+    DWORD,       # cbJobObjectInfoLength
+    LPDWORD      # lpReturnLength
+)
+
+QueryInformationJobObjectFlags = (
+    (1, 'hJob'),
+    (1, 'JobObjectInfoClass'),
+    (1, 'lpJobObjectInfo'),
+    (1, 'cbJobObjectInfoLength'),
+    (1, 'lpReturnLength', None)
+)
+
+_QueryInformationJobObject = QueryInformationJobObjectProto(
+    ('QueryInformationJobObject', windll.kernel32),
+    QueryInformationJobObjectFlags
+)
+
+
+class SubscriptableReadOnlyStruct(object):
+
+    def __init__(self, struct):
+        self._struct = struct
+
+    def _delegate(self, name):
+        result = getattr(self._struct, name)
+        if isinstance(result, Structure):
+            return SubscriptableReadOnlyStruct(result)
+        return result
+
+    def __getitem__(self, name):
+        match = [fname for fname, ftype in self._struct._fields_
+                 if fname == name]
+        if match:
+            return self._delegate(name)
+        raise KeyError(name)
+
+    def __getattr__(self, name):
+        return self._delegate(name)
+
+
+def QueryInformationJobObject(hJob, JobObjectInfoClass):
+    jobinfo = JobObjectInfo(JobObjectInfoClass)
+    result = _QueryInformationJobObject(
+        hJob=hJob,
+        JobObjectInfoClass=jobinfo.code,
+        lpJobObjectInfo=addressof(jobinfo.info),
+        cbJobObjectInfoLength=sizeof(jobinfo.info)
+    )
+    if not result:
+        raise WinError()
+    return SubscriptableReadOnlyStruct(jobinfo.info)
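+
+
+# Usage sketch (illustrative; hJob is an assumed open job object handle):
+#
+#   info = QueryInformationJobObject(hJob, 'JobObjectExtendedLimitInformation')
+#   flags = info['BasicLimitInformation']['LimitFlags']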
new file mode 100644
--- /dev/null
+++ b/testing/mozharness/mozprocess/winprocess.py
@@ -0,0 +1,485 @@
+# A module to expose various thread/process/job related structures and
+# methods from kernel32
+#
+# The MIT License
+#
+# Copyright (c) 2003-2004 by Peter Astrand <astrand@lysator.liu.se>
+#
+# Additions and modifications written by Benjamin Smedberg
+# <benjamin@smedbergs.us> are Copyright (c) 2006 by the Mozilla Foundation
+# <http://www.mozilla.org/>
+#
+# More Modifications
+# Copyright (c) 2006-2007 by Mike Taylor <bear@code-bear.com>
+# Copyright (c) 2007-2008 by Mikeal Rogers <mikeal@mozilla.com>
+#
+# By obtaining, using, and/or copying this software and/or its
+# associated documentation, you agree that you have read, understood,
+# and will comply with the following terms and conditions:
+#
+# Permission to use, copy, modify, and distribute this software and
+# its associated documentation for any purpose and without fee is
+# hereby granted, provided that the above copyright notice appears in
+# all copies, and that both that copyright notice and this permission
+# notice appear in supporting documentation, and that the name of the
+# author not be used in advertising or publicity pertaining to
+# distribution of the software without specific, written prior
+# permission.
+#
+# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS.
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
+# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
+# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
+# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+from __future__ import absolute_import, unicode_literals
+
+import sys
+import subprocess
+
+from ctypes import c_void_p, POINTER, sizeof, Structure, windll, WinError, WINFUNCTYPE, c_ulong
+from ctypes.wintypes import BOOL, BYTE, DWORD, HANDLE, LPCWSTR, LPWSTR, UINT, WORD
+from .qijo import QueryInformationJobObject
+
+LPVOID = c_void_p
+LPBYTE = POINTER(BYTE)
+LPDWORD = POINTER(DWORD)
+LPBOOL = POINTER(BOOL)
+LPULONG = POINTER(c_ulong)
+
+
+def ErrCheckBool(result, func, args):
+    """errcheck function for Windows functions that return a BOOL True
+    on success"""
+    if not result:
+        raise WinError()
+    return args
+
+
+# AutoHANDLE
+
+class AutoHANDLE(HANDLE):
+    """Subclass of HANDLE which will call CloseHandle() on deletion."""
+
+    CloseHandleProto = WINFUNCTYPE(BOOL, HANDLE)
+    CloseHandle = CloseHandleProto(("CloseHandle", windll.kernel32))
+    CloseHandle.errcheck = ErrCheckBool
+
+    def Close(self):
+        if self.value and self.value != HANDLE(-1).value:
+            self.CloseHandle(self)
+            self.value = 0
+
+    def __del__(self):
+        self.Close()
+
+    def __int__(self):
+        return self.value
+
+
+def ErrCheckHandle(result, func, args):
+    """errcheck function for Windows functions that return a HANDLE."""
+    if not result:
+        raise WinError()
+    return AutoHANDLE(result)
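+
+
+# Functions below that set `.errcheck = ErrCheckHandle` therefore return
+# AutoHANDLE instances: Close() is safe to call repeatedly, and __del__
+# releases the handle when the wrapper is garbage collected.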
+
+# PROCESS_INFORMATION structure
+
+
+class PROCESS_INFORMATION(Structure):
+    _fields_ = [("hProcess", HANDLE),
+                ("hThread", HANDLE),
+                ("dwProcessID", DWORD),
+                ("dwThreadID", DWORD)]
+
+    def __init__(self):
+        Structure.__init__(self)
+
+        self.cb = sizeof(self)
+
+
+LPPROCESS_INFORMATION = POINTER(PROCESS_INFORMATION)
+
+# STARTUPINFO structure
+
+
+class STARTUPINFO(Structure):
+    _fields_ = [("cb", DWORD),
+                ("lpReserved", LPWSTR),
+                ("lpDesktop", LPWSTR),
+                ("lpTitle", LPWSTR),
+                ("dwX", DWORD),
+                ("dwY", DWORD),
+                ("dwXSize", DWORD),
+                ("dwYSize", DWORD),
+                ("dwXCountChars", DWORD),
+                ("dwYCountChars", DWORD),
+                ("dwFillAttribute", DWORD),
+                ("dwFlags", DWORD),
+                ("wShowWindow", WORD),
+                ("cbReserved2", WORD),
+                ("lpReserved2", LPBYTE),
+                ("hStdInput", HANDLE),
+                ("hStdOutput", HANDLE),
+                ("hStdError", HANDLE)
+                ]
+
+
+LPSTARTUPINFO = POINTER(STARTUPINFO)
+
+SW_HIDE = 0
+
+STARTF_USESHOWWINDOW = 0x01
+STARTF_USESIZE = 0x02
+STARTF_USEPOSITION = 0x04
+STARTF_USECOUNTCHARS = 0x08
+STARTF_USEFILLATTRIBUTE = 0x10
+STARTF_RUNFULLSCREEN = 0x20
+STARTF_FORCEONFEEDBACK = 0x40
+STARTF_FORCEOFFFEEDBACK = 0x80
+STARTF_USESTDHANDLES = 0x100
+
+# EnvironmentBlock
+
+
+class EnvironmentBlock:
+    """An object which can be passed as the lpEnv parameter of CreateProcess.
+    It is initialized with a dictionary."""
+
+    def __init__(self, env):
+        if not env:
+            self._as_parameter_ = None
+        else:
+            values = []
+            fs_encoding = sys.getfilesystemencoding() or 'mbcs'
+            for k, v in env.iteritems():
+                if isinstance(k, bytes):
+                    k = k.decode(fs_encoding, 'replace')
+                if isinstance(v, bytes):
+                    v = v.decode(fs_encoding, 'replace')
+                values.append("{}={}".format(k, v))
+            values.append("")
+            self._as_parameter_ = LPCWSTR("\0".join(values))
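+
+# For illustration: EnvironmentBlock({'PATH': u'C:\\bin'}) builds
+# u'PATH=C:\\bin\x00' (LPCWSTR appends the final terminating NUL), giving the
+# double-NUL-terminated block CreateProcessW expects for lpEnvironment.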
+
+
+# Error Messages we need to watch for go here
+# See: http://msdn.microsoft.com/en-us/library/ms681388%28v=vs.85%29.aspx
+ERROR_ABANDONED_WAIT_0 = 735
+
+# GetLastError()
+GetLastErrorProto = WINFUNCTYPE(DWORD)  # Return Type
+GetLastErrorFlags = ()
+GetLastError = GetLastErrorProto(("GetLastError", windll.kernel32), GetLastErrorFlags)
+
+# CreateProcess()
+
+CreateProcessProto = WINFUNCTYPE(BOOL,                  # Return type
+                                 LPCWSTR,               # lpApplicationName
+                                 LPWSTR,                # lpCommandLine
+                                 LPVOID,                # lpProcessAttributes
+                                 LPVOID,                # lpThreadAttributes
+                                 BOOL,                  # bInheritHandles
+                                 DWORD,                 # dwCreationFlags
+                                 LPVOID,                # lpEnvironment
+                                 LPCWSTR,               # lpCurrentDirectory
+                                 LPSTARTUPINFO,         # lpStartupInfo
+                                 LPPROCESS_INFORMATION  # lpProcessInformation
+                                 )
+
+CreateProcessFlags = ((1, "lpApplicationName", None),
+                      (1, "lpCommandLine"),
+                      (1, "lpProcessAttributes", None),
+                      (1, "lpThreadAttributes", None),
+                      (1, "bInheritHandles", True),
+                      (1, "dwCreationFlags", 0),
+                      (1, "lpEnvironment", None),
+                      (1, "lpCurrentDirectory", None),
+                      (1, "lpStartupInfo"),
+                      (2, "lpProcessInformation"))
+
+
+def ErrCheckCreateProcess(result, func, args):
+    ErrCheckBool(result, func, args)
+    # return a tuple (hProcess, hThread, dwProcessID, dwThreadID)
+    pi = args[9]
+    return AutoHANDLE(pi.hProcess), AutoHANDLE(pi.hThread), pi.dwProcessID, pi.dwThreadID
+
+
+CreateProcess = CreateProcessProto(("CreateProcessW", windll.kernel32),
+                                   CreateProcessFlags)
+CreateProcess.errcheck = ErrCheckCreateProcess
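+
+# Usage sketch (illustrative; the command string is an assumption):
+#
+#   hp, ht, pid, tid = CreateProcess(
+#       None, u'cmd.exe /c echo hi',  # lpApplicationName, lpCommandLine
+#       None, None,                   # process/thread security attributes
+#       1,                            # bInheritHandles
+#       CREATE_NEW_PROCESS_GROUP,     # dwCreationFlags (defined below)
+#       None, None,                   # lpEnvironment, lpCurrentDirectory
+#       STARTUPINFO())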
+
+# flags for CreateProcess
+CREATE_BREAKAWAY_FROM_JOB = 0x01000000
+CREATE_DEFAULT_ERROR_MODE = 0x04000000
+CREATE_NEW_CONSOLE = 0x00000010
+CREATE_NEW_PROCESS_GROUP = 0x00000200
+CREATE_NO_WINDOW = 0x08000000
+CREATE_SUSPENDED = 0x00000004
+CREATE_UNICODE_ENVIRONMENT = 0x00000400
+
+# Flags for IOCompletion ports (some of these would probably be defined if
+# we used the win32 extensions for python, but we don't want to do that if
+# we can help it).
+INVALID_HANDLE_VALUE = HANDLE(-1)  # From winbase.h
+
+# Self Defined Constants for IOPort <--> Job Object communication
+COMPKEY_TERMINATE = c_ulong(0)
+COMPKEY_JOBOBJECT = c_ulong(1)
+
+# flags for job limit information
+# see http://msdn.microsoft.com/en-us/library/ms684147%28VS.85%29.aspx
+JOB_OBJECT_LIMIT_BREAKAWAY_OK = 0x00000800
+JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK = 0x00001000
+
+# Flags for Job Object Completion Port Message IDs from winnt.h
+# See also: http://msdn.microsoft.com/en-us/library/ms684141%28v=vs.85%29.aspx
+JOB_OBJECT_MSG_END_OF_JOB_TIME = 1
+JOB_OBJECT_MSG_END_OF_PROCESS_TIME = 2
+JOB_OBJECT_MSG_ACTIVE_PROCESS_LIMIT = 3
+JOB_OBJECT_MSG_ACTIVE_PROCESS_ZERO = 4
+JOB_OBJECT_MSG_NEW_PROCESS = 6
+JOB_OBJECT_MSG_EXIT_PROCESS = 7
+JOB_OBJECT_MSG_ABNORMAL_EXIT_PROCESS = 8
+JOB_OBJECT_MSG_PROCESS_MEMORY_LIMIT = 9
+JOB_OBJECT_MSG_JOB_MEMORY_LIMIT = 10
+
+# See winbase.h
+DEBUG_ONLY_THIS_PROCESS = 0x00000002
+DEBUG_PROCESS = 0x00000001
+DETACHED_PROCESS = 0x00000008
+
+# GetQueuedCompletionStatus -
+# http://msdn.microsoft.com/en-us/library/aa364986%28v=vs.85%29.aspx
+GetQueuedCompletionStatusProto = WINFUNCTYPE(BOOL,         # Return Type
+                                             HANDLE,       # Completion Port
+                                             LPDWORD,      # Msg ID
+                                             LPULONG,      # Completion Key
+                                             # PID Returned from the call (may be null)
+                                             LPULONG,
+                                             DWORD)        # milliseconds to wait
+GetQueuedCompletionStatusFlags = ((1, "CompletionPort", INVALID_HANDLE_VALUE),
+                                  (1, "lpNumberOfBytes", None),
+                                  (1, "lpCompletionKey", None),
+                                  (1, "lpPID", None),
+                                  (1, "dwMilliseconds", 0))
+GetQueuedCompletionStatus = GetQueuedCompletionStatusProto(("GetQueuedCompletionStatus",
+                                                            windll.kernel32),
+                                                           GetQueuedCompletionStatusFlags)
+
+# CreateIOCompletionPort
+# Note that the completion key is just a number, not a pointer.
+CreateIoCompletionPortProto = WINFUNCTYPE(HANDLE,      # Return Type
+                                          HANDLE,      # File Handle
+                                          HANDLE,      # Existing Completion Port
+                                          c_ulong,     # Completion Key
+                                          DWORD)       # Number of Threads
+
+CreateIoCompletionPortFlags = ((1, "FileHandle", INVALID_HANDLE_VALUE),
+                               (1, "ExistingCompletionPort", 0),
+                               (1, "CompletionKey", c_ulong(0)),
+                               (1, "NumberOfConcurrentThreads", 0))
+CreateIoCompletionPort = CreateIoCompletionPortProto(("CreateIoCompletionPort",
+                                                      windll.kernel32),
+                                                     CreateIoCompletionPortFlags)
+CreateIoCompletionPort.errcheck = ErrCheckHandle
+
+# SetInformationJobObject
+SetInformationJobObjectProto = WINFUNCTYPE(BOOL,      # Return Type
+                                           HANDLE,    # Job Handle
+                                           DWORD,     # Type of Class next param is
+                                           LPVOID,    # Job Object Class
+                                           DWORD)     # Job Object Class Length
+
+SetInformationJobObjectProtoFlags = ((1, "hJob", None),
+                                     (1, "JobObjectInfoClass", None),
+                                     (1, "lpJobObjectInfo", None),
+                                     (1, "cbJobObjectInfoLength", 0))
+SetInformationJobObject = SetInformationJobObjectProto(("SetInformationJobObject",
+                                                        windll.kernel32),
+                                                       SetInformationJobObjectProtoFlags)
+SetInformationJobObject.errcheck = ErrCheckBool
+
+# CreateJobObject()
+CreateJobObjectProto = WINFUNCTYPE(HANDLE,             # Return type
+                                   LPVOID,             # lpJobAttributes
+                                   LPCWSTR             # lpName
+                                   )
+
+CreateJobObjectFlags = ((1, "lpJobAttributes", None),
+                        (1, "lpName", None))
+
+CreateJobObject = CreateJobObjectProto(("CreateJobObjectW", windll.kernel32),
+                                       CreateJobObjectFlags)
+CreateJobObject.errcheck = ErrCheckHandle
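+
+# Usage sketch (illustrative only): both paramflags carry None defaults, so
+# an anonymous job object can be created with no arguments at all:
+#
+#   job = CreateJobObject()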
+
+# AssignProcessToJobObject()
+
+AssignProcessToJobObjectProto = WINFUNCTYPE(BOOL,      # Return type
+                                            HANDLE,    # hJob
+                                            HANDLE     # hProcess
+                                            )
+AssignProcessToJobObjectFlags = ((1, "hJob"),
+                                 (1, "hProcess"))
+AssignProcessToJobObject = AssignProcessToJobObjectProto(
+    ("AssignProcessToJobObject", windll.kernel32),
+    AssignProcessToJobObjectFlags)
+AssignProcessToJobObject.errcheck = ErrCheckBool
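+
+# Usage sketch (illustrative only); the process handle must have been opened
+# with PROCESS_SET_QUOTA and PROCESS_TERMINATE access:
+#
+#   AssignProcessToJobObject(job, hProcess)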
+
+# GetCurrentProcess()
+# because os.getpid() is way too easy
+GetCurrentProcessProto = WINFUNCTYPE(HANDLE    # Return type
+                                     )
+GetCurrentProcessFlags = ()
+GetCurrentProcess = GetCurrentProcessProto(
+    ("GetCurrentProcess", windll.kernel32),
+    GetCurrentProcessFlags)
+GetCurrentProcess.errcheck = ErrCheckHandle
+
+# IsProcessInJob()
+try:
+    IsProcessInJobProto = WINFUNCTYPE(BOOL,     # Return type
+                                      HANDLE,   # Process Handle
+                                      HANDLE,   # Job Handle
+                                      LPBOOL      # Result
+                                      )
+    IsProcessInJobFlags = ((1, "ProcessHandle"),
+                           (1, "JobHandle", HANDLE(0)),
+                           (2, "Result"))
+    IsProcessInJob = IsProcessInJobProto(
+        ("IsProcessInJob", windll.kernel32),
+        IsProcessInJobFlags)
+    IsProcessInJob.errcheck = ErrCheckBool
+except AttributeError:
+    # Windows 2000 doesn't have this API
+    def IsProcessInJob(process):
+        return False
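+
+# Usage sketch (illustrative only): "Result" is declared with paramflag 2
+# (output), so ctypes allocates the BOOL itself and returns its value:
+#
+#   in_job = IsProcessInJob(GetCurrentProcess())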
+
+
+# ResumeThread()
+
+def ErrCheckResumeThread(result, func, args):
+    # ResumeThread returns (DWORD)-1 on failure; ctypes maps the unsigned
+    # DWORD return type to a plain Python int, so compare against 0xFFFFFFFF
+    # rather than -1.
+    if result == 0xFFFFFFFF:
+        raise WinError()
+
+    return args
+
+
+ResumeThreadProto = WINFUNCTYPE(DWORD,      # Return type
+                                HANDLE      # hThread
+                                )
+ResumeThreadFlags = ((1, "hThread"),)
+ResumeThread = ResumeThreadProto(("ResumeThread", windll.kernel32),
+                                 ResumeThreadFlags)
+ResumeThread.errcheck = ErrCheckResumeThread
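+
+# Usage sketch (illustrative only): a child created with CREATE_SUSPENDED
+# does not start running until its primary thread is resumed:
+#
+#   ResumeThread(hThread)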
+
+# TerminateProcess()
+
+TerminateProcessProto = WINFUNCTYPE(BOOL,    # Return type
+                                    HANDLE,  # hProcess
+                                    UINT     # uExitCode
+                                    )
+TerminateProcessFlags = ((1, "hProcess"),
+                         (1, "uExitCode", 127))
+TerminateProcess = TerminateProcessProto(
+    ("TerminateProcess", windll.kernel32),
+    TerminateProcessFlags)
+TerminateProcess.errcheck = ErrCheckBool
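+
+# Usage sketch (illustrative only): uExitCode defaults to 127 here, which is
+# exactly what parent() below asserts after killing its child:
+#
+#   TerminateProcess(hProcess)     # child exits with code 127
+#   TerminateProcess(hProcess, 1)  # or pass an explicit exit code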
+
+# TerminateJobObject()
+
+TerminateJobObjectProto = WINFUNCTYPE(BOOL,    # Return type
+                                      HANDLE,  # hJob
+                                      UINT     # uExitCode
+                                      )
+TerminateJobObjectFlags = ((1, "hJob"),
+                           (1, "uExitCode", 127))
+TerminateJobObject = TerminateJobObjectProto(
+    ("TerminateJobObject", windll.kernel32),
+    TerminateJobObjectFlags)
+TerminateJobObject.errcheck = ErrCheckBool
+
+# WaitForSingleObject()
+
+WaitForSingleObjectProto = WINFUNCTYPE(DWORD,   # Return type
+                                       HANDLE,  # hHandle
+                                       DWORD,   # dwMilliseconds
+                                       )
+WaitForSingleObjectFlags = ((1, "hHandle"),
+                            (1, "dwMilliseconds", -1))
+WaitForSingleObject = WaitForSingleObjectProto(
+    ("WaitForSingleObject", windll.kernel32),
+    WaitForSingleObjectFlags)
+
+# http://msdn.microsoft.com/en-us/library/ms681381%28v=vs.85%29.aspx
+INFINITE = -1  # ctypes converts -1 to the DWORD 0xFFFFFFFF defined in winbase.h
+WAIT_TIMEOUT = 0x0102
+WAIT_OBJECT_0 = 0x0
+WAIT_ABANDONED = 0x0080
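+
+# Usage sketch (illustrative only): wait on a process handle with a one
+# second timeout and distinguish the two common outcomes:
+#
+#   rc = WaitForSingleObject(hProcess, 1000)
+#   if rc == WAIT_OBJECT_0:
+#       pass  # the process exited
+#   elif rc == WAIT_TIMEOUT:
+#       pass  # still running after one second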
+
+# http://msdn.microsoft.com/en-us/library/ms683189%28VS.85%29.aspx
+STILL_ACTIVE = 259
+
+# Used when we terminate a process.
+ERROR_CONTROL_C_EXIT = 0x23c
+
+# GetExitCodeProcess()
+
+GetExitCodeProcessProto = WINFUNCTYPE(BOOL,     # Return type
+                                      HANDLE,   # hProcess
+                                      LPDWORD,  # lpExitCode
+                                      )
+GetExitCodeProcessFlags = ((1, "hProcess"),
+                           (2, "lpExitCode"))
+GetExitCodeProcess = GetExitCodeProcessProto(
+    ("GetExitCodeProcess", windll.kernel32),
+    GetExitCodeProcessFlags)
+GetExitCodeProcess.errcheck = ErrCheckBool
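+
+# Usage sketch (illustrative only): lpExitCode is declared with paramflag 2
+# (output), so the bound function returns the DWORD directly:
+#
+#   code = GetExitCodeProcess(hProcess)
+#   if code == STILL_ACTIVE:
+#       pass  # the process has not exited yet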
+
+
+def CanCreateJobObject():
+    currentProc = GetCurrentProcess()
+    if IsProcessInJob(currentProc):
+        # A NULL job handle queries the job that the calling process
+        # belongs to.
+        jobinfo = QueryInformationJobObject(HANDLE(0), 'JobObjectExtendedLimitInformation')
+        limitflags = jobinfo['BasicLimitInformation']['LimitFlags']
+        return bool(limitflags & JOB_OBJECT_LIMIT_BREAKAWAY_OK) or \
+            bool(limitflags & JOB_OBJECT_LIMIT_SILENT_BREAKAWAY_OK)
+    else:
+        return True
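+
+# Usage sketch (illustrative only): callers consult this before creating a
+# job, since before Windows 8 a process that is already inside a job can
+# only put children in a new job if its own job permits breakaway:
+#
+#   if CanCreateJobObject():
+#       job = CreateJobObject()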
+
+# testing functions
+
+
+def parent():
+    print 'Starting parent'
+    currentProc = GetCurrentProcess()
+    if IsProcessInJob(currentProc):
+        print >> sys.stderr, "You should not be in a job object to test"
+        sys.exit(1)
+    assert CanCreateJobObject()
+    print 'File: %s' % __file__
+    command = [sys.executable, __file__, '-child']
+    print 'Running command: %s' % command
+    process = subprocess.Popen(command)
+    process.kill()
+    # This assumes the wrapped Popen's kill() waits on the child and sets
+    # returncode; a plain subprocess.Popen would need wait() first.
+    code = process.returncode
+    print 'Child code: %s' % code
+    assert code == 127
+
+
+def child():
+    print 'Starting child'
+    currentProc = GetCurrentProcess()
+    injob = IsProcessInJob(currentProc)
+    print "Is in a job?: %s" % injob
+    can_create = CanCreateJobObject()
+    print 'Can create job?: %s' % can_create
+    process = subprocess.Popen('c:\\windows\\notepad.exe')
+    assert process._job
+    jobinfo = QueryInformationJobObject(process._job, 'JobObjectExtendedLimitInformation')
+    print 'Job info: %s' % jobinfo
+    limitflags = jobinfo['BasicLimitInformation']['LimitFlags']
+    print 'LimitFlags: %s' % limitflags
+    process.kill()
--- a/testing/mozharness/tox.ini
+++ b/testing/mozharness/tox.ini
@@ -1,23 +1,21 @@
 [tox]
 envlist = py27-hg4.3
 
 [base]
 deps =
     coverage
     nose
     rednose
-mozbase = {toxinidir}/../mozbase
 
 [testenv]
 basepython = python2.7
 setenv =
     HGRCPATH = {toxinidir}/test/hgrc
-    PYTHONPATH = $PYTHONPATH:{[base]mozbase}/manifestparser:{[base]mozbase}/mozfile:{[base]mozbase}/mozinfo:{[base]mozbase}/mozprocess
 
 commands =
     coverage run --source configs,mozharness,scripts --branch {envbindir}/nosetests -v --with-xunit --rednose --force-color {posargs}
 
 [testenv:py27-hg4.3]
 deps =
     {[base]deps}
     mercurial==4.3.1