Bug 1263425 - Include crashes in Marionette runner's error/failure count; r?AutomatedTester

author      Maja Frydrychowicz <mjzffr@gmail.com>
date        Mon, 18 Apr 2016 15:33:43 -0400
changeset   352767  9889b5ae3bc61fa1a5322f1dbd6f0888634065e1
parent      352668  6066850740cd4711ee5502fda89f422440b7c2cc
child       518738  eb79f4c7ac8bd58e6f95e9506dd9a0dc9545206e
push id     15791
push user   mjzffr@gmail.com
push date   Mon, 18 Apr 2016 19:36:59 +0000
reviewers   AutomatedTester
bugs        1263425
milestone   48.0a1
Bug 1263425 - Include crashes in Marionette runner's error/failure count; r?AutomatedTester

This addresses the case where there is a crash immediately after a passing
test, in which case the test suite stops and no failure is recorded.

MozReview-Commit-ID: LDzQfQX5NZQ
testing/marionette/harness/marionette/runner/base.py
testing/marionette/harness/marionette/runtests.py
testing/marionette/harness/marionette/tests/harness_unit/test_marionette_runner.py
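
For context, a minimal sketch of the behavior this patch fixes. FakeRunner and
harness_run are hypothetical stand-ins for MarionetteTestRunner and
MarionetteHarness.run(); before this change, run() returned runner.failed
alone, so a crash after the last passing test produced a clean exit:

    # Sketch, not part of the patch: FakeRunner and harness_run are
    # hypothetical stand-ins for the real runner and harness.
    class FakeRunner(object):
        failed = 0    # no assertion failures...
        crashed = 1   # ...but the browser crashed after the last passing test

    def harness_run(runner):
        # After this change, run() reports failures and crashes together.
        return runner.failed + runner.crashed

    failed_or_crashed = harness_run(FakeRunner())
    assert failed_or_crashed == 1  # before this patch the result was 0
    # cli() turns a nonzero result into exit code 10 (see test_cli_exit_code).
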
--- a/testing/marionette/harness/marionette/runner/base.py
+++ b/testing/marionette/harness/marionette/runner/base.py
@@ -210,16 +210,20 @@ class MarionetteTestResult(StructuredTes
                     self.logger.info(' '.join(line).encode('ascii', 'replace'))
                 self.logger.info('END LOG:')
 
     def stopTest(self, *args, **kwargs):
         unittest._TextTestResult.stopTest(self, *args, **kwargs)
         if self.marionette.check_for_crash():
             # this tells unittest.TestSuite not to continue running tests
             self.shouldStop = True
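+            # Record the crash as an error against the test that just
+            # finished, so the error/failure count reflects it.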
+            test = next((a for a in args if isinstance(a, unittest.TestCase)),
+                        None)
+            if test:
+                self.addError(test, sys.exc_info())
 
 
 class MarionetteTextTestRunner(StructuredTestRunner):
 
     resultclass = MarionetteTestResult
 
     def __init__(self, **kwargs):
         self.marionette = kwargs.pop('marionette')
@@ -729,16 +733,17 @@ class BaseMarionetteTestRunner(object):
             self.marionette.cleanup()
             if self.marionette.instance:
                 self.marionette.instance = None
         self.marionette = None
 
     def reset_test_stats(self):
         self.passed = 0
         self.failed = 0
+        self.crashed = 0
         self.unexpected_successes = 0
         self.todo = 0
         self.skipped = 0
         self.failures = []
 
     def _build_kwargs(self):
         if self.logdir and not os.access(self.logdir, os.F_OK):
             os.mkdir(self.logdir)
@@ -847,16 +852,25 @@ setReq.onsuccess = function() {
 }
 setReq.onerror = function() {
     marionetteScriptFinished(false);
 }""", script_timeout=60000)
 
         if not result:
             raise Exception("Could not launch test container app")
 
+    def record_crash(self):
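+        # Default to True so that an exception from check_for_crash is
+        # itself treated as a crash by the caller.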
+        crash = True
+        try:
+            crash = self.marionette.check_for_crash()
+            self.crashed += int(crash)
+        except Exception:
+            traceback.print_exc()
+        return crash
+
     def run_tests(self, tests):
         assert len(tests) > 0
         assert len(self.test_handlers) > 0
         self.reset_test_stats()
         self.start_time = time.time()
 
         need_external_ip = True
         if not self.marionette:
@@ -959,21 +973,17 @@ setReq.onerror = function() {
         else:
             self.logger.info('todo: %d (skipped: %d)' % (self.todo, self.skipped))
 
         if self.failed > 0:
             self.logger.info('\nFAILED TESTS\n-------')
             for failed_test in self.failures:
                 self.logger.info('%s' % failed_test[0])
 
-        try:
-            self.marionette.check_for_crash()
-        except:
-            traceback.print_exc()
-
+        self.record_crash()
         self.end_time = time.time()
         self.elapsedtime = self.end_time - self.start_time
 
         if self.marionette.instance:
             self.marionette.instance.close()
             self.marionette.instance = None
 
         self.marionette.cleanup()
@@ -1130,17 +1140,17 @@ setReq.onerror = function() {
 
     def run_test_set(self, tests):
         if self.shuffle:
             random.seed(self.shuffle_seed)
             random.shuffle(tests)
 
         for test in tests:
             self.run_test(test['filepath'], test['expected'], test['test_container'])
-            if self.marionette.check_for_crash():
+            if self.record_crash():
                 break
 
     def run_test_sets(self):
         if len(self.tests) < 1:
             raise Exception('There are no tests to run.')
         elif self.total_chunks > len(self.tests):
             raise ValueError('Total number of chunks must be between 1 and %d.' % len(self.tests))
         if self.total_chunks > 1:
--- a/testing/marionette/harness/marionette/runtests.py
+++ b/testing/marionette/harness/marionette/runtests.py
@@ -60,17 +60,17 @@ class MarionetteHarness(object):
             MarionetteTestCase.pydebugger = __import__(self.args['pydebugger'])
 
     def run(self):
         try:
             self.process_args()
             tests = self.args.pop('tests')
             runner = self._runner_class(**self.args)
             runner.run_tests(tests)
-            return runner.failed
+            return runner.failed + runner.crashed
         except Exception:
             logger = self.args.get('logger')
             if logger:
                 logger.error('Failure during test execution.',
                                        exc_info=True)
             raise
 
 
--- a/testing/marionette/harness/marionette/tests/harness_unit/test_marionette_runner.py
+++ b/testing/marionette/harness/marionette/tests/harness_unit/test_marionette_runner.py
@@ -1,24 +1,61 @@
 # This Source Code Form is subject to the terms of the Mozilla Public
 # License, v. 2.0. If a copy of the MPL was not distributed with this
 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
 import pytest
-from mock import patch, Mock
+from mock import patch, Mock, DEFAULT
 
 from marionette.runtests import (
     MarionetteTestRunner,
     MarionetteHarness,
     cli
 )
+from marionette.runner import MarionetteTestResult
 
 # avoid importing MarionetteJSTestCase to prevent pytest from
 # collecting and running it as part of this test suite
 import marionette.marionette_test as marionette_test
 
+
+def _check_crash_counts(has_crashed, runner, mock_marionette):
+    if has_crashed:
+        assert mock_marionette.check_for_crash.call_count == 1
+        assert runner.crashed == 1
+    else:
+        assert runner.crashed == 0
+
+
+@pytest.fixture()
+def mock_marionette(request):
+    """ Mock marionette instance """
+    import marionette_driver
+    marionette = Mock(spec=marionette_driver.marionette.Marionette)
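+    # If the requesting test is parametrized with has_crashed, mirror it
+    # in the mock's check_for_crash return value.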
+    if 'has_crashed' in request.funcargnames:
+        marionette.check_for_crash.return_value = request.getfuncargvalue(
+            'has_crashed'
+        )
+    return marionette
+
+
+@pytest.fixture()
+def empty_marionette_testcase():
+    """ Testable MarionetteTestCase class """
+    class EmptyTestCase(marionette_test.MarionetteTestCase):
+        def test_nothing(self):
+            pass
+
+    return EmptyTestCase
+
+
+@pytest.fixture()
+def empty_marionette_test(mock_marionette, empty_marionette_testcase):
+    return empty_marionette_testcase(lambda: mock_marionette, 'test_nothing')
+
+
 @pytest.fixture(scope='module')
 def logger():
     """
     Fake logger to help with mocking out other runner-related classes.
     """
     import mozlog
     return Mock(spec=mozlog.structuredlog.StructuredLogger)
 
@@ -86,78 +123,92 @@ def mach_parsed_kwargs(logger):
          'tree': 'b2g',
          'type': None,
          'verbose': None,
          'workspace': None,
          'logger': logger,
     }
 
 
+@pytest.fixture()
+def runner(mach_parsed_kwargs):
+    """
+    MarionetteTestRunner instance initialized with default options.
+    """
+    return MarionetteTestRunner(**mach_parsed_kwargs)
+
 
 @pytest.fixture
 def harness_class(request):
     """
     Mock based on MarionetteHarness whose run method just returns a number of
-    failures according to the supplied test parameter
+    failures and crashes according to the supplied test parameter
     """
-    if 'num_failures' in request.funcargnames:
-        failures = request.getfuncargvalue('num_failures')
+    if 'num_fails_crashed' in request.funcargnames:
+        num_fails_crashed = request.getfuncargvalue('num_fails_crashed')
     else:
-        failures = 0
+        num_fails_crashed = (0, 0)
     harness_cls = Mock(spec=MarionetteHarness)
     harness = harness_cls.return_value
-    if failures is None:
+    if num_fails_crashed is None:
         harness.run.side_effect = Exception
     else:
-        harness.run.return_value = failures
+        harness.run.return_value = sum(num_fails_crashed)
     return harness_cls
 
 
 @pytest.fixture
 def runner_class(request):
     """
-    Mock based on MarionetteTestRunner, wherein the runner.failed
-    attribute is provided by a test parameter
+    Mock based on MarionetteTestRunner, wherein the runner.failed and
+    runner.crashed attributes are provided by a test parameter
     """
-    if 'num_failures' in request.funcargnames:
-        failures = request.getfuncargvalue('num_failures')
+    if 'num_fails_crashed' in request.funcargnames:
+        failures, crashed = request.getfuncargvalue('num_fails_crashed')
     else:
         failures = 0
+        crashed = 0
     mock_runner_class = Mock(spec=MarionetteTestRunner)
     runner = mock_runner_class.return_value
     runner.failed = failures
+    runner.crashed = crashed
     return mock_runner_class
 
 
 @pytest.mark.parametrize(
-    "num_failures,exit_code",
-    [(0, 0), (1, 10), (None, 1)],
+    "num_fails_crashed,exit_code",
+    [((0, 0), 0), ((1, 0), 10), ((0, 1), 10), (None, 1)],
 )
-def test_cli_exit_code(num_failures, exit_code, harness_class):
+def test_cli_exit_code(num_fails_crashed, exit_code, harness_class):
     with pytest.raises(SystemExit) as err:
         cli(harness_class=harness_class)
     assert err.value.code == exit_code
 
 
-@pytest.mark.parametrize("num_failures", [0, 1])
+@pytest.mark.parametrize("num_fails_crashed", [(0, 0), (1, 0), (1, 1)])
 def test_call_harness_with_parsed_args_yields_num_failures(mach_parsed_kwargs,
                                                            runner_class,
-                                                           num_failures):
-    with patch('marionette.runtests.MarionetteHarness.parse_args') as parse_args:
-        failed = MarionetteHarness(runner_class, args=mach_parsed_kwargs).run()
+                                                           num_fails_crashed):
+    with patch(
+        'marionette.runtests.MarionetteHarness.parse_args'
+    ) as parse_args:
+        failed_or_crashed = MarionetteHarness(runner_class,
+                                              args=mach_parsed_kwargs).run()
         parse_args.assert_not_called()
-    assert failed == num_failures
+    assert failed_or_crashed == sum(num_fails_crashed)
 
 
 def test_call_harness_with_no_args_yields_num_failures(runner_class):
-    with patch('marionette.runtests.MarionetteHarness.parse_args') as parse_args:
-        parse_args.return_value = {'tests':[]}
-        failed = MarionetteHarness(runner_class).run()
+    with patch(
+        'marionette.runtests.MarionetteHarness.parse_args'
+    ) as parse_args:
+        parse_args.return_value = {'tests': []}
+        failed_or_crashed = MarionetteHarness(runner_class).run()
         assert parse_args.call_count == 1
-    assert failed == 0
+    assert failed_or_crashed == 0
 
 
 def test_harness_sets_up_default_test_handlers(mach_parsed_kwargs):
     """
     If the necessary TestCase is not in test_handlers,
     tests are omitted silently
     """
     harness = MarionetteHarness(args=mach_parsed_kwargs)
@@ -165,29 +216,77 @@ def test_harness_sets_up_default_test_ha
     runner = harness._runner_class(**mach_parsed_kwargs)
     assert marionette_test.MarionetteTestCase in runner.test_handlers
     assert marionette_test.MarionetteJSTestCase in runner.test_handlers
 
 
 def test_parsing_testvars(mach_parsed_kwargs):
     mach_parsed_kwargs.pop('tests')
     testvars_json_loads = [
-        {"wifi":{"ssid": "blah", "keyManagement": "WPA-PSK", "psk": "foo"}},
-        {"wifi":{"PEAP": "bar"}, "device": {"stuff": "buzz"}}
+        {"wifi": {"ssid": "blah", "keyManagement": "WPA-PSK", "psk": "foo"}},
+        {"wifi": {"PEAP": "bar"}, "device": {"stuff": "buzz"}}
     ]
     expected_dict = {
          "wifi": {
              "ssid": "blah",
              "keyManagement": "WPA-PSK",
              "psk": "foo",
-             "PEAP":"bar"
+             "PEAP": "bar"
          },
-         "device": {"stuff":"buzz"}
+         "device": {"stuff": "buzz"}
     }
-    with patch('marionette.runtests.MarionetteTestRunner._load_testvars') as load:
+    with patch(
+        'marionette.runtests.MarionetteTestRunner._load_testvars'
+    ) as load:
         load.return_value = testvars_json_loads
         runner = MarionetteTestRunner(**mach_parsed_kwargs)
         assert runner.testvars == expected_dict
         assert load.call_count == 1
 
+
+@pytest.mark.parametrize("has_crashed", [True, False])
+def test_crash_is_recorded_as_error(empty_marionette_test,
+                                    logger,
+                                    has_crashed):
+    """ Number of errors is incremented by stopTest iff has_crashed is true """
+    # collect results from the empty test
+    result = MarionetteTestResult(
+        marionette=empty_marionette_test._marionette_weakref(),
+        b2g_pid=0, logcat_stdout=False, logger=logger, verbosity=None,
+        stream=None, descriptions=None,
+    )
+    result.startTest(empty_marionette_test)
+    assert len(result.errors) == 0
+    assert len(result.failures) == 0
+    assert result.testsRun == 1
+    assert result.shouldStop is False
+    result.stopTest(empty_marionette_test)
+    assert result.shouldStop == has_crashed
+    if has_crashed:
+        assert len(result.errors) == 1
+    else:
+        assert len(result.errors) == 0
+
+
+@pytest.mark.parametrize("has_crashed", [True, False])
+def test_increment_crash_count_in_run_test_set(runner, has_crashed,
+                                               mock_marionette):
+    fake_tests = [{'filepath': i,
+                   'expected': 'pass',
+                   'test_container': False} for i in 'abc']
+
+    with patch.multiple(runner, run_test=DEFAULT, marionette=mock_marionette):
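+        # run_test is replaced by a MagicMock (DEFAULT), so only the
+        # crash-checking loop in run_test_set is exercised here.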
+        runner.run_test_set(fake_tests)
+        if not has_crashed:
+            assert runner.marionette.check_for_crash.call_count == len(fake_tests)
+        _check_crash_counts(has_crashed, runner, runner.marionette)
+
+
+@pytest.mark.parametrize("has_crashed", [True, False])
+def test_record_crash(runner, has_crashed, mock_marionette):
+    with patch.object(runner, 'marionette', mock_marionette):
+        assert runner.record_crash() == has_crashed
+        _check_crash_counts(has_crashed, runner, runner.marionette)
+
+
 if __name__ == '__main__':
     import sys
     sys.exit(pytest.main(['--verbose', __file__]))