Chromium Code Reviews
| Index: build/android/pylib/instrumentation/test_runner.py |
| diff --git a/build/android/pylib/instrumentation/test_runner.py b/build/android/pylib/instrumentation/test_runner.py |
| index e526bff29a35a4d6eb8b40ad8775f3206a847215..85a523ce91d36e2919365389735416613a0b2e27 100644 |
| --- a/build/android/pylib/instrumentation/test_runner.py |
| +++ b/build/android/pylib/instrumentation/test_runner.py |
| @@ -27,6 +27,9 @@ import perf_tests_results_helper # pylint: disable=F0401 |
| _PERF_TEST_ANNOTATION = 'PerfTest' |
| +_PARAMETERIZED_TEST_ANNOTATION = 'ParameterizedTest' |
| +_PARAMETERIZED_TEST_SET_ANNOTATION = 'ParameterizedTest$Set' |
| +_COMMAND_LINE_PARAMETER = 'cmdlinearg-parameter' |
| class TestRunner(base_test_runner.BaseTestRunner): |
|
jbudorick
2015/10/27 16:14:37
beware that I'm actively working on deprecating th
mnaganov (inactive)
2015/10/27 23:53:30
No problem, how do you run it?
jbudorick
2015/10/28 16:36:35
It's a bit verbose at the moment, but I'm working
|
| @@ -123,7 +126,7 @@ class TestRunner(base_test_runner.BaseTestRunner): |
| self.flags.Restore() |
| super(TestRunner, self).TearDown() |
| - def TestSetup(self, test): |
| + def TestSetup(self, test, flag_modifiers): |
| """Sets up the test harness for running a particular test. |
| Args: |
| @@ -133,8 +136,8 @@ class TestRunner(base_test_runner.BaseTestRunner): |
| self._SetupIndividualTestTimeoutScale(test) |
| self.tool.SetupEnvironment() |
| - if self.flags and self._IsFreTest(test): |
| - self.flags.RemoveFlags(['--disable-fre']) |
| + if self.flags: |
| + self.flags.PushFlags(add=flag_modifiers[0], remove=flag_modifiers[1]) |
| # Make sure the forwarder is still running. |
| self._RestartHttpServerForwarderIfNecessary() |
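For context, here is a minimal sketch of the push/restore semantics the new TestSetup/TestTeardown pairing relies on. FakeFlagChanger is a hypothetical stand-in inferred only from the PushFlags(add=..., remove=...) call site above; the real FlagChanger lives elsewhere in pylib and is not part of this CL.

  # Hypothetical stand-in for pylib's FlagChanger, inferred from the
  # PushFlags(add=..., remove=...) call site above.
  class FakeFlagChanger(object):
    def __init__(self, flags):
      self._stack = [list(flags)]

    def PushFlags(self, add=None, remove=None):
      # Drop the removed flags, then append the added ones.
      flags = [f for f in self._stack[-1] if f not in (remove or [])]
      flags.extend(f for f in (add or []) if f not in flags)
      self._stack.append(flags)

    def Restore(self):
      # Matches the self.flags.Restore() call in TestTeardown below.
      self._stack.pop()

  changer = FakeFlagChanger(['--disable-fre', '--foo'])
  changer.PushFlags(add=['--bar'], remove=['--disable-fre'])
  assert changer._stack[-1] == ['--foo', '--bar']
  changer.Restore()
  assert changer._stack[-1] == ['--disable-fre', '--foo']

Each TestSetup call pushes one modified flag set and the matching Restore in TestTeardown pops it, so parameterized runs cannot leak flags into each other.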
| @@ -171,6 +174,54 @@ class TestRunner(base_test_runner.BaseTestRunner): |
| """ |
| return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test) |
| + def _GetTestCmdlineParameters(self, test): |
| + """Determines whether the test is parameterized to be run with different |
| + command-line flags. |
| + |
| + Args: |
| + test: The name of the test to be checked. |
| + |
| + Returns: |
| + If the test is parameterized, returns a list of add/remove tuples |
| + with lists of flags, e.g.: |
| + |
| + [(["--flag-to-add"], []), ([], ["--flag-to-remove"]), ([], [])] |
|
jbudorick
2015/10/27 16:14:36
This would be more readable if it returned a colle
mnaganov (inactive)
2015/10/27 23:53:30
Thanks! I'm learning neat Python tricks with you :
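As a sketch of the reviewer's suggestion, the (add, remove) pairs could be named tuples instead of plain tuples. The FlagModifiers name and its fields are assumptions for illustration, not part of this CL:

  import collections

  # Hypothetical named shape for the (add, remove) pairs.
  FlagModifiers = collections.namedtuple('FlagModifiers', ['add', 'remove'])

  modifiers = FlagModifiers(add=['--flag-to-add'], remove=[])
  assert modifiers.add == ['--flag-to-add']
  assert modifiers[1] == []  # Still indexable like the plain tuples above.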
|
| + |
| + This means the test must be run three times: first with |
| + "--flag-to-add" added to the command line, then with |
| + "--flag-to-remove" removed from the command line, and finally with |
| + the default command-line args. If the same flag is listed in both |
| + the add and remove lists, it is left unchanged. |
| + |
| + """ |
| + parameterized_tests = [] |
| + annotations = self.test_pkg.GetTestAnnotations(test) |
| + if _PARAMETERIZED_TEST_ANNOTATION in annotations: |
| + parameterized_tests = [annotations[_PARAMETERIZED_TEST_ANNOTATION]] |
| + elif _PARAMETERIZED_TEST_SET_ANNOTATION in annotations: |
| + if annotations[_PARAMETERIZED_TEST_SET_ANNOTATION]: |
| + parameterized_tests = annotations[ |
| + _PARAMETERIZED_TEST_SET_ANNOTATION].get('tests', []) |
| + else: |
| + return [([], [])] |
| + |
| + result = [] |
| + for pt in parameterized_tests: |
| + if not pt: |
| + continue |
| + for p in pt['parameters']: |
| + if p['tag'] == _COMMAND_LINE_PARAMETER: |
| + to_add = [] |
| + to_remove = [] |
| + if 'arguments' in p: |
| + for a in p['arguments']: |
| + if a['name'] == 'add': |
| + to_add = a['stringArray'] |
| + elif a['name'] == 'remove': |
| + to_remove = a['stringArray'] |
| + result.append((to_add, to_remove)) |
| + return result if result else [([], [])] |
| + |
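To make the docstring above concrete, here is a standalone sketch of the same parsing over a hand-built annotations dict. The dict layout is an assumption inferred from the parsing code in _GetTestCmdlineParameters, not taken from any pylib documentation:

  _COMMAND_LINE_PARAMETER = 'cmdlinearg-parameter'

  def get_cmdline_parameters(test_annotation):
    # Standalone copy of the parsing logic in _GetTestCmdlineParameters.
    result = []
    for pt in test_annotation.get('tests', []):
      if not pt:
        continue
      for p in pt['parameters']:
        if p['tag'] == _COMMAND_LINE_PARAMETER:
          to_add, to_remove = [], []
          for a in p.get('arguments', []):
            if a['name'] == 'add':
              to_add = a['stringArray']
            elif a['name'] == 'remove':
              to_remove = a['stringArray']
          result.append((to_add, to_remove))
    return result if result else [([], [])]

  annotation = {'tests': [
      {'parameters': [{'tag': _COMMAND_LINE_PARAMETER,
                       'arguments': [{'name': 'add',
                                      'stringArray': ['--flag-to-add']}]}]},
      {'parameters': [{'tag': _COMMAND_LINE_PARAMETER,
                       'arguments': [{'name': 'remove',
                                      'stringArray': ['--flag-to-remove']}]}]},
      {'parameters': [{'tag': _COMMAND_LINE_PARAMETER, 'arguments': []}]},
  ]}
  assert get_cmdline_parameters(annotation) == [
      (['--flag-to-add'], []), ([], ['--flag-to-remove']), ([], [])]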
| def SetupPerfMonitoringIfNeeded(self, test): |
| """Sets up performance monitoring if the specified test requires it. |
| @@ -184,7 +235,7 @@ class TestRunner(base_test_runner.BaseTestRunner): |
| self._logcat_monitor = self.device.GetLogcatMonitor() |
| self._logcat_monitor.Start() |
| - def TestTeardown(self, test, result): |
| + def TestTeardown(self, test, results): |
| """Cleans up the test harness after running a particular test. |
| Depending on the options of this TestRunner this might handle performance |
| @@ -192,17 +243,17 @@ class TestRunner(base_test_runner.BaseTestRunner): |
| Args: |
| test: The name of the test that was just run. |
| - result: result for this test. |
| + results: results for this test. |
| """ |
| self.tool.CleanUpEnvironment() |
| - if self.flags and self._IsFreTest(test): |
| + if self.flags: |
| self.flags.Restore() |
| - if not result: |
| + if not results: |
| return |
| - if result.DidRunPass(): |
| + if results.DidRunPass(): |
| self.TearDownPerfMonitoring(test) |
| if self.coverage_dir: |
| @@ -345,40 +396,59 @@ class TestRunner(base_test_runner.BaseTestRunner): |
| self._GetIndividualTestTimeoutScale(test) * |
| self.tool.GetTimeoutScale()) |
| - start_ms = 0 |
| - duration_ms = 0 |
| - try: |
| - self.TestSetup(test) |
| - |
| + cmdline_parameters = self._GetTestCmdlineParameters(test) |
|
jbudorick
2015/10/27 16:14:37
This should be part of test list generation, not t
mnaganov (inactive)
2015/10/27 23:53:30
But... doesn't the test list consist of just test names?
jbudorick
2015/10/28 16:36:35
Oh right, that's true in this version. In the new
|
| + for cmdline_modifiers in cmdline_parameters: |
| + start_ms = 0 |
| + duration_ms = 0 |
| try: |
| - self.device.GoHome() |
| - except device_errors.CommandTimeoutError: |
| - logging.exception('Failed to focus the launcher.') |
| - |
| - time_ms = lambda: int(time.time() * 1000) |
| - start_ms = time_ms() |
| - raw_output = self._RunTest(test, timeout) |
| - duration_ms = time_ms() - start_ms |
| - |
| - # Parse the test output |
| - result_code, result_bundle, statuses = ( |
| - instrumentation_test_instance.ParseAmInstrumentRawOutput(raw_output)) |
| - result = self._GenerateTestResult( |
| - test, result_code, result_bundle, statuses, start_ms, duration_ms) |
| - if local_device_instrumentation_test_run.DidPackageCrashOnDevice( |
| - self.test_pkg.GetPackageName(), self.device): |
| - result.SetType(base_test_result.ResultType.CRASH) |
| - results.AddResult(result) |
| - except device_errors.CommandTimeoutError as e: |
| - results.AddResult(test_result.InstrumentationTestResult( |
| + # Copy the inner lists: appending below must not mutate the |
| + # tuples held in cmdline_parameters. |
| + flag_modifiers = (list(cmdline_modifiers[0]), |
| + list(cmdline_modifiers[1])) |
| + if self._IsFreTest(test): |
|
jbudorick
2015/10/27 16:14:37
We should be able to remove this in light of https
mnaganov (inactive)
2015/10/27 23:53:30
Oh, cool. I thought that maybe it was again import
|
| + flag_modifiers[1].append('--disable-fre') |
| + self.TestSetup(test, flag_modifiers) |
| + |
| + try: |
| + self.device.GoHome() |
| + except device_errors.CommandTimeoutError: |
| + logging.exception('Failed to focus the launcher.') |
| + |
| + time_ms = lambda: int(time.time() * 1000) |
| + start_ms = time_ms() |
| + raw_output = self._RunTest(test, timeout) |
| + duration_ms = time_ms() - start_ms |
| + |
| + # Parse the test output |
| + result_code, result_bundle, statuses = ( |
| + instrumentation_test_instance.ParseAmInstrumentRawOutput( |
| + raw_output)) |
| + result = self._GenerateTestResult( |
| + test, result_code, result_bundle, statuses, start_ms, duration_ms) |
| + if local_device_instrumentation_test_run.DidPackageCrashOnDevice( |
| + self.test_pkg.GetPackageName(), self.device): |
| + result.SetType(base_test_result.ResultType.CRASH) |
| + if len(cmdline_parameters) > 1: |
| + result.SetName( |
| + result.GetName() + ' with {' + ' '.join(cmdline_modifiers[0]) + '}') |
|
jbudorick
2015/10/27 16:14:37
This name should be generated once, presumably bef
mnaganov (inactive)
2015/10/27 23:53:30
We can't change the 'test' string, because it is t
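A hedged sketch of the "generate the name once" idea from the thread above; _ParameterizedName is a hypothetical helper, not something this CL adds:

  # Hypothetical helper: build the decorated result name once per
  # parameter set instead of repeating it in every result branch.
  def _ParameterizedName(test, cmdline_parameters, cmdline_modifiers):
    if len(cmdline_parameters) > 1:
      return '%s with {%s}' % (test, ' '.join(cmdline_modifiers[0]))
    return test

  assert (_ParameterizedName(
      'FooTest', [(['--a'], []), ([], [])], (['--a'], []))
      == 'FooTest with {--a}')
  assert _ParameterizedName('FooTest', [([], [])], ([], [])) == 'FooTest'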
|
| + results.AddResult(result) |
| + except device_errors.CommandTimeoutError as e: |
| + result = test_result.InstrumentationTestResult( |
| test, base_test_result.ResultType.TIMEOUT, start_ms, duration_ms, |
| - log=str(e) or 'No information')) |
| - if self.package_info: |
| - self.device.ForceStop(self.package_info.package) |
| - self.device.ForceStop(self.package_info.test_package) |
| - except device_errors.DeviceUnreachableError as e: |
| - results.AddResult(test_result.InstrumentationTestResult( |
| - test, base_test_result.ResultType.CRASH, start_ms, duration_ms, |
| - log=str(e) or 'No information')) |
| - self.TestTeardown(test, results) |
| + log=str(e) or 'No information') |
| + if len(cmdline_parameters) > 1: |
| + result.SetName( |
| + result.GetName() + ' with {' + ' '.join(cmdline_modifiers[0]) + '}') |
| + results.AddResult(result) |
| + if self.package_info: |
| + self.device.ForceStop(self.package_info.package) |
| + self.device.ForceStop(self.package_info.test_package) |
| + except device_errors.DeviceUnreachableError as e: |
| + result = test_result.InstrumentationTestResult( |
| + test, base_test_result.ResultType.CRASH, start_ms, duration_ms, |
| + log=str(e) or 'No information') |
| + if len(cmdline_parameters) > 1: |
| + result.SetName( |
| + result.GetName() + ' with {' + ' '.join(cmdline_modifiers[0]) + '}') |
| + results.AddResult(result) |
| + |
| + self.TestTeardown(test, results) |
| + |
| return (results, None if results.DidRunPass() else test) |