# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import logging
import os
import re
import pexpect

from perf_tests_helper import PrintPerfResult
from test_result import BaseTestResult, TestResults
from valgrind_tools import CreateTool


# TODO(bulach): TestPackage, TestPackageExecutable and
# TestPackageApk are a work in progress related to making the native tests
# run as an NDK app from an APK rather than as a stand-alone executable.
class TestPackage(object):
  """A helper base class for both APK and stand-alone executables.

  Args:
    adb: ADB interface the tests are using.
    device: Device to run the tests on.
    test_suite: A specific test suite to run, empty to run all.
    timeout: Timeout for each test.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    performance_test: Whether or not this is a performance test.
    cleanup_test_files: Whether or not to clean up test files on the device.
    tool: Name of the Valgrind tool.
    dump_debug_info: A debug_info object.
  """

  def __init__(self, adb, device, test_suite, timeout, rebaseline,
               performance_test, cleanup_test_files, tool, dump_debug_info):
    self.adb = adb
    self.device = device
    self.test_suite_full = test_suite
    self.test_suite = os.path.splitext(test_suite)[0]
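    # Note: _GetTestSuiteBaseName (like _GetGTestReturnCode used further
    # below) is a hook provided by subclasses such as TestPackageExecutable
    # and TestPackageApk; see the TODO above.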
    self.test_suite_basename = self._GetTestSuiteBaseName()
    self.test_suite_dirname = os.path.dirname(
        self.test_suite.split(self.test_suite_basename)[0])
    self.rebaseline = rebaseline
    self.performance_test = performance_test
    self.cleanup_test_files = cleanup_test_files
    self.tool = CreateTool(tool, self.adb)
    if timeout == 0:
      if self.test_suite_basename == 'page_cycler_tests':
        timeout = 900
      else:
        timeout = 60
      # On a VM (e.g. chromium buildbots), this timeout is way too small.
      if os.environ.get('BUILDBOT_SLAVENAME'):
        timeout = timeout * 2
    self.timeout = timeout * self.tool.GetTimeoutScale()
    self.dump_debug_info = dump_debug_info

  def _BeginGetIOStats(self):
    """Gets I/O statistics before running the tests.

    Returns:
      A tuple (I/O stats object, ready-to-continue flag). The flag is False
      if an error was encountered while waiting for the browser to launch,
      True otherwise. The I/O stats object may be None if this is not a
      performance test.
    """
    initial_io_stats = None
    # Try to get the disk I/O statistics for all performance tests.
    if self.performance_test and not self.rebaseline:
      initial_io_stats = self.adb.GetIoStats()
      # Get rid of the noise introduced by launching Chrome for page cycler.
      if self.test_suite_basename == 'page_cycler_tests':
        try:
          chrome_launch_done_re = re.compile(
              re.escape('Finish waiting for browser launch!'))
          self.adb.WaitForLogMatch(chrome_launch_done_re)
          initial_io_stats = self.adb.GetIoStats()
        except pexpect.TIMEOUT:
          logging.error('Test terminated because the Chrome launcher did not '
                        'respond within 120 seconds.')
          return (None, False)
        finally:
          if self.dump_debug_info:
            self.dump_debug_info.TakeScreenshot('_Launch_Chrome_')
    return (initial_io_stats, True)

  def _EndGetIOStats(self, initial_io_stats):
    """Gets I/O statistics after running the tests and calculates the delta.

    Args:
      initial_io_stats: the I/O stats object returned by _BeginGetIOStats.

    Returns:
      A string with the formatted disk I/O statistics.
    """
    disk_io = ''
    if self.performance_test and initial_io_stats:
      final_io_stats = self.adb.GetIoStats()
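      # Each delta is formatted by PrintPerfResult as a Chromium perf RESULT
      # line; assuming a hypothetical stat key such as 'read_ms', the output
      # would look roughly like:
      #   RESULT read_ms: read_ms= 1234 ms
      # with the units taken from the suffix of the stat name.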
      for stat in final_io_stats:
        disk_io += '\n' + PrintPerfResult(stat, stat,
                                          [final_io_stats[stat] -
                                           initial_io_stats[stat]],
                                          stat.split('_')[1], True, False)
      logging.info(disk_io)
    return disk_io

  def GetDisabledPrefixes(self):
    return ['DISABLED_', 'FLAKY_', 'FAILS_']

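  # A sketch of the --gtest_list_tests output that _ParseGTestListTests
  # expects (test case names end with '.'; test names are indented by two
  # spaces):
  #   FooTest.
  #     Bar
  #     DISABLED_Baz
  # This example yields ['FooTest.Bar']: names starting with a prefix from
  # GetDisabledPrefixes() are filtered out.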
  def _ParseGTestListTests(self, all_tests):
    ret = []
    current = ''
    disabled_prefixes = self.GetDisabledPrefixes()
    for test in all_tests:
      if not test:
        continue
      if test[0] != ' ':
        current = test
        continue
      if 'YOU HAVE' in test:
        break
      test_name = test[2:]
      if not any(test_name.startswith(x) for x in disabled_prefixes):
        ret += [current + test_name]
    return ret

  def PushDataAndPakFiles(self):
    if self.test_suite_basename == 'ui_unittests':
      self.adb.PushIfNeeded(self.test_suite_dirname + '/chrome.pak',
                            '/data/local/tmp/paks/chrome.pak')
      self.adb.PushIfNeeded(self.test_suite_dirname + '/locales/en-US.pak',
                            '/data/local/tmp/paks/en-US.pak')

  def _WatchTestOutput(self, p):
    """Watches the test output.

    Args:
      p: the process generating output, as created by pexpect.spawn.

    Returns:
      A TestResults object.
    """
    ok_tests = []
    failed_tests = []
    crashed_tests = []
    timed_out = False
    overall_fail = False
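    # The regexes below match gtest-style status lines in the device log,
    # e.g.:
    #   [ RUN      ] FooTest.Bar
    #   [       OK ] FooTest.Bar (10 ms)
    #   [  FAILED  ] FooTest.Bar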
    re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
    # APK tests rely on the END tag.
    re_end = re.compile(r'\[ END \] ?(.*)\r\n')
    # Signal handlers are installed before starting tests
    # to output the CRASHED marker when a crash happens.
    re_crash = re.compile(r'\[ CRASHED \](.*)\r\n')
    re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
    re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
    re_ok = re.compile(r'\[       OK \] ?(.*)\r\n')
    (io_stats_before, ready_to_continue) = self._BeginGetIOStats()
    while ready_to_continue:
      found = p.expect([re_run, pexpect.EOF, re_end, re_runner_fail],
                       timeout=self.timeout)
      if found == 1:  # matched pexpect.EOF
        break
      if found == 2:  # matched END.
        break
      if found == 3:  # RUNNER_FAILED
        logging.error('RUNNER_FAILED')
        overall_fail = True
        break
      if self.dump_debug_info:
        self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
      full_test_name = p.match.group(1)
      found = p.expect([re_ok, re_fail, re_crash, pexpect.EOF,
                        pexpect.TIMEOUT],
                       timeout=self.timeout)
      if found == 0:  # re_ok
        ok_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                    p.before)]
        continue
      if found == 2:  # re_crash
        crashed_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                         p.before)]
        overall_fail = True
        break
      # The test failed.
      failed_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                      p.before)]
      if found >= 3:
        # The test bailed out (i.e., didn't print OK or FAIL).
        if found == 4:  # pexpect.TIMEOUT
          logging.error('Test terminated after %d second timeout.',
                        self.timeout)
          timed_out = True
        break
    p.close()
    if not self.rebaseline and ready_to_continue:
      # _EndGetIOStats returns a formatted string; wrap it in a result object
      # so that the ok list is not extended one character at a time (the
      # 'disk_io' label here is arbitrary).
      disk_io = self._EndGetIOStats(io_stats_before)
      if disk_io:
        ok_tests += [BaseTestResult('disk_io', disk_io)]
      ret_code = self._GetGTestReturnCode()
      if ret_code:
        failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code,
                                        'pexpect.before: %s'
                                        '\npexpect.after: %s'
                                        % (p.before, p.after))]
    # Create TestResults and return.
    return TestResults.FromRun(ok=ok_tests, failed=failed_tests,
                               crashed=crashed_tests, timed_out=timed_out,
                               overall_fail=overall_fail)
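

# A minimal usage sketch (hypothetical: concrete subclasses must supply the
# _GetTestSuiteBaseName and _GetGTestReturnCode hooks, and real adb/device
# objects plus a valid tool name for CreateTool are assumed):
#
#   class TestPackageSketch(TestPackage):
#     def _GetTestSuiteBaseName(self):
#       return os.path.basename(self.test_suite_full)
#
#     def _GetGTestReturnCode(self):
#       return 0
#
#   package = TestPackageSketch(adb, device, 'out/Release/base_unittests',
#                               0, False, False, False, tool_name, None)
#   results = package._WatchTestOutput(
#       pexpect.spawn('adb -s <device> shell /data/local/base_unittests'))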