#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import logging
import os
import re

import pexpect

from perf_tests_helper import PrintPerfResult
from test_result import BaseTestResult, TestResults
from valgrind_tools import CreateTool


# TODO(bulach): TestPackage, TestPackageExecutable and
# TestPackageApk are a work in progress related to making the native tests
# run as an NDK app from an APK rather than a stand-alone executable.
class TestPackage(object):
  """A helper base class for both APK and stand-alone executables.

  Args:
    adb: ADB interface the tests are using.
    device: Device to run the tests on.
    test_suite: A specific test suite to run, empty to run all.
    timeout: Timeout for each test.
    rebaseline: Whether or not to run tests in isolation and update the
        filter.
    performance_test: Whether or not this is a performance test.
    cleanup_test_files: Whether or not to clean up test files on the device.
    tool: Name of the Valgrind tool.
    dump_debug_info: A debug_info object.
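
  Example (hypothetical values; assumes adb is an android_commands-style
  wrapper already connected to the device):
    package = TestPackage(adb, 'emulator-5554',
                          'out/Release/base_unittests', 0, False, False,
                          False, 'memcheck', None)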
  """

  def __init__(self, adb, device, test_suite, timeout, rebaseline,
               performance_test, cleanup_test_files, tool, dump_debug_info):
    self.adb = adb
    self.device = device
    self.test_suite = os.path.splitext(test_suite)[0]
    self.test_suite_basename = os.path.basename(self.test_suite)
    self.test_suite_dirname = os.path.dirname(self.test_suite)
    self.rebaseline = rebaseline
    self._performance_test = performance_test
    self.cleanup_test_files = cleanup_test_files
    self.tool = CreateTool(tool, self.adb)
    if timeout == 0:
      if self.test_suite_basename == 'page_cycler_tests':
        timeout = 900
      else:
        timeout = 60
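    # Valgrind-style tools can slow execution down considerably; scale the
    # timeout by the factor the tool reports.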
    self.timeout = timeout * self.tool.GetTimeoutScale()
    self.dump_debug_info = dump_debug_info

  def _BeginGetIOStats(self):
    """Gets I/O statistics before running the test.

    Returns:
      A tuple (I/O stats object, ready-to-continue flag). The flag is False
      when an error is encountered, True otherwise. The I/O stats object may
      be None if this is not a performance test.
    """
    initial_io_stats = None
    # Try to get the disk I/O statistics for all performance tests.
    if self._performance_test and not self.rebaseline:
      initial_io_stats = self.adb.GetIoStats()
      # Get rid of the noise introduced by launching Chrome for page cycler.
      if self.test_suite_basename == 'page_cycler_tests':
        try:
          chrome_launch_done_re = re.compile(
              re.escape('Finish waiting for browser launch!'))
          self.adb.WaitForLogMatch(chrome_launch_done_re)
          initial_io_stats = self.adb.GetIoStats()
        except pexpect.TIMEOUT:
          logging.error('Test terminated because the Chrome launcher did '
                        'not respond within 120 seconds.')
          return (None, False)
        finally:
          if self.dump_debug_info:
            self.dump_debug_info.TakeScreenshot('_Launch_Chrome_')
    return (initial_io_stats, True)

  def _EndGetIOStats(self, initial_io_stats):
    """Gets I/O statistics after running the test and calculates the delta.

    Args:
      initial_io_stats: I/O stats object obtained from _BeginGetIOStats.

    Returns:
      A string of formatted disk I/O statistics.
    """
    disk_io = ''
    if self._performance_test and initial_io_stats:
      final_io_stats = self.adb.GetIoStats()
      for stat in final_io_stats:
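        # Report the delta between the final and initial reading for each
        # counter. Stat names are assumed to look like 'num_reads', so the
        # component after the first '_' doubles as the units label.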
        disk_io += '\n' + PrintPerfResult(stat, stat,
                                          [final_io_stats[stat] -
                                           initial_io_stats[stat]],
                                          stat.split('_')[1], True, False)
      logging.info(disk_io)
    return disk_io

  def GetDisabledPrefixes(self):
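    """Returns prefixes of tests that should not be run.

    DISABLED_ is the standard gtest convention; FLAKY_ and FAILS_ are
    Chromium conventions for flaky and known-failing tests.
    """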
    return ['DISABLED_', 'FLAKY_', 'FAILS_']

  def _ParseGTestListTests(self, all_tests):
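    """Parses the raw output of --gtest_list_tests into full test names.

    The input is one line per entry: test case names flush left and ending
    with '.', with individual tests indented by two spaces underneath, e.g.
    (hypothetical names):
      FooTest.
        Bar
        DISABLED_Baz
    yields ['FooTest.Bar']. Tests carrying a disabled prefix are dropped,
    and the trailing 'YOU HAVE n DISABLED TESTS' note ends parsing.
    """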
    ret = []
    current = ''
    disabled_prefixes = self.GetDisabledPrefixes()
    for test in all_tests:
      if not test:
        continue
      if test[0] != ' ':
        current = test
        continue
      if 'YOU HAVE' in test:
        break
      test_name = test[2:]
      if not any(test_name.startswith(x) for x in disabled_prefixes):
        ret += [current + test_name]
    return ret

  def _WatchTestOutput(self, p):
    """Watches the test output.

    Args:
      p: The process generating output, as created by pexpect.spawn.
    """
    ok_tests = []
    failed_tests = []
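    # gtest frames each test with markers such as '[ RUN      ] Suite.Test'
    # followed by '[       OK ]' or '[  FAILED  ]'; the trailing \r\n matches
    # the CRLF line endings in output captured over adb.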
    re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
    re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
    re_ok = re.compile(r'\[       OK \] ?(.*)\r\n')
    (io_stats_before, ready_to_continue) = self._BeginGetIOStats()
    while ready_to_continue:
      found = p.expect([re_run, pexpect.EOF], timeout=self.timeout)
      if found == 1:  # matched pexpect.EOF
        break
      if self.dump_debug_info:
        self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
      full_test_name = p.match.group(1)
      found = p.expect([re_ok, re_fail, pexpect.EOF, pexpect.TIMEOUT],
                       timeout=self.timeout)
      if found == 0:  # re_ok
        ok_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                    p.before)]
        continue
      failed_tests += [BaseTestResult(full_test_name.replace('\r', ''),
                                      p.before)]
      if found >= 2:
        # The test crashed / bailed out (i.e., didn't print OK or FAIL).
        if found == 3:  # pexpect.TIMEOUT
          logging.error('Test terminated after %d second timeout.',
                        self.timeout)
        break
    p.close()
    if not self.rebaseline and ready_to_continue:
      # _EndGetIOStats returns a formatted string; wrap it in a result so the
      # list gains one entry rather than the string's individual characters.
      disk_io = self._EndGetIOStats(io_stats_before)
      if disk_io:
        ok_tests += [BaseTestResult('disk_io', disk_io)]
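      # _GetGTestReturnCode is expected to be provided by the concrete
      # subclass (see the TODO above about TestPackageExecutable and
      # TestPackageApk).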
      ret_code = self._GetGTestReturnCode()
      if ret_code:
        failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code,
                                        'pexpect.before: %s'
                                        '\npexpect.after: %s'
                                        % (p.before, p.after))]
    return TestResults.FromOkAndFailed(ok_tests, failed_tests)