OLD | NEW |
---|---|
1 # Copyright 2012 the V8 project authors. All rights reserved. | 1 # Copyright 2012 the V8 project authors. All rights reserved. |
2 # Redistribution and use in source and binary forms, with or without | 2 # Redistribution and use in source and binary forms, with or without |
3 # modification, are permitted provided that the following conditions are | 3 # modification, are permitted provided that the following conditions are |
4 # met: | 4 # met: |
5 # | 5 # |
6 # * Redistributions of source code must retain the above copyright | 6 # * Redistributions of source code must retain the above copyright |
7 # notice, this list of conditions and the following disclaimer. | 7 # notice, this list of conditions and the following disclaimer. |
8 # * Redistributions in binary form must reproduce the above | 8 # * Redistributions in binary form must reproduce the above |
9 # copyright notice, this list of conditions and the following | 9 # copyright notice, this list of conditions and the following |
10 # disclaimer in the documentation and/or other materials provided | 10 # disclaimer in the documentation and/or other materials provided |
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
74 | 74 |
75 def _CommonInit(self, num_tests, progress_indicator, context): | 75 def _CommonInit(self, num_tests, progress_indicator, context): |
76 self.indicator = progress_indicator | 76 self.indicator = progress_indicator |
77 progress_indicator.runner = self | 77 progress_indicator.runner = self |
78 self.context = context | 78 self.context = context |
79 self.succeeded = 0 | 79 self.succeeded = 0 |
80 self.total = num_tests | 80 self.total = num_tests |
81 self.remaining = num_tests | 81 self.remaining = num_tests |
82 self.failed = [] | 82 self.failed = [] |
83 self.crashed = 0 | 83 self.crashed = 0 |
84 self.reran_tests = 0 | |
84 | 85 |
85 def _RunPerfSafe(self, fun): | 86 def _RunPerfSafe(self, fun): |
86 try: | 87 try: |
87 fun() | 88 fun() |
88 except Exception, e: | 89 except Exception, e: |
89 print("PerfData exception: %s" % e) | 90 print("PerfData exception: %s" % e) |
90 self.perf_failures = True | 91 self.perf_failures = True |
91 | 92 |
def _GetJob(self, test):
  """Build the Job object used to execute |test| on the worker pool."""
  command = self.GetCommand(test)
  # Stress-opt runs repeat work several times, so quadruple the timeout
  # if the flag appears in any of the flag sources.
  flag_sources = [test.flags, self.context.mode_flags, self.context.extra_flags]
  if any("--stress-opt" in flags for flags in flag_sources):
    timeout = self.context.timeout * 4
  else:
    timeout = self.context.timeout
  # A dependency runs with the same command line, with the dependency's
  # path substituted for the test's path.
  dep_command = None
  if test.dependency is not None:
    dep_command = [part.replace(test.path, test.dependency) for part in command]
  return Job(command, dep_command, test.id, timeout, self.context.verbose)
105 | |
def _MaybeRerun(self, pool, test):
  """Schedule |test| to run again if the rerun budgets allow it."""
  # Per-test budget: each test may only be rerun a limited number of times.
  if test.run > self.context.rerun_failures_count:
    return
  if test.run == 1:
    # The first rerun of a test is charged against the overall budget of
    # distinct tests that may be rerun at all.
    if self.reran_tests >= self.context.rerun_failures_max:
      return
    self.reran_tests += 1
  if test.run >= 2 and test.duration > self.context.timeout / 20:
    # Rerun slow tests at most once.
    return
  # Drop the previous result and re-queue the test on the pool.
  test.duration = None
  test.output = None
  test.run += 1
  pool.add([self._GetJob(test)])
  self.remaining += 1
128 | |
def Run(self, jobs):
  """Execute all tests on |jobs| parallel jobs.

  Returns a shell-style exit code: 0 when every test finished and
  passed, 1 when there were failures or tests left unfinished.
  """
  self.indicator.Starting()
  self._RunInternal(jobs)
  self.indicator.Done()
  return 1 if (self.failed or self.remaining) else 0
99 | 136 |
def _RunInternal(self, jobs):
  """Queue all tests, run them on a pool of |jobs| workers and collect
  results, handing failures to the rerun machinery.

  Exceptions raised while building a job are saved and re-raised only
  after all other tests have run.
  """
  pool = Pool(jobs)
  # Maps test.id -> test so pool results (keyed by id) can be resolved.
  test_map = {}
  # TODO(machenbach): Instead of filling the queue completely before
  # pool.imap_unordered, make this a generator that already starts testing
  # while the queue is filled.
  queue = []
  queued_exception = None
  for test in self.tests:
    assert test.id >= 0
    test_map[test.id] = test
    try:
      queue.append([self._GetJob(test)])
    except Exception, e:
      # If this failed, save the exception and re-raise it later (after
      # all other tests have had a chance to run).
      queued_exception = e
      continue
  try:
    it = pool.imap_unordered(RunTest, queue)
    for result in it:
      # result is (test id, output, duration).
      test = test_map[result[0]]
      self.indicator.AboutToRun(test)
      test.output = result[1]
      test.duration = result[2]
      has_unexpected_output = test.suite.HasUnexpectedOutput(test)
      if has_unexpected_output:
        self.failed.append(test)
        if test.output.HasCrashed():
          self.crashed += 1
      else:
        # Perf data is only recorded for expected results; failures of the
        # perf machinery itself are swallowed by _RunPerfSafe.
        self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
        self.succeeded += 1
      self.remaining -= 1
      self.indicator.HasRun(test, has_unexpected_output)
      if has_unexpected_output:
        # Rerun test failures after the indicator has processed the results.
        self._MaybeRerun(pool, test)
  finally:
    pool.terminate()
  self._RunPerfSafe(lambda: self.perf_data_manager.close())
  if self.perf_failures:
    # Nuke perf data in case of failures. This might not work on windows as
    # some files might still be open.
    print "Deleting perf test data due to db corruption."
    shutil.rmtree(self.datapath)
  if queued_exception:
    raise queued_exception
(...skipping 13 matching lines...) Expand all Loading... | |
169 test.suite.GetFlagsForTestCase(test, self.context) + | 198 test.suite.GetFlagsForTestCase(test, self.context) + |
170 self.context.extra_flags) | 199 self.context.extra_flags) |
171 return cmd | 200 return cmd |
172 | 201 |
173 | 202 |
class BreakNowException(Exception):
  """Exception carrying an arbitrary payload describing why the run
  should stop immediately."""

  def __init__(self, value):
    # Deliberately does not call Exception.__init__; only the payload
    # is stored, and __str__ renders it.
    self.value = value

  def __str__(self):
    return "%r" % (self.value,)
OLD | NEW |