OLD | NEW |
| (Empty) |
1 #!/usr/bin/env python | |
2 # | |
3 # Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file | |
4 # for details. All rights reserved. Use of this source code is governed by a | |
5 # BSD-style license that can be found in the LICENSE file. | |
6 # | |
7 | |
8 """Test driver for the Dart project used by continuous build and developers.""" | |
9 | |
10 | |
11 import imp | |
12 import optparse | |
13 import os | |
14 import Queue | |
15 import re | |
16 import sys | |
17 import threading | |
18 import time | |
19 import urllib | |
20 | |
21 import testing | |
22 from testing import test_runner | |
23 import utils | |
24 | |
25 | |
# Default per-test timeout, in seconds.
TIMEOUT_SECS = 60
# Host architecture and OS, detected once at import time via utils.
ARCH_GUESS = utils.GuessArchitecture()
OS_GUESS = utils.GuessOS()
# Test suite names recognized on the command line.
BUILT_IN_TESTS = ['dartc', 'vm', 'standalone', 'corelib', 'language', 'co19',
                  'samples', 'isolate', 'stub-generator', 'client']

# Patterns for matching test options in .dart files.
VM_OPTIONS_PATTERN = re.compile(r'// VMOptions=(.*)')
DART_OPTIONS_PATTERN = re.compile(r'// DartOptions=(.*)')
ISOLATE_STUB_PATTERN = re.compile(r'// IsolateStubs=(.*)')
36 | |
37 # --------------------------------------------- | |
38 # --- P r o g r e s s I n d i c a t o r s --- | |
39 # --------------------------------------------- | |
40 | |
41 | |
class Error(Exception):
  """Base exception for the test driver (e.g. raised on an aborted run)."""
  pass
44 | |
45 | |
46 class ProgressIndicator(object): | |
47 """Base class for displaying the progress of the test run.""" | |
48 | |
49 def __init__(self, cases, context, start_time): | |
50 self.abort = False | |
51 self.terminate = False | |
52 self.cases = cases | |
53 self.start_time = start_time | |
54 self.queue = Queue.Queue(len(cases)) | |
55 self.batch_queues = {} | |
56 self.context = context | |
57 | |
58 # Extract batchable cases. | |
59 found_cmds = {} | |
60 for case in cases: | |
61 cmd = case.case.GetCommand()[0] | |
62 if not utils.IsWindows(): | |
63 # Diagnostic check for executable (if an absolute pathname) | |
64 if not cmd in found_cmds: | |
65 if os.path.isabs(cmd) and not os.path.isfile(cmd): | |
66 msg = "Can't find command %s\n" % cmd | |
67 msg += '(Did you build first? ' | |
68 msg += 'Are you running in the correct directory?)' | |
69 raise Exception(msg) | |
70 else: | |
71 found_cmds[cmd] = 1 | |
72 | |
73 if case.case.IsBatchable(): | |
74 if not cmd in self.batch_queues: | |
75 self.batch_queues[cmd] = Queue.Queue(len(cases)) | |
76 self.batch_queues[cmd].put(case) | |
77 else: | |
78 self.queue.put_nowait(case) | |
79 | |
80 self.succeeded = 0 | |
81 self.remaining = len(cases) | |
82 self.total = len(cases) | |
83 self.failed = [] | |
84 self.crashed = 0 | |
85 self.lock = threading.Lock() | |
86 | |
87 def PrintFailureHeader(self, test): | |
88 if test.IsNegative(): | |
89 negative_marker = '[negative] ' | |
90 else: | |
91 negative_marker = '' | |
92 print '=== %(label)s %(negative)s===' % { | |
93 'label': test.GetLabel(), | |
94 'negative': negative_marker | |
95 } | |
96 print 'Path: %s' % '/'.join(test.path) | |
97 | |
98 def Run(self, tasks): | |
99 """Starts tests and keeps running until queues are drained.""" | |
100 self.Starting() | |
101 | |
102 # Scale the number of tasks to the nubmer of CPUs on the machine | |
103 if tasks == testing.USE_DEFAULT_CPUS: | |
104 tasks = testing.HOST_CPUS | |
105 | |
106 # TODO(zundel): Refactor BatchSingle method and TestRunner to | |
107 # share code and simplify this method. | |
108 | |
109 # Start the non-batchable items first - there are some long running | |
110 # jobs we don't want to wait on at the end. | |
111 threads = [] | |
112 # Spawn N-1 threads and then use this thread as the last one. | |
113 # That way -j1 avoids threading altogether which is a nice fallback | |
114 # in case of threading problems. | |
115 for unused_i in xrange(tasks - 1): | |
116 thread = threading.Thread(target=self.RunSingle, args=[]) | |
117 threads.append(thread) | |
118 thread.start() | |
119 | |
120 # Next, crank up the batchable tasks. Note that this will start | |
121 # 'tasks' more threads, but the assumption is that if batching is | |
122 # enabled that almost all tests are batchable. | |
123 for (cmd, queue) in self.batch_queues.items(): | |
124 if not queue.empty(): | |
125 batch_tester = None | |
126 try: | |
127 batch_tester = test_runner.BatchRunner(queue, tasks, self, | |
128 [cmd, '-batch']) | |
129 except: | |
130 print 'Aborting batch test for ' + cmd + '. Problem on startup.' | |
131 if batch_tester: batch_tester.Shutdown() | |
132 raise | |
133 | |
134 try: | |
135 batch_tester.WaitForCompletion() | |
136 except: | |
137 print 'Aborting batch cmd ' + cmd + 'while waiting for completion.' | |
138 if batch_tester: batch_tester.Shutdown() | |
139 raise | |
140 | |
141 try: | |
142 self.RunSingle() | |
143 if self.abort: | |
144 raise Error('Aborted') | |
145 # Wait for the remaining non-batched threads. | |
146 for thread in threads: | |
147 # Use a timeout so that signals (ctrl-c) will be processed. | |
148 thread.join(timeout=10000000) | |
149 if self.abort: | |
150 raise Error('Aborted') | |
151 except: | |
152 # If there's an exception we schedule an interruption for any | |
153 # remaining threads. | |
154 self.terminate = True | |
155 # ...and then reraise the exception to bail out | |
156 raise | |
157 | |
158 self.Done() | |
159 return not self.failed | |
160 | |
161 def RunSingle(self): | |
162 while not self.terminate: | |
163 try: | |
164 test = self.queue.get_nowait() | |
165 except Queue.Empty: | |
166 return | |
167 case = test.case | |
168 with self.lock: | |
169 self.AboutToRun(case) | |
170 try: | |
171 start = time.time() | |
172 output = case.Run() | |
173 case.duration = (time.time() - start) | |
174 except KeyboardInterrupt: | |
175 self.abort = True | |
176 self.terminate = True | |
177 raise | |
178 except IOError: | |
179 self.abort = True | |
180 self.terminate = True | |
181 raise | |
182 if self.terminate: | |
183 return | |
184 with self.lock: | |
185 if output.UnexpectedOutput(): | |
186 self.failed.append(output) | |
187 if output.HasCrashed(): | |
188 self.crashed += 1 | |
189 else: | |
190 self.succeeded += 1 | |
191 self.remaining -= 1 | |
192 self.HasRun(output) | |
193 | |
194 | |
def EscapeCommand(command):
  """Renders a command argument list as a single shell-style string.

  Arguments containing a space are wrapped in double quotes. Other shell
  metacharacters are not escaped, which may be insufficient in some cases.
  """
  quoted = ['"%s"' % arg if ' ' in arg else arg for arg in command]
  return ' '.join(quoted)
205 | |
206 | |
class SimpleProgressIndicator(ProgressIndicator):
  """Base class for printing output of each test separately."""

  def Starting(self):
    """Called at the beginning before any tests are run."""
    print 'Running %i tests' % len(self.cases)

  def Done(self):
    """Called when all tests are complete."""
    print
    # Dump stderr/stdout and the command line for every failed test.
    for failed in self.failed:
      self.PrintFailureHeader(failed.test)
      if failed.output.stderr:
        print '--- stderr ---'
        print failed.output.stderr.strip()
      if failed.output.stdout:
        print '--- stdout ---'
        print failed.output.stdout.strip()
      print 'Command: %s' % EscapeCommand(failed.command)
      if failed.HasCrashed():
        print '--- CRASHED ---'
      if failed.HasTimedOut():
        print '--- TIMEOUT ---'
    # Print a final summary banner.
    if not self.failed:
      print '==='
      print '=== All tests succeeded'
      print '==='
    else:
      print
      print '==='
      if len(self.failed) == 1:
        print '=== 1 test failed'
      else:
        print '=== %i tests failed' % len(self.failed)
      if self.crashed > 0:
        if self.crashed == 1:
          print '=== 1 test CRASHED'
        else:
          print '=== %i tests CRASHED' % self.crashed
      print '==='
247 | |
248 | |
class VerboseProgressIndicator(SimpleProgressIndicator):
  """Print verbose information about each test that is run."""

  def AboutToRun(self, case):
    """Called before each test case is run."""
    print 'Starting %s...' % case.GetLabel()
    # Flush so the 'Starting' line is visible even if the test hangs.
    sys.stdout.flush()

  def HasRun(self, output):
    """Called after each test case is run."""
    if output.UnexpectedOutput():
      if output.HasCrashed():
        outcome = 'CRASH'
      else:
        outcome = 'FAIL'
    else:
      outcome = 'PASS'
    print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
267 | |
268 | |
class OneLineProgressIndicator(SimpleProgressIndicator):
  """Results of each test is printed like a report, on a line by itself."""

  def AboutToRun(self, case):
    """Called before each test case is run."""
    pass

  def HasRun(self, output):
    """Called after each test case is run."""
    if output.UnexpectedOutput():
      if output.HasCrashed():
        outcome = 'CRASH'
      else:
        outcome = 'FAIL'
    else:
      # NOTE(review): lowercase 'pass' here, unlike VerboseProgressIndicator's
      # uppercase 'PASS' — possibly intentional to de-emphasize passes; confirm
      # before normalizing.
      outcome = 'pass'
    print 'Done %s: %s' % (output.test.GetLabel(), outcome)
286 | |
287 | |
class StatusFileProgressIndicator(SimpleProgressIndicator):
  """Reports tests whose actual outcome disagrees with the .status file."""

  def AboutToRun(self, case):
    """Called before each test case is run."""
    pass

  def HasRun(self, output):
    """Called after each test case is run."""
    actual_outcome = output.GetOutcome()
    expected_outcomes = set(output.test.outcomes)
    if not actual_outcome in expected_outcomes:
      # PASS is the implicit default, so it needs no .status entry; drop it
      # before deciding whether the file has a stale explicit expectation.
      expected_outcomes.discard(testing.PASS)
      if expected_outcomes:
        print 'Incorrect status for %s: %s' % (output.test.GetLabel(),
                                               ', '.join(expected_outcomes))
      else:
        print 'Update status for %s: %s' % (output.test.GetLabel(),
                                            actual_outcome)
306 | |
307 | |
class OneLineProgressIndicatorForBuildBot(OneLineProgressIndicator):
  """OneLineProgressIndicator plus buildbot '@@@' step-annotation lines."""

  def HasRun(self, output):
    """Called after each test case is run."""
    super(OneLineProgressIndicatorForBuildBot, self).HasRun(output)
    # Integer percentage of tests finished so far.
    percent = (((self.total - self.remaining) * 100) // self.total)
    print '@@@STEP_CLEAR@@@'
    print '@@@STEP_TEXT@ %3d%% +%d -%d @@@' % (
        percent, self.succeeded, len(self.failed))
317 | |
318 | |
class CompactProgressIndicator(ProgressIndicator):
  """Continuously updates a single line w/ a summary of progress of the run."""

  def __init__(self, cases, context, start_time, templates):
    super(CompactProgressIndicator, self).__init__(cases, context, start_time)
    # 'templates' supplies the 'status_line', 'stdout' and 'stderr' formats
    # (see MonochromeProgressIndicator / ColorProgressIndicator).
    self.templates = templates
    # Length of the last status line printed, so it can be blanked out.
    self.last_status_length = 0

  def Starting(self):
    """Called at the beginning before any tests are run."""
    pass

  def Done(self):
    """Called when all tests are complete."""
    self._PrintProgress('Done')

  def AboutToRun(self, case):
    """Called before each test case is run."""
    self._PrintProgress(case.GetLabel())

  def HasRun(self, output):
    """Called after each test case is run."""
    if output.UnexpectedOutput():
      # Clear the progress line before printing the failure details.
      self.ClearLine(self.last_status_length)
      self.PrintFailureHeader(output.test)
      stdout = output.output.stdout.strip()
      if stdout:
        print self.templates['stdout'] % stdout
      stderr = output.output.stderr.strip()
      if stderr:
        print self.templates['stderr'] % stderr
      print 'Command: %s' % EscapeCommand(output.command)
      if output.HasCrashed():
        print '--- CRASHED ---'
      if output.HasTimedOut():
        print '--- TIMEOUT ---'

  def _Truncate(self, buf, length):
    """Truncate a line if it exceeds length, substituting an ellipsis..."""
    if length and (len(buf) > (length - 3)):
      return buf[:(length-3)] + '...'
    else:
      return buf

  def _PrintProgress(self, name):
    """Refresh the display."""
    self.ClearLine(self.last_status_length)
    elapsed = time.time() - self.start_time
    status = self.templates['status_line'] % {
        'passed': self.succeeded,
        'percent': (((self.total - self.remaining) * 100) // self.total),
        'failed': len(self.failed),
        'test': name,
        'mins': int(elapsed) / 60,  # Python 2 integer division.
        'secs': int(elapsed) % 60
    }
    status = self._Truncate(status, 78)
    self.last_status_length = len(status)
    # Trailing comma: stay on this line so the next update overwrites it.
    print status,
    sys.stdout.flush()

  def ClearLine(self, last_line_length):
    """Erase the current line w/ a linefeed and overwriting with spaces."""
    print ('\r' + (' ' * last_line_length) + '\r'),
383 | |
384 | |
class MonochromeProgressIndicator(CompactProgressIndicator):
  """A CompactProgressIndicator with no color."""

  def __init__(self, cases, context, start_time):
    """Builds the plain-text status-line templates and delegates upward."""
    mono_templates = {
        'status_line':
            '[%(mins)02i:%(secs)02i|%%%(percent) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s',
        'stdout': '%s',
        'stderr': '%s',
        'clear': lambda line_len: self.ClearLine(line_len),
        'max_length': 78
    }
    super(MonochromeProgressIndicator, self).__init__(
        cases, context, start_time, mono_templates)
401 | |
402 | |
class ColorProgressIndicator(CompactProgressIndicator):
  """A CompactProgressIndicator with pretty colors."""

  def __init__(self, cases, context, start_time):
    """Builds ANSI-colored status-line templates and delegates upward."""
    color_templates = {
        'status_line':
            '[%(mins)02i:%(secs)02i|%%%(percent) 4d|'
            '\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s',
        'stdout': '%s',
        'stderr': '%s',
        'clear': lambda line_len: self.ClearLine(line_len),
        'max_length': 78
    }
    super(ColorProgressIndicator, self).__init__(
        cases, context, start_time, color_templates)
420 | |
421 | |
# Maps the --progress command-line option value to its indicator class.
PROGRESS_INDICATORS = {
    'verbose': VerboseProgressIndicator,
    'mono': MonochromeProgressIndicator,
    'color': ColorProgressIndicator,
    'line': OneLineProgressIndicator,
    'buildbot': OneLineProgressIndicatorForBuildBot,
    'status': StatusFileProgressIndicator,
}
430 | |
431 | |
432 # ------------------------- | |
433 # --- F r a m e w o r k --- | |
434 # ------------------------- | |
435 | |
436 | |
class TestCase(object):
  """A single test case, like running 'dart' on a single .dart file."""

  def __init__(self, context, path):
    self.path = path
    self.context = context
    self.duration = None  # Wall-clock seconds; filled in after the run.
    self.arch = []
    self.component = []

  def IsBatchable(self):
    """True when this case may run under a persistent '-batch' process."""
    if self.context.use_batch:
      # Only the dartc component is treated as batchable here.
      if self.component and 'dartc' in self.component:
        return True
    return False

  def IsNegative(self):
    """True if the test is expected to fail; subclasses may override."""
    return False

  def CompareTime(self, other):
    # Python 2 cmp-style comparator; sorts longer-running tests first.
    return cmp(other.duration, self.duration)

  def DidFail(self, output):
    """Returns whether the run failed, caching the verdict on the output."""
    if output.failed is None:
      output.failed = self.IsFailureOutput(output)
    return output.failed

  def IsFailureOutput(self, output):
    """Default failure check (nonzero exit code); subclasses may override."""
    return output.exit_code != 0

  def RunCommand(self, command, cwd=None, cleanup=True):
    """Executes the command and wraps the result in a TestOutput.

    Raises:
      utils.ToolError: if the executable cannot be started.
    """
    full_command = self.context.processor(command)
    try:
      output = test_runner.Execute(full_command, self.context,
                                   self.context.timeout, cwd)
    except OSError as e:
      raise utils.ToolError('%s: %s' % (full_command[0], e.strerror))
    test_output = test_runner.TestOutput(self, full_command, output)
    if cleanup: self.Cleanup()
    return test_output

  def BeforeRun(self):
    """Hook invoked before the command runs; subclasses may override."""
    pass

  def AfterRun(self):
    """Hook invoked after the command runs, even when it raised."""
    pass

  def Run(self):
    """Runs the case; GetCommand is supplied by subclasses."""
    self.BeforeRun()
    cmd = self.GetCommand()
    try:
      result = self.RunCommand(cmd)
    finally:
      # Always pair BeforeRun with AfterRun.
      self.AfterRun()
    return result

  def Cleanup(self):
    """Removes temporary state after a run; subclasses may override."""
    return
495 | |
496 | |
497 class TestConfiguration(object): | |
498 """Test configurations give test.py the list of tests, e.g. listing a dir.""" | |
499 | |
500 def __init__(self, context, root, flags=[]): | |
501 self.context = context | |
502 self.root = root | |
503 self.flags = flags | |
504 | |
505 def Contains(self, path, filename): | |
506 """Returns True if the given path regexp matches the passed filename.""" | |
507 | |
508 if len(path) > len(filename): | |
509 return False | |
510 for i in xrange(len(path)): | |
511 try: | |
512 if not path[i].match(filename[i]): | |
513 return False | |
514 except: | |
515 print 'Invalid regexp %s in .status file. ' % '/'.join(path) | |
516 print 'Try escaping special characters with \\' | |
517 raise | |
518 | |
519 return True | |
520 | |
521 def GetTestStatus(self, sections, defs): | |
522 pass | |
523 | |
524 | |
class TestSuite(object):
  """A named collection of tests; base class for test repositories."""

  def __init__(self, name):
    # Remember the suite's name for later lookup by GetName().
    self.name = name

  def GetName(self):
    """Returns the name given at construction time."""
    return self.name
532 | |
533 | |
class TestRepository(TestSuite):
  """A collection of test configurations."""

  def __init__(self, path):
    # Normalize so the suite is named after the directory's basename.
    normalized_path = os.path.abspath(path)
    super(TestRepository, self).__init__(os.path.basename(normalized_path))
    self.path = normalized_path
    self.is_loaded = False  # Guards against re-importing testcfg.
    self.config = None

  def GetConfiguration(self, context):
    """Retrieve a TestConfiguration subclass for this set of tests."""
    if self.is_loaded:
      return self.config
    # Marked loaded up front so a failing import is not retried.
    self.is_loaded = True
    filename = None
    try:
      # imp.find_module returns an open file object as its first element;
      # despite its name, 'filename' holds that file object, closed below.
      (filename, pathname, description) = imp.find_module(
          'testcfg', [self.path])
      module = imp.load_module('testcfg', filename, pathname, description)
      self.config = module.GetConfiguration(context, self.path)
    finally:
      if filename:
        filename.close()
    return self.config

  def ListTests(self, current_path, path, context, mode, arch, component):
    """Delegates test listing to the loaded testcfg configuration."""
    return self.GetConfiguration(context).ListTests(current_path,
                                                    path,
                                                    mode,
                                                    arch,
                                                    component)

  def GetTestStatus(self, context, sections, defs):
    """Delegates status collection to the loaded testcfg configuration."""
    self.GetConfiguration(context).GetTestStatus(sections, defs)
569 | |
570 | |
class LiteralTestSuite(TestSuite):
  """Represents one set of tests."""

  def __init__(self, tests):
    super(LiteralTestSuite, self).__init__('root')
    self.tests = tests

  def ListTests(self, current_path, path, context, mode, arch, component):
    """Collects tests from every child suite whose name matches path[0]."""
    name_pattern = path[0]
    collected = []
    for child in self.tests:
      child_name = child.GetName()
      if name_pattern.match(child_name):
        child_path = current_path + [child_name]
        collected += child.ListTests(child_path, path, context, mode, arch,
                                     component)
    return collected

  def GetTestStatus(self, context, sections, defs):
    """Accumulates .status rules from every child suite."""
    for child in self.tests:
      child.GetTestStatus(context, sections, defs)
591 | |
592 | |
class Context(object):
  """A way to send global context for the test run to each test case."""

  def __init__(self, workspace, verbose, os_name, timeout,
               processor, suppress_dialogs, executable, flags,
               keep_temporary_files, use_batch, checked):
    self.workspace = workspace
    self.verbose = verbose
    self.os = os_name
    self.timeout = timeout
    self.processor = processor
    self.suppress_dialogs = suppress_dialogs
    self.executable = executable
    self.flags = flags
    self.keep_temporary_files = keep_temporary_files
    # use_batch arrives as a string flag; only the literal 'true' enables it.
    self.use_batch = use_batch == 'true'
    self.checked = checked

  def GetBuildRoot(self, mode, arch):
    """The top level directory containing compiler, runtime, tools..."""
    result = utils.GetBuildRoot(self.os, mode, arch)
    return result

  def GetBuildConf(self, mode, arch):
    """Returns the build configuration name for the given mode/arch."""
    result = utils.GetBuildConf(mode, arch)
    return result

  def GetExecutable(self, mode, arch, path):
    """Returns the name of the executable used to run the test."""
    if self.executable is not None:
      # An explicit --executable overrides the computed path.
      return self.executable
    if utils.IsWindows() and not path.endswith('.exe'):
      return path + '.exe'
    else:
      return path

  def GetD8(self, mode, arch):
    """Returns the path to the d8 binary in the build directory."""
    d8 = os.path.join(self.GetBuildRoot(mode, arch), 'd8')
    return self.GetExecutable(mode, arch, d8)

  def GetDart(self, mode, arch, component):
    """Returns the Dart runner command as a one-element argument list."""
    dart = utils.GetDartRunner(mode, arch, component)
    return [self.GetExecutable(mode, arch, dart)]

  def GetDartC(self, mode, arch):
    """Returns the path to the Dart --> JS compiler."""
    dartc = os.path.abspath(os.path.join(
        self.GetBuildRoot(mode, arch), 'compiler', 'bin', 'dartc'))
    if utils.IsWindows(): dartc += '.exe'
    command = [dartc]

    # Add the flags from the context to the command line.
    command += self.flags
    return command

  def GetRunTests(self, mode, arch):
    """Returns the run_vm_tests command as a one-element argument list."""
    path = os.path.join(self.GetBuildRoot(mode, arch), 'run_vm_tests')
    return [self.GetExecutable(mode, arch, path)]
651 | |
652 | |
def RunTestCases(cases_to_run, progress, tasks, context, start_time):
  """Chooses a progress indicator and then starts the tests."""
  indicator_class = PROGRESS_INDICATORS[progress]
  indicator = indicator_class(cases_to_run, context, start_time)
  return indicator.Run(tasks)
657 | |
658 | |
659 # ------------------------------------------- | |
660 # --- T e s t C o n f i g u r a t i o n --- | |
661 # ------------------------------------------- | |
662 | |
663 | |
class Expression(object):
  """Abstract base for parsed .status-file expressions."""
  pass
666 | |
667 | |
class Constant(Expression):
  """A literal boolean value ('true'/'false') in a .status expression."""

  def __init__(self, value):
    super(Constant, self).__init__()
    self.value = value

  def Evaluate(self, unused_env, unused_defs):
    # A constant ignores the environment and definitions entirely.
    return self.value
676 | |
677 | |
class Variable(Expression):
  """A '$name' reference resolved against the environment dictionary."""

  def __init__(self, name):
    super(Variable, self).__init__()
    self.name = name

  def GetOutcomes(self, env, unused_defs):
    """Returns the variable's value as a one-element set, or Nothing."""
    if self.name not in env:
      return Nothing()
    return ListSet([env[self.name]])

  def Evaluate(self, env, defs):
    """Returns the raw value; a KeyError escapes if the variable is unset."""
    return env[self.name]
691 | |
692 | |
class Outcome(Expression):
  """A bare word: a user-defined rule name or a literal outcome label."""

  def __init__(self, name):
    super(Outcome, self).__init__()
    self.name = name

  def GetOutcomes(self, env, defs):
    """Expands a matching definition when one exists, else the literal name."""
    if self.name not in defs:
      return ListSet([self.name])
    return defs[self.name].GetOutcomes(env, defs)
704 | |
705 | |
class Set(object):
  """An abstract set class used to hold Rules.

  Concrete subclasses implement Intersect, Union and IsEmpty.
  """
  pass
709 | |
710 | |
class ListSet(Set):
  """A set that uses lists for storage."""

  def __init__(self, elms):
    super(ListSet, self).__init__()
    self.elms = elms

  def __str__(self):
    return 'ListSet%s' % str(self.elms)

  def Intersect(self, that):
    """Set intersection; delegates when 'that' is not list-backed."""
    if not isinstance(that, ListSet):
      return that.Intersect(self)
    common = [elm for elm in self.elms if elm in that.elms]
    return ListSet(common)

  def Union(self, that):
    """Set union; keeps this set's elements first and skips duplicates."""
    if not isinstance(that, ListSet):
      return that.Union(self)
    extra = [elm for elm in that.elms if elm not in self.elms]
    return ListSet(self.elms + extra)

  def IsEmpty(self):
    """True when no elements are stored."""
    return not self.elms
734 | |
735 | |
class Everything(Set):
  """A set that represents all possible values."""

  def Intersect(self, that):
    # Intersecting with everything yields the other set unchanged.
    return that

  def Union(self, unused_that):
    # Everything already contains any other set.
    return self

  def IsEmpty(self):
    return False
747 | |
748 | |
class Nothing(Set):
  """The empty set: absorbs intersections, defers to the other on union."""

  def Intersect(self, unused_that):
    return self

  def Union(self, that):
    return that

  def IsEmpty(self):
    return True
759 | |
760 | |
class Operation(Expression):
  """A binary expression node, e.g. ($arch == ia32) or (a && b)."""

  def __init__(self, left, op, right):
    super(Operation, self).__init__()
    self.left = left
    self.op = op
    self.right = right

  def Evaluate(self, env, defs):
    """Evaluates this node to a boolean for the given environment."""
    operator = self.op
    if operator == '||' or operator == ',':
      return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
    if operator == 'if':
      # A bare 'if' expression never evaluates to true on its own.
      return False
    if operator == '==':
      lhs_outcomes = self.left.GetOutcomes(env, defs)
      rhs_outcomes = self.right.GetOutcomes(env, defs)
      return not lhs_outcomes.Intersect(rhs_outcomes).IsEmpty()
    assert operator == '&&'
    return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)

  def GetOutcomes(self, env, defs):
    """Returns the Set of outcomes this node may produce."""
    operator = self.op
    if operator == '||' or operator == ',':
      lhs_outcomes = self.left.GetOutcomes(env, defs)
      return lhs_outcomes.Union(self.right.GetOutcomes(env, defs))
    if operator == 'if':
      # Outcomes apply only when the condition (right side) holds.
      if self.right.Evaluate(env, defs):
        return self.left.GetOutcomes(env, defs)
      return Nothing()
    assert operator == '&&'
    lhs_outcomes = self.left.GetOutcomes(env, defs)
    return lhs_outcomes.Intersect(self.right.GetOutcomes(env, defs))
796 | |
797 | |
def IsAlpha(buf):
  """Returns True if every character is a letter, digit, or underscore.

  An empty string vacuously satisfies the condition.
  """
  return all(ch.isalpha() or ch.isdigit() or ch == '_' for ch in buf)
804 | |
805 | |
class Tokenizer(object):
  """Tokenizer that chops expressions into variables, parens and operators."""

  def __init__(self, expr):
    self.index = 0  # Cursor into expr.
    self.expr = expr
    self.length = len(expr)
    self.tokens = None  # Populated by Tokenize().

  def Current(self, length=1):
    """Returns the next 'length' characters, or '' past end of input."""
    if not self.HasMore(length): return ''
    return self.expr[self.index:self.index+length]

  def HasMore(self, length=1):
    # NOTE(review): for length > 1 this bound is looser than
    # 'index + length <= self.length', so Current(2) near the end of the
    # input may return a single character. Tokenize() tolerates this
    # (a short slice simply fails the '&&'/'||'/'==' comparisons), but
    # confirm before reusing this method elsewhere.
    return self.index < self.length + (length - 1)

  def Advance(self, count=1):
    """Moves the cursor forward by 'count' characters."""
    self.index += count

  def AddToken(self, token):
    """Appends one finished token to the output list."""
    self.tokens.append(token)

  def SkipSpaces(self):
    """Advances the cursor past any whitespace."""
    while self.HasMore() and self.Current().isspace():
      self.Advance()

  def Tokenize(self):
    """Lexical analysis of an expression in a .status file.

    Example:
      [ $mode == debug && ($component == chromium || $component == dartc) ]

    Args:
      None.

    Returns:
      A list of tokens on success, None on failure.
    """

    self.tokens = []
    while self.HasMore():
      self.SkipSpaces()
      if not self.HasMore():
        # Trailing whitespace only: treated as malformed input.
        return None
      if self.Current() == '(':
        self.AddToken('(')
        self.Advance()
      elif self.Current() == ')':
        self.AddToken(')')
        self.Advance()
      elif self.Current() == '$':
        self.AddToken('$')
        self.Advance()
      elif self.Current() == ',':
        self.AddToken(',')
        self.Advance()
      elif IsAlpha(self.Current()):
        # Accumulate a whole word (letters, digits, underscores).
        buf = ''
        while self.HasMore() and IsAlpha(self.Current()):
          buf += self.Current()
          self.Advance()
        self.AddToken(buf)
      elif self.Current(2) == '&&':
        self.AddToken('&&')
        self.Advance(2)
      elif self.Current(2) == '||':
        self.AddToken('||')
        self.Advance(2)
      elif self.Current(2) == '==':
        self.AddToken('==')
        self.Advance(2)
      else:
        # Unrecognized character: malformed input.
        return None
    return self.tokens
880 | |
881 | |
class Scanner(object):
  """A simple scanner that can serve out tokens from a given list."""

  def __init__(self, tokens):
    self.tokens = tokens
    self.length = len(tokens)
    self.index = 0  # Cursor into the token list.

  def HasMore(self):
    """True while unconsumed tokens remain."""
    return self.index < self.length

  def Current(self):
    """Returns the token at the cursor without consuming it."""
    return self.tokens[self.index]

  def Advance(self):
    """Moves the cursor past the current token."""
    self.index += 1
898 | |
899 | |
def ParseAtomicExpression(scan):
  """Parses one non-recursive term: a boolean literal, an outcome word,
  a '$variable' reference, or a parenthesized subexpression.

  Returns the parsed Expression, or None on malformed input.
  """
  token = scan.Current()
  if token == 'true':
    scan.Advance()
    return Constant(True)
  if token == 'false':
    scan.Advance()
    return Constant(False)
  if IsAlpha(token):
    scan.Advance()
    return Outcome(token.lower())
  if token == '$':
    scan.Advance()
    if not IsAlpha(scan.Current()):
      return None
    name = scan.Current()
    scan.Advance()
    return Variable(name.lower())
  if token == '(':
    scan.Advance()
    inner = ParseLogicalExpression(scan)
    if (not inner) or (scan.Current() != ')'):
      return None
    scan.Advance()
    return inner
  return None
929 | |
930 | |
def ParseOperatorExpression(scan):
  """Parses atomic expressions joined by the '==' operator."""
  expr = ParseAtomicExpression(scan)
  if not expr:
    return None
  while scan.HasMore() and scan.Current() == '==':
    scan.Advance()
    # Right recursion makes chained '==' right-associative.
    rhs = ParseOperatorExpression(scan)
    if not rhs:
      return None
    expr = Operation(expr, '==', rhs)
  return expr
943 | |
944 | |
def ParseConditionalExpression(scan):
  """Parses operator expressions joined by the 'if' keyword."""
  expr = ParseOperatorExpression(scan)
  if not expr:
    return None
  while scan.HasMore() and scan.Current() == 'if':
    scan.Advance()
    condition = ParseOperatorExpression(scan)
    if not condition:
      return None
    expr = Operation(expr, 'if', condition)
  return expr
955 | |
956 | |
def ParseLogicalExpression(scan):
  """Parse a binary expression separated by boolean operators."""
  expr = ParseConditionalExpression(scan)
  if not expr:
    return None
  while scan.HasMore() and scan.Current() in ('&&', '||', ','):
    operator = scan.Current()
    scan.Advance()
    rhs = ParseConditionalExpression(scan)
    if not rhs:
      return None
    expr = Operation(expr, operator, rhs)
  return expr
969 | |
970 | |
def ParseCondition(expr):
  """Parses a boolean expression into an Expression object.

  Prints a diagnostic and returns None when the expression cannot be
  tokenized, cannot be parsed, or has trailing unconsumed tokens.
  """
  tokens = Tokenizer(expr).Tokenize()
  if not tokens:
    print 'Malformed expression: "%s"' % expr
    return None
  scan = Scanner(tokens)
  ast = ParseLogicalExpression(scan)
  if not ast:
    print 'Malformed expression: "%s"' % expr
    return None
  if scan.HasMore():
    # Leftover tokens mean the expression was only partially consumed.
    print 'Malformed expression: "%s"' % expr
    return None
  return ast
986 | |
987 | |
class ClassifiedTest(object):
  """Pairs a test case with the set of outcomes allowed for it."""

  def __init__(self, case, outcomes):
    # Plain value holder; both attributes are read directly by callers.
    self.case = case
    self.outcomes = outcomes
993 | |
994 | |
class Configuration(object):
  """The parsed contents of a configuration file."""

  def __init__(self, sections, defs):
    self.sections = sections
    self.defs = defs

  def ClassifyTests(self, cases, env):
    """Matches a test case with the test prefixes requested on the cmdline.

    This 'wraps' each TestCase object with some meta information
    about the test.

    Args:
      cases: list of TestCase objects to classify.
      env: dictionary containing values for 'mode',
        'system', 'component', 'arch' and 'checked'.

    Returns:
      A triplet of (result, rules, expected_outcomes).
    """
    # Only sections whose condition holds in this environment apply.
    sections = [s for s in self.sections
                if s.condition.Evaluate(env, self.defs)]
    # 'reduce' is the Python 2 builtin; flattens the per-section rule lists.
    all_rules = reduce(list.__add__, [s.rules for s in sections], [])
    unused_rules = set(all_rules)
    result = []
    all_outcomes = set([])
    for case in cases:
      matches = [r for r in all_rules if r.Contains(case.path)]
      outcomes = set([])
      for rule in matches:
        outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
        unused_rules.discard(rule)
      if not outcomes:
        # No rule matched: the test is simply expected to pass.
        outcomes = [testing.PASS]
      case.outcomes = outcomes
      all_outcomes = all_outcomes.union(outcomes)
      result.append(ClassifiedTest(case, outcomes))
    return (result, list(unused_rules), all_outcomes)
1034 | |
1035 | |
class Section(object):
  """A section of the configuration file.

  Sections are enabled or disabled prior to running the tests,
  based on their conditions.
  """

  def __init__(self, condition):
    # Rules are accumulated as the .status file is parsed.
    self.rules = []
    self.condition = condition

  def AddRule(self, rule):
    """Appends a Rule to this section."""
    self.rules.append(rule)
1049 | |
1050 | |
1051 class Rule(object): | |
1052 """A single rule that specifies the expected outcome for a single test.""" | |
1053 | |
1054 def __init__(self, raw_path, path, value): | |
1055 self.raw_path = raw_path | |
1056 self.path = path | |
1057 self.value = value | |
1058 | |
1059 def GetOutcomes(self, env, defs): | |
1060 outcomes = self.value.GetOutcomes(env, defs) | |
1061 assert isinstance(outcomes, ListSet) | |
1062 return outcomes.elms | |
1063 | |
1064 def Contains(self, path): | |
1065 """Returns True if the specified path matches this rule (regexp).""" | |
1066 if len(self.path) > len(path): | |
1067 return False | |
1068 for i in xrange(len(self.path)): | |
1069 try: | |
1070 if not self.path[i].match(path[i]): | |
1071 return False | |
1072 except: | |
1073 print 'Invalid regexp %s in .status file. ' % '/'.join(path) | |
1074 print 'Try escaping special characters with \\' | |
1075 raise | |
1076 return True | |
1077 | |
1078 | |
# Patterns for the four line forms accepted in a .status file:
# a bracketed section header, a 'test-path: outcomes' rule,
# a 'def name = expr' definition, and a 'prefix path' directive.
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
1083 | |
1084 | |
1085 def ReadConfigurationInto(path, sections, defs): | |
1086 """Parses a .status file into specified sections and defs arguments.""" | |
1087 current_section = Section(Constant(True)) | |
1088 sections.append(current_section) | |
1089 prefix = [] | |
1090 for line in utils.ReadLinesFrom(path): | |
1091 header_match = HEADER_PATTERN.match(line) | |
1092 if header_match: | |
1093 condition_str = header_match.group(1).strip() | |
1094 condition = ParseCondition(condition_str) | |
1095 new_section = Section(condition) | |
1096 sections.append(new_section) | |
1097 current_section = new_section | |
1098 continue | |
1099 rule_match = RULE_PATTERN.match(line) | |
1100 if rule_match: | |
1101 path = prefix + _SplitPath(rule_match.group(1).strip()) | |
1102 value_str = rule_match.group(2).strip() | |
1103 value = ParseCondition(value_str) | |
1104 if not value: | |
1105 return False | |
1106 current_section.AddRule(Rule(rule_match.group(1), path, value)) | |
1107 continue | |
1108 def_match = DEF_PATTERN.match(line) | |
1109 if def_match: | |
1110 name = def_match.group(1).lower() | |
1111 value = ParseCondition(def_match.group(2).strip()) | |
1112 if not value: | |
1113 return False | |
1114 defs[name] = value | |
1115 continue | |
1116 prefix_match = PREFIX_PATTERN.match(line) | |
1117 if prefix_match: | |
1118 prefix = _SplitPath(prefix_match.group(1).strip()) | |
1119 continue | |
1120 print 'Malformed line: "%s".' % line | |
1121 return False | |
1122 return True | |
1123 | |
1124 | |
1125 # --------------- | |
1126 # --- M a i n --- | |
1127 # --------------- | |
1128 | |
1129 | |
def BuildOptions():
  """Configures the Python optparse library with the cmdline for test.py."""
  result = optparse.OptionParser()
  # Each entry is (option strings, add_option keyword arguments).
  # Registration order determines the order in --help output.
  option_specs = [
      (('-m', '--mode'),
       dict(help='The test modes in which to run (comma-separated)',
            metavar='[all,debug,release]',
            default='debug')),
      (('-v', '--verbose'),
       dict(help='Verbose output',
            default=False,
            action='store_true')),
      (('-p', '--progress'),
       dict(help='The style of progress indicator (verbose, line, color, mono)',
            choices=PROGRESS_INDICATORS.keys(),
            default=None)),
      (('--report',),
       dict(help='Print a summary of the tests to be run',
            default=False,
            action='store_true')),
      (('--list',),
       dict(help='List all the tests, but don\'t run them',
            default=False,
            action='store_true')),
      (('-s', '--suite'),
       dict(help='A test suite',
            default=[],
            action='append')),
      (('-t', '--timeout'),
       dict(help='Timeout in seconds',
            default=None,
            type='int')),
      (('--checked',),
       dict(help='Run tests in checked mode',
            default=False,
            action='store_true')),
      (('--flag',),
       dict(help='Pass this additional flag to the VM or the program running the test',
            default=[],
            action='append')),
      (('--arch',),
       dict(help='The architecture to run tests for',
            metavar='[all,ia32,x64,simarm,arm]',
            default=ARCH_GUESS)),
      (('--os',),
       dict(help='The OS to run tests on',
            default=OS_GUESS)),
      (('--valgrind',),
       dict(help='Run tests through valgrind',
            default=False,
            action='store_true')),
      (('-j', '--tasks'),
       dict(help='The number of parallel tasks to run',
            metavar=testing.HOST_CPUS,
            default=testing.USE_DEFAULT_CPUS,
            type='int')),
      (('--time',),
       dict(help='Print timing information after running',
            default=False,
            action='store_true')),
      (('--executable',),
       dict(help='The executable with which to run the tests',
            default=None)),
      (('--keep_temporary_files',),
       dict(help='Do not delete temporary files after running the tests',
            default=False,
            action='store_true')),
      (('--batch',),
       dict(help='Run multiple tests for dartc component in a single vm',
            choices=['true', 'false'],
            default='true',
            type='choice')),
      (('--optimize',),
       dict(help='Invoke dart compiler with --optimize flag',
            default=False,
            action='store_true')),
      (('-c', '--component'),
       dict(help='The component to test against '
            '(most, vm, dartc, frog, frogsh, leg, chromium, dartium, webdriver)',
            metavar='[most,vm,dartc,chromium,dartium]',
            default='vm')),
  ]
  for opt_strings, kwargs in option_specs:
    result.add_option(*opt_strings, **kwargs)
  return result
1230 | |
1231 | |
1232 def ProcessOptions(options): | |
1233 """Process command line options.""" | |
1234 if options.arch == 'all': | |
1235 options.arch = 'ia32,x64,simarm' | |
1236 if options.mode == 'all': | |
1237 options.mode = 'debug,release' | |
1238 if options.component == 'most': | |
1239 options.component = 'vm,dartc' | |
1240 | |
1241 # By default we run with a higher timeout setting in when running on | |
1242 # a simulated architecture and in debug mode. | |
1243 if not options.timeout: | |
1244 options.timeout = TIMEOUT_SECS | |
1245 if 'dartc' in options.component: | |
1246 options.timeout *= 4 | |
1247 elif 'chromium' in options.component: | |
1248 options.timeout *= 4 | |
1249 elif 'dartium' in options.component: | |
1250 options.timeout *= 4 | |
1251 elif 'debug' in options.mode: | |
1252 options.timeout *= 2 | |
1253 options.mode = options.mode.split(',') | |
1254 options.arch = options.arch.split(',') | |
1255 options.component = options.component.split(',') | |
1256 for mode in options.mode: | |
1257 if not mode in ['debug', 'release']: | |
1258 print 'Unknown mode %s' % mode | |
1259 return False | |
1260 for arch in options.arch: | |
1261 if not arch in ['ia32', 'x64', 'simarm', 'arm']: | |
1262 print 'Unknown arch %s' % arch | |
1263 return False | |
1264 for component in options.component: | |
1265 if not component in ['vm', 'dartc', 'frog', 'frogsh', 'leg', | |
1266 'chromium', 'dartium', 'frogium', 'webdriver']: | |
1267 print 'Unknown component %s' % component | |
1268 return False | |
1269 options.flags = [] | |
1270 options.flags.append('--ignore-unrecognized-flags') | |
1271 if options.checked: | |
1272 options.flags.append('--enable_asserts') | |
1273 options.flags.append('--enable_type_checks') | |
1274 if options.optimize: | |
1275 options.flags.append('--optimize') | |
1276 for flag in options.flag: | |
1277 options.flags.append(flag) | |
1278 if options.verbose: | |
1279 print 'Flags on the command line:' | |
1280 for x in options.flags: | |
1281 print x | |
1282 # If the user hasn't specified the progress indicator, we pick | |
1283 # a good one depending on the setting of the verbose option. | |
1284 if not options.progress: | |
1285 if options.verbose: options.progress = 'verbose' | |
1286 else: options.progress = 'mono' | |
1287 # Options for future use. Such as Windows runner support. | |
1288 options.suppress_dialogs = True | |
1289 options.special_command = None | |
1290 return True | |
1291 | |
1292 | |
# Summary printed by PrintReport; formatted with a dict of counts
# keyed by total/skipped/nocrash/pass/fail_ok/fail/crash/batched.
REPORT_TEMPLATE = """\
Total: %(total)i tests
 * %(skipped)4d tests will be skipped
 * %(nocrash)4d tests are expected to be flaky but not crash
 * %(pass)4d tests are expected to pass
 * %(fail_ok)4d tests are expected to fail that we won't fix
 * %(fail)4d tests are expected to fail that we should fix
 * %(crash)4d tests are expected to crash that we should fix
 * %(batched)4d tests are running in batch mode\
"""
1303 | |
1304 | |
1305 def PrintReport(cases): | |
1306 """Print a breakdown of which tests are marked pass/skip/fail.""" | |
1307 | |
1308 def IsFlaky(o): | |
1309 return ((testing.PASS in o) and (testing.FAIL in o) | |
1310 and (not testing.CRASH in o) and (not testing.OKAY in o)) | |
1311 | |
1312 def IsFailOk(o): | |
1313 return (len(o) == 2) and (testing.FAIL in o) and (testing.OKAY in o) | |
1314 | |
1315 unskipped = [c for c in cases if not testing.SKIP in c.outcomes] | |
1316 print REPORT_TEMPLATE % { | |
1317 'total': len(cases), | |
1318 'skipped': len(cases) - len(unskipped), | |
1319 'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]), | |
1320 'pass': len([t for t in unskipped | |
1321 if list(t.outcomes) == [testing.PASS]]), | |
1322 'fail_ok': len([t for t in unskipped | |
1323 if IsFailOk(t.outcomes)]), | |
1324 'fail': len([t for t in unskipped | |
1325 if list(t.outcomes) == [testing.FAIL]]), | |
1326 'crash': len([t for t in unskipped | |
1327 if list(t.outcomes) == [testing.CRASH]]), | |
1328 'batched': len([t for t in unskipped if t.case.IsBatchable()]) | |
1329 } | |
1330 | |
1331 | |
1332 def PrintTests(cases): | |
1333 """Print a table of the tests to be run (--list cmdline option).""" | |
1334 has_errors = False | |
1335 for case in cases: | |
1336 try: | |
1337 case.case.GetCommand() | |
1338 except: | |
1339 # Python can throw an exception while parsing the .dart file. | |
1340 # We don't want to end the program. | |
1341 # TODO(zundel): something better... its a bit of a hack. | |
1342 sys.stderr.write(case.case.filename + '\n') | |
1343 has_errors = True | |
1344 if has_errors: | |
1345 raise Exception('Errors in above files') | |
1346 for case in [c for c in cases if not testing.SKIP in c.outcomes]: | |
1347 print '%s\t%s\t%s\t%s' %('/'.join(case.case.path), | |
1348 ','.join(case.outcomes), | |
1349 case.case.IsNegative(), | |
1350 '\t'.join(case.case.GetCommand()[1:])) | |
1351 | |
1352 | |
class Pattern(object):
  """Convenience class to hold a compiled re pattern."""

  def __init__(self, pattern):
    # Compilation is deferred until the first match() call.
    self.pattern = pattern
    self.compiled = None

  def match(self, buf):
    if self.compiled is None:
      # '*' acts as a wildcard; anchor both ends so the pattern must
      # cover the entire string.
      self.compiled = re.compile('^%s$' % self.pattern.replace('*', '.*'))
    return self.compiled.match(buf)

  def __str__(self):
    return self.pattern
1368 | |
1369 | |
def _SplitPath(s):
  """Split a path into directories - opposite of os.path.join()?

  Empty components (from leading/trailing/doubled slashes) are dropped.
  Returns a list of Pattern objects, one per path component.
  """
  # Use a distinct loop variable: the original comprehension rebound the
  # parameter name 's', which is confusing (and leaks in Python 2).
  stripped = [c.strip() for c in s.split('/')]
  return [Pattern(piece) for piece in stripped if piece]
1374 | |
1375 | |
def GetSpecialCommandProcessor(value):
  """Returns a function that rewrites a test command line.

  value has the form 'prefix@suffix' (both halves URL-quoted); the '@'
  marks where the original command is spliced in. When value is empty or
  has no '@', commands are passed through unchanged.
  """
  if value and '@' in value:
    pos = value.find('@')
    prefix = urllib.unquote(value[:pos]).split()
    suffix = urllib.unquote(value[pos+1:]).split()

    def ExpandCommand(args):
      return prefix + args + suffix
  else:

    def ExpandCommand(args):
      return args

  return ExpandCommand
1392 | |
1393 | |
def GetSuites(test_root):
  """Lists subdirectories of test_root that contain a testcfg.py file."""

  def IsSuite(path):
    return os.path.isdir(path) and os.path.exists(
        os.path.join(path, 'testcfg.py'))

  suites = []
  for name in os.listdir(test_root):
    if IsSuite(os.path.join(test_root, name)):
      suites.append(name)
  return suites
1400 | |
1401 | |
def FormatTime(d):
  """Formats a duration d (seconds) as a 'MM:SS.mmm' string."""
  millis = round(d * 1000) % 1000
  return '%s%03i' % (time.strftime('%M:%S.', time.gmtime(d)), millis)
1405 | |
1406 | |
def Main():
  """Main loop.

  Discovers test suites under the client tree, classifies each test
  against the .status expectations, optionally prints a report or a
  listing, then runs the remaining cases.

  Returns:
    Process exit code: 0 on success, 1 on failure or bad options.
  """
  script_start_time = time.time();
  utils.ConfigureJava()
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1

  # The client root is the parent of the directory holding this script.
  client = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '..'))
  repositories = []
  # Any 'tests' directory under a component (or under the client root
  # itself, via '.') may contain test suites.
  for component in os.listdir(client) + ['.']:
    test_path = os.path.join(client, component, 'tests')
    if os.path.exists(test_path) and os.path.isdir(test_path):
      suites = GetSuites(test_path)
      repositories += [TestRepository(os.path.join(test_path, name))
                       for name in suites]
  # Suites named explicitly with -s/--suite are added as well.
  repositories += [TestRepository(a) for a in options.suite]

  root = LiteralTestSuite(repositories)
  # Positional arguments select test paths; default to the built-in list.
  if args:
    paths = []
    for arg in args:
      path = _SplitPath(arg)
      paths.append(path)
  else:
    paths = [_SplitPath(t) for t in BUILT_IN_TESTS]

  # Check for --valgrind option. If enabled, we overwrite the special
  # command flag with a command that uses the tools/valgrind.py script.
  if options.valgrind:
    run_valgrind = os.path.join(client, 'runtime', 'tools', 'valgrind.py')
    options.special_command = 'python -u ' + run_valgrind + ' @'

  context = Context(client,
                    options.verbose,
                    options.os,
                    options.timeout,
                    GetSpecialCommandProcessor(options.special_command),
                    options.suppress_dialogs,
                    options.executable,
                    options.flags,
                    options.keep_temporary_files,
                    options.batch,
                    options.checked)

  # Get status for tests
  sections = []
  defs = {}
  root.GetTestStatus(context, sections, defs)
  config = Configuration(sections, defs)

  # List the tests
  all_cases = []
  all_unused = []
  globally_unused_rules = None
  # Classify every path once per (mode, arch, component) combination.
  for path in paths:
    for mode in options.mode:
      for arch in options.arch:
        for component in options.component:
          env = {
              'mode': mode,
              'system': utils.GuessOS(),
              'arch': arch,
              'component': component,
              'checked': options.checked,
              'unchecked': not options.checked,
          }
          test_list = root.ListTests([], path, context, mode, arch, component)
          (cases, unused_rules, unused_outcomes) = config.ClassifyTests(
              test_list, env)
          # Keep only rules unused in every configuration, so stale
          # .status entries can be detected.
          if globally_unused_rules is None:
            globally_unused_rules = set(unused_rules)
          else:
            globally_unused_rules = (
                globally_unused_rules.intersection(unused_rules))
          all_cases += cases
          all_unused.append(unused_rules)

  if options.report:
    PrintReport(all_cases)

  if options.list:
    PrintTests(all_cases)
    return 0

  result = None

  def DoSkip(case):
    # SLOW tests are skipped by the normal runner as well as SKIP ones.
    return testing.SKIP in case.outcomes or testing.SLOW in case.outcomes

  cases_to_run = [c for c in all_cases if not DoSkip(c)]
  # Creating test cases may generate temporary files. Make sure
  # Skipped tests clean up these files.
  for c in all_cases:
    if DoSkip(c): c.case.Cleanup()

  if cases_to_run:
    try:
      start = time.time()
      if RunTestCases(cases_to_run, options.progress, options.tasks,
                      context, script_start_time):
        result = 0
      else:
        result = 1
      duration = time.time() - start
    except KeyboardInterrupt:
      print 'Exiting on KeyboardInterrupt'
      return 1
  else:
    print 'No tests to run.'
    return 0

  if options.time:
    # Print total wall-clock time followed by the 20 slowest tests.
    print
    print '--- Total time: %s ---' % FormatTime(duration)
    timed_tests = [t.case for t in cases_to_run if not t.case.duration is None]
    timed_tests.sort(lambda a, b: a.CompareTime(b))
    index = 1
    for entry in timed_tests[:20]:
      t = FormatTime(entry.duration)
      print '%4i (%s) %s' % (index, t, entry.GetLabel())
      index += 1

  return result
1533 | |
1534 | |
# Script entry point: the process exit status is Main()'s return value.
if __name__ == '__main__':
  sys.exit(Main())
OLD | NEW |