Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(237)

Side by Side Diff: tools/testrunner/local/execution.py

Issue 307553003: Add flag to test harness to stop sorting test cases. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Addressed comments by Jakob Kummerow. Created 6 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « tools/run-tests.py ('k') | tools/testrunner/objects/context.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2012 the V8 project authors. All rights reserved. 1 # Copyright 2012 the V8 project authors. All rights reserved.
2 # Redistribution and use in source and binary forms, with or without 2 # Redistribution and use in source and binary forms, with or without
3 # modification, are permitted provided that the following conditions are 3 # modification, are permitted provided that the following conditions are
4 # met: 4 # met:
5 # 5 #
6 # * Redistributions of source code must retain the above copyright 6 # * Redistributions of source code must retain the above copyright
7 # notice, this list of conditions and the following disclaimer. 7 # notice, this list of conditions and the following disclaimer.
8 # * Redistributions in binary form must reproduce the above 8 # * Redistributions in binary form must reproduce the above
9 # copyright notice, this list of conditions and the following 9 # copyright notice, this list of conditions and the following
10 # disclaimer in the documentation and/or other materials provided 10 # disclaimer in the documentation and/or other materials provided
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
58 return (job.id, output, time.time() - start_time) 58 return (job.id, output, time.time() - start_time)
59 59
60 class Runner(object): 60 class Runner(object):
61 61
def __init__(self, suites, progress_indicator, context):
  # Set up the perf-data store for this arch/mode, collect every test
  # from the given suites, and seed each test's expected duration from
  # recorded history before delegating to the shared initializer.
  store_path = os.path.join("out", "testrunner_data")
  self.perf_data_manager = perfdata.PerfDataManager(store_path)
  self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
  self.tests = []
  for suite in suites:
    self.tests.extend(suite.tests)
  for test in self.tests:
    # Fall back to 1 second when no historical timing is recorded.
    test.duration = self.perfdata.FetchPerfData(test) or 1.0
  self._CommonInit(len(self.tests), progress_indicator, context)
70 70
71 def _CommonInit(self, num_tests, progress_indicator, context): 71 def _CommonInit(self, num_tests, progress_indicator, context):
72 self.indicator = progress_indicator 72 self.indicator = progress_indicator
73 progress_indicator.runner = self 73 progress_indicator.runner = self
74 self.context = context 74 self.context = context
75 self.succeeded = 0 75 self.succeeded = 0
76 self.total = num_tests 76 self.total = num_tests
77 self.remaining = num_tests 77 self.remaining = num_tests
78 self.failed = [] 78 self.failed = []
79 self.crashed = 0 79 self.crashed = 0
80 80
def Run(self, jobs):
  """Run all tests with the given number of parallel jobs.

  Returns 0 when every test ran and passed, 1 when any test failed
  or tests were left unrun.
  """
  self.indicator.Starting()
  self._RunInternal(jobs)
  self.indicator.Done()
  return 1 if (self.failed or self.remaining) else 0
88 88
89 def _RunInternal(self, jobs): 89 def _RunInternal(self, jobs):
90 pool = Pool(jobs) 90 pool = Pool(jobs)
91 test_map = {} 91 test_map = {}
92 # TODO(machenbach): Instead of filling the queue completely before 92 # TODO(machenbach): Instead of filling the queue completely before
93 # pool.imap_unordered, make this a generator that already starts testing 93 # pool.imap_unordered, make this a generator that already starts testing
94 # while the queue is filled. 94 # while the queue is filled.
95 queue = [] 95 queue = []
96 queued_exception = None 96 queued_exception = None
97 for test in sorted(self.tests, key=lambda t: t.duration, reverse=True): 97 if not self.context.no_sorting:
98 self.tests = sorted(self.tests, key=lambda t: t.duration, reverse=True)
Michael Achenbach 2014/05/28 11:08:32 Not totally happy with replacing generators with c
Jakob Kummerow 2014/05/28 11:12:16 Right, should probably use self.tests.sort(key=...
Michael Starzinger 2014/05/28 11:24:56 Will upload a follow-up CL to address this.
99 for test in self.tests:
98 assert test.id >= 0 100 assert test.id >= 0
99 test_map[test.id] = test 101 test_map[test.id] = test
100 try: 102 try:
101 command = self.GetCommand(test) 103 command = self.GetCommand(test)
102 except Exception, e: 104 except Exception, e:
103 # If this failed, save the exception and re-raise it later (after 105 # If this failed, save the exception and re-raise it later (after
104 # all other tests have had a chance to run). 106 # all other tests have had a chance to run).
105 queued_exception = e 107 queued_exception = e
106 continue 108 continue
107 timeout = self.context.timeout 109 timeout = self.context.timeout
(...skipping 16 matching lines...) Expand all
124 test.duration = result[2] 126 test.duration = result[2]
125 has_unexpected_output = test.suite.HasUnexpectedOutput(test) 127 has_unexpected_output = test.suite.HasUnexpectedOutput(test)
126 if has_unexpected_output: 128 if has_unexpected_output:
127 self.failed.append(test) 129 self.failed.append(test)
128 if test.output.HasCrashed(): 130 if test.output.HasCrashed():
129 self.crashed += 1 131 self.crashed += 1
130 else: 132 else:
131 self.succeeded += 1 133 self.succeeded += 1
132 self.remaining -= 1 134 self.remaining -= 1
133 try: 135 try:
134 self.perfdata.UpdatePerfData(test) 136 self.perfdata.UpdatePerfData(test)
Michael Achenbach 2014/05/28 11:08:32 Also here...
Jakob Kummerow 2014/05/28 11:12:16 I disagree. Updating the perfstore should be fast
135 except Exception, e: 137 except Exception, e:
136 print("UpdatePerfData exception: %s" % e) 138 print("UpdatePerfData exception: %s" % e)
137 pass # Just keep working. 139 pass # Just keep working.
138 self.indicator.HasRun(test, has_unexpected_output) 140 self.indicator.HasRun(test, has_unexpected_output)
139 finally: 141 finally:
140 pool.terminate() 142 pool.terminate()
141 self.perf_data_manager.close() 143 self.perf_data_manager.close()
142 if queued_exception: 144 if queued_exception:
143 raise queued_exception 145 raise queued_exception
144 146
(...skipping 12 matching lines...) Expand all
157 test.suite.GetFlagsForTestCase(test, self.context) + 159 test.suite.GetFlagsForTestCase(test, self.context) +
158 self.context.extra_flags) 160 self.context.extra_flags)
159 return cmd 161 return cmd
160 162
161 163
class BreakNowException(Exception):
  """Raised to abort the test run immediately.

  Carries an arbitrary payload in .value describing why the run was
  stopped; str() of the exception is the repr of that payload.
  """

  def __init__(self, value):
    # Forward to Exception so that e.args is populated and the
    # exception pickles/reprs like a normal exception (the original
    # skipped this, leaving args empty).
    Exception.__init__(self, value)
    self.value = value

  def __str__(self):
    return repr(self.value)
OLDNEW
« no previous file with comments | « tools/run-tests.py ('k') | tools/testrunner/objects/context.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698