Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Tools/Scripts/webkitpy/layout_tests/run_webkit_tests.py

Issue 1783073002: Run auto-formatter on files in webkitpy/layout_tests/. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Ran yapf -i --style '{based_on_style: pep8, column_limit: 132}', then did manual fix-up. Created 4 years, 9 months ago.
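
For reviewers unfamiliar with yapf, the snippet below is a small illustrative sketch (not part of the patch) of the kind of rewrap this command produces: with based_on_style: pep8 and column_limit: 132, short calls are joined onto one line and continuation arguments are aligned under the first argument. The examples mirror two hunks from the diff below; exact output can vary with the yapf version.

    # Before (hand-wrapped):
    option_group_definitions.append(
        ("Platform options", platform_options()))
    optparse.make_option(
        "--results-directory",
        help="Location of test results"),

    # After: yapf -i --style '{based_on_style: pep8, column_limit: 132}'
    option_group_definitions.append(("Platform options", platform_options()))
    optparse.make_option("--results-directory",
                         help="Location of test results"),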
1 # Copyright (C) 2010 Google Inc. All rights reserved. 1 # Copyright (C) 2010 Google Inc. All rights reserved.
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
3 # Copyright (C) 2011 Apple Inc. All rights reserved. 3 # Copyright (C) 2011 Apple Inc. All rights reserved.
4 # 4 #
5 # Redistribution and use in source and binary forms, with or without 5 # Redistribution and use in source and binary forms, with or without
6 # modification, are permitted provided that the following conditions are 6 # modification, are permitted provided that the following conditions are
7 # met: 7 # met:
8 # 8 #
9 # * Redistributions of source code must retain the above copyright 9 # * Redistributions of source code must retain the above copyright
10 # notice, this list of conditions and the following disclaimer. 10 # notice, this list of conditions and the following disclaimer.
(...skipping 28 matching lines...)
39 from webkitpy.layout_tests.controllers.manager import Manager 39 from webkitpy.layout_tests.controllers.manager import Manager
40 from webkitpy.layout_tests.models import test_run_results 40 from webkitpy.layout_tests.models import test_run_results
41 from webkitpy.layout_tests.port.factory import configuration_options, platform_options 41 from webkitpy.layout_tests.port.factory import configuration_options, platform_options
42 from webkitpy.layout_tests.views import buildbot_results 42 from webkitpy.layout_tests.views import buildbot_results
43 from webkitpy.layout_tests.views import printing 43 from webkitpy.layout_tests.views import printing
44 from webkitpy.layout_tests.generate_results_dashboard import DashBoardGenerator 44 from webkitpy.layout_tests.generate_results_dashboard import DashBoardGenerator
45 45
46 _log = logging.getLogger(__name__) 46 _log = logging.getLogger(__name__)
47 47
48 48
49
50 def main(argv, stdout, stderr): 49 def main(argv, stdout, stderr):
51 options, args = parse_args(argv) 50 options, args = parse_args(argv)
52 51
53 if options.platform and 'test' in options.platform and not 'browser_test' in options.platform: 52 if options.platform and 'test' in options.platform and not 'browser_test' in options.platform:
54 # It's a bit lame to import mocks into real code, but this allows the user 53 # It's a bit lame to import mocks into real code, but this allows the user
55 # to run tests against the test platform interactively, which is useful for 54 # to run tests against the test platform interactively, which is useful for
56 # debugging test failures. 55 # debugging test failures.
57 from webkitpy.common.host_mock import MockHost 56 from webkitpy.common.host_mock import MockHost
58 host = MockHost() 57 host = MockHost()
59 else: 58 else:
(...skipping 18 matching lines...)
78 except BaseException as e: 77 except BaseException as e:
79 if isinstance(e, Exception): 78 if isinstance(e, Exception):
80 print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e)) 79 print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
81 traceback.print_exc(file=stderr) 80 traceback.print_exc(file=stderr)
82 return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS 81 return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
83 82
84 83
85 def parse_args(args): 84 def parse_args(args):
86 option_group_definitions = [] 85 option_group_definitions = []
87 86
87 option_group_definitions.append(("Platform options", platform_options()))
88
89 option_group_definitions.append(("Configuration options", configuration_opti ons()))
90
91 option_group_definitions.append(("Printing Options", printing.print_options( )))
92
88 option_group_definitions.append( 93 option_group_definitions.append(
89 ("Platform options", platform_options())) 94 ("Android-specific Options",
95 [
96 optparse.make_option("--adb-device",
97 action="append",
98 default=[],
99 help="Run Android layout tests on these device s."),
100 # FIXME: Flip this to be off by default once we can log the
101 # device setup more cleanly.
102 optparse.make_option("--no-android-logging",
103 dest="android_logging",
104 action="store_false",
105 default=True,
106 help=("Do not log android-specific debug messa ges (default is to log as part "
107 "of --debug-rwt-logging")),
108 ]))
90 109
91 option_group_definitions.append( 110 option_group_definitions.append(
92 ("Configuration options", configuration_options())) 111 ("Results Options",
112 [
113 optparse.make_option("--add-platform-exceptions",
114 action="store_true",
115 default=False,
116 help=("Save generated results into the *most-s pecific-platform* directory rather "
117 "than the *generic-platform* directory") ),
118 optparse.make_option("--additional-driver-flag",
119 "--additional-drt-flag",
120 dest="additional_driver_flag",
121 action="append",
122 default=[],
123 help=("Additional command line flag to pass to the driver. Specify multiple "
124 "times to add multiple flags.")),
125 optparse.make_option("--additional-expectations",
126 action="append",
127 default=[],
128 help=("Path to a test_expectations file that w ill override previous "
129 "expectations. Specify multiple times fo r multiple sets of overrides.")),
130 optparse.make_option("--additional-platform-directory",
131 action="append",
132 default=[],
133 help=("Additional directory where to look for test baselines (will take "
134 "precedence over platform baselines). Sp ecify multiple times to add "
135 "multiple search path entries.")),
136 optparse.make_option("--build-directory",
137 help=("Path to the directory under which build files are kept (should not "
138 "include configuration)")),
139 optparse.make_option("--clobber-old-results",
140 action="store_true",
141 default=False,
142 help="Clobbers test results from previous runs ."),
143 optparse.make_option("--compare-port",
144 action="store",
145 default=None,
146 help="Use the specified port's baselines first "),
147 optparse.make_option("--driver-name",
148 type="string",
149 help="Alternative driver binary to use"),
150 optparse.make_option("--full-results-html",
151 action="store_true",
152 default=False,
153 help="Show all failures in results.html, rathe r than only regressions"),
154 optparse.make_option("--new-baseline",
155 action="store_true",
156 default=False,
157 help=("Save generated results as new baselines into the *most-specific-platform* "
158 "directory, overwriting whatever's alrea dy there. Equivalent to "
159 "--reset-results --add-platform-exceptio ns")),
160 # TODO(ojan): Remove once bots stop using it.
161 optparse.make_option("--no-new-test-results",
162 help="This doesn't do anything. TODO(ojan): Re move once bots stop using it."),
163 optparse.make_option("--new-test-results",
164 action="store_true",
165 default=False,
166 help="Create new baselines when no expected re sults exist"),
167 optparse.make_option("--no-show-results",
168 dest="show_results",
169 action="store_false",
170 default=True,
171 help="Don't launch a browser with results afte r the tests are done"),
172 optparse.make_option("-p",
173 "--pixel",
174 "--pixel-tests",
175 dest="pixel_tests",
176 action="store_true",
177 help="Enable pixel-to-pixel PNG comparisons"),
178 optparse.make_option("--no-pixel",
179 "--no-pixel-tests",
180 dest="pixel_tests",
181 action="store_false",
182 help="Disable pixel-to-pixel PNG comparisons") ,
183 # FIXME: we should support a comma separated list with
184 # --pixel-test-directory as well.
185 optparse.make_option("--pixel-test-directory",
186 dest="pixel_test_directories",
187 action="append",
188 default=[],
189 help=("A directory where it is allowed to exec ute tests as pixel tests. Specify "
190 "multiple times to add multiple director ies. This option implies "
191 "--pixel-tests. If specified, only those tests will be executed as pixel "
192 "tests that are located in one of the"
193 " directories enumerated with the "
194 "option. Some ports may ignore this opti on while others can have a default "
195 "value that can be overridden here.")),
196 optparse.make_option("--reset-results",
197 action="store_true",
198 default=False,
199 help="Reset expectations to the generated resu lts in their existing location."),
200 optparse.make_option("--results-directory",
201 help="Location of test results"),
202 optparse.make_option("--skip-failing-tests",
203 action="store_true",
204 default=False,
205 help=("Skip tests that are expected to fail. N ote: When using this option, "
206 "you might miss new crashes in these tes ts.")),
207 optparse.make_option("--smoke",
208 action="store_true",
209 help="Run just the SmokeTests"),
210 optparse.make_option("--no-smoke",
211 dest="smoke",
212 action="store_false",
213 help="Do not run just the SmokeTests"),
214 ]))
93 215
94 option_group_definitions.append( 216 option_group_definitions.append(
95 ("Printing Options", printing.print_options())) 217 ("Testing Options",
96 218 [
97 option_group_definitions.append( 219 optparse.make_option("--additional-env-var",
98 ("Android-specific Options", [ 220 type="string",
99 optparse.make_option( 221 action="append",
100 "--adb-device", 222 default=[],
101 action="append", 223 help=("Passes that environment variable to the tests "
102 default=[], 224 "(--additional-env-var=NAME=VALUE)")),
103 help="Run Android layout tests on these devices."), 225 optparse.make_option("--batch-size",
104 # FIXME: Flip this to be off by default once we can log the 226 type="int",
105 # device setup more cleanly. 227 default=None,
106 optparse.make_option( 228 help=("Run a the tests in batches (n), after every n tests, the driver is "
107 "--no-android-logging", 229 "relaunched.")),
108 dest="android_logging", 230 optparse.make_option("--build",
109 action="store_false", 231 dest="build",
110 default=True, 232 action="store_true",
111 help=("Do not log android-specific debug messages (default is to log as part " 233 default=True,
112 "of --debug-rwt-logging")), 234 help=("Check to ensure the build is up-to-date (default).")),
113 ])) 235 optparse.make_option("--no-build",
114 236 dest="build",
115 option_group_definitions.append( 237 action="store_false",
116 ("Results Options", [ 238 help="Don't check to see if the build is up-to -date."),
117 optparse.make_option( 239 optparse.make_option("--child-processes",
118 "--add-platform-exceptions", 240 help="Number of drivers to run in parallel."),
119 action="store_true", 241 optparse.make_option("--enable-wptserve",
120 default=False, 242 dest="enable_wptserve",
121 help=("Save generated results into the *most-specific-platform* directory rather " 243 action="store_true",
122 "than the *generic-platform* directory")), 244 default=False,
123 optparse.make_option( 245 help="Enable running web-platform-tests using WPTserve instead of Apache."),
124 "--additional-driver-flag", 246 optparse.make_option("--disable-breakpad",
125 "--additional-drt-flag", 247 action="store_true",
126 dest="additional_driver_flag", 248 help="Don't use breakpad to symbolize unexpect ed crashes."),
127 action="append", 249 optparse.make_option("--driver-logging",
128 default=[], 250 action="store_true",
129 help=("Additional command line flag to pass to the driver. Speci fy multiple " 251 help="Print detailed logging of the driver/con tent_shell"),
130 "times to add multiple flags.")), 252 optparse.make_option("--enable-leak-detection",
131 optparse.make_option( 253 action="store_true",
132 "--additional-expectations", 254 help="Enable the leak detection of DOM objects ."),
133 action="append", 255 optparse.make_option("--enable-sanitizer",
134 default=[], 256 action="store_true",
135 help=("Path to a test_expectations file that will override previ ous " 257 help="Only alert on sanitizer-related errors a nd crashes"),
136 "expectations. Specify multiple times for multiple sets of overrides.")), 258 optparse.make_option("--exit-after-n-crashes-or-timeouts",
137 optparse.make_option( 259 type="int",
138 "--additional-platform-directory", 260 default=None,
139 action="append", 261 help="Exit after the first N crashes instead o f running all tests"),
140 default=[], 262 optparse.make_option("--exit-after-n-failures",
141 help=("Additional directory where to look for test baselines (wi ll take " 263 type="int",
142 "precedence over platform baselines). Specify multiple tim es to add " 264 default=None,
143 "multiple search path entries.")), 265 help="Exit after the first N failures instead of running all tests"),
144 optparse.make_option( 266 optparse.make_option("--ignore-builder-category",
145 "--build-directory", 267 action="store",
146 help=("Path to the directory under which build files are kept (s hould not " 268 help=("The category of builders to use with th e --ignore-flaky-tests option "
147 "include configuration)")), 269 "('layout' or 'deps').")),
148 optparse.make_option( 270 optparse.make_option("--ignore-flaky-tests",
149 "--clobber-old-results", 271 action="store",
150 action="store_true", 272 help=("Control whether tests that are flaky on the bots get ignored. "
151 default=False, 273 "'very-flaky' == Ignore any tests that flaked more than once on the bot. "
152 help="Clobbers test results from previous runs."), 274 "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
153 optparse.make_option( 275 "'unexpected' == Ignore any tests that had unexpected results on the bot.")),
154 "--compare-port", 276 optparse.make_option("--iterations",
155 action="store", 277 type="int",
156 default=None, 278 default=1,
157 help="Use the specified port's baselines first"), 279 help="Number of times to run the set of tests (e.g. ABCABCABC)"),
158 optparse.make_option( 280 optparse.make_option("--max-locked-shards",
159 "--driver-name", 281 type="int",
160 type="string", 282 default=0,
161 help="Alternative driver binary to use"), 283 help="Set the maximum number of locked shards" ),
162 optparse.make_option( 284 optparse.make_option("--nocheck-sys-deps",
163 "--full-results-html", 285 action="store_true",
164 action="store_true", 286 default=False,
165 default=False, 287 help="Don't check the system dependencies (themes)"),
166 help="Show all failures in results.html, rather than only regres sions"), 288 optparse.make_option("--order",
167 optparse.make_option( 289 action="store",
168 "--new-baseline", 290 default="natural",
169 action="store_true", 291 help=("determine the order in which the test c ases will be run. "
170 default=False, 292 "'none' == use the order in which the te sts were listed "
171 help=("Save generated results as new baselines into the *most-sp ecific-platform* " 293 "either in arguments or test list, "
172 "directory, overwriting whatever's already there. Equivale nt to " 294 "'natural' == use the natural order (def ault), "
173 "--reset-results --add-platform-exceptions")), 295 "'random-seeded' == randomize the test o rder using a fixed seed, "
174 # TODO(ojan): Remove once bots stop using it. 296 "'random' == randomize the test order.") ),
175 optparse.make_option( 297 optparse.make_option("--profile",
176 "--no-new-test-results", 298 action="store_true",
177 help="This doesn't do anything. TODO(ojan): Remove once bots sto p using it."), 299 help="Output per-test profile information."),
178 optparse.make_option( 300 optparse.make_option("--profiler",
179 "--new-test-results", 301 action="store",
180 action="store_true", 302 help="Output per-test profile information, usi ng the specified profiler."),
181 default=False, 303 optparse.make_option("--repeat-each",
182 help="Create new baselines when no expected results exist"), 304 type="int",
183 optparse.make_option( 305 default=1,
184 "--no-show-results", 306 help="Number of times to run each test (e.g. A AABBBCCC)"),
185 dest="show_results", 307 # TODO(joelo): Delete --retry-failures and --no-retry-failures as t hey
186 action="store_false", 308 # are redundant with --num-retries.
187 default=True, 309 optparse.make_option("--retry-failures",
188 help="Don't launch a browser with results after the tests are do ne"), 310 action="store_true",
189 optparse.make_option( 311 help=("Re-try any tests that produce unexpected results. Default is to not retry "
190 "-p", 312 "if an explicit list of tests is passed to run-webkit-tests.")),
191 "--pixel", 313 optparse.make_option("--no-retry-failures",
192 "--pixel-tests", 314 dest="retry_failures",
193 dest="pixel_tests", 315 action="store_false",
194 action="store_true", 316 help="Don't re-try any tests that produce unex pected results."),
195 help="Enable pixel-to-pixel PNG comparisons"), 317 optparse.make_option("--num-retries",
196 optparse.make_option( 318 type="int",
197 "--no-pixel", 319 default=3,
198 "--no-pixel-tests", 320 help=("Number of times to retry failures, defa ult is 3. Only relevant when "
199 dest="pixel_tests", 321 "failure retries are enabled.")),
200 action="store_false", 322 optparse.make_option("--run-chunk",
201 help="Disable pixel-to-pixel PNG comparisons"), 323 help="Run a specified chunk (n:l), the nth of len l, of the layout tests"),
202 # FIXME: we should support a comma separated list with 324 optparse.make_option("--run-part",
203 # --pixel-test-directory as well. 325 help="Run a specified part (n:m), the nth of m parts, of the layout tests"),
204 optparse.make_option( 326 optparse.make_option("--run-singly",
205 "--pixel-test-directory", 327 action="store_true",
206 dest="pixel_test_directories", 328 default=False,
207 action="append", 329 help="DEPRECATED, same as --batch-size=1 --ver bose"),
208 default=[], 330 optparse.make_option("--skipped",
209 help=("A directory where it is allowed to execute tests as pixel tests. Specify " 331 action="store",
210 "multiple times to add multiple directories. This option i mplies " 332 default=None,
211 "--pixel-tests. If specified, only those tests will be exe cuted as pixel " 333 help=("control how tests marked SKIP are run. "
212 "tests that are located in one of the" " directories enume rated with the " 334 "'default' == Skip tests unless explicit ly listed on the command line, "
213 "option. Some ports may ignore this option while others ca n have a default " 335 "'ignore' == Run them anyway, "
214 "value that can be overridden here.")), 336 "'only' == only run the SKIP tests, "
215 optparse.make_option( 337 "'always' == always skip, even if listed on the command line.")),
216 "--reset-results", 338 optparse.make_option("--fastest",
217 action="store_true", 339 action="store",
218 default=False, 340 type="float",
219 help="Reset expectations to the generated results in their exist ing location."), 341 help="Run the N% fastest tests as well as any tests listed on the command line"),
220 optparse.make_option( 342 optparse.make_option("--test-list",
221 "--results-directory", 343 action="append",
222 help="Location of test results"), 344 metavar="FILE",
223 optparse.make_option( 345 help="read list of tests to run from file"),
224 "--skip-failing-tests", 346 optparse.make_option("--time-out-ms",
225 action="store_true", 347 help="Set the timeout for each test"),
226 default=False, 348 optparse.make_option("--wrapper",
227 help=("Skip tests that are expected to fail. Note: When using th is option, " 349 help=("wrapper command to insert before invoca tions of the driver; option "
228 "you might miss new crashes in these tests.")), 350 "is split on whitespace before running. (Example: --wrapper='valgrind "
229 optparse.make_option( 351 "--smc-check=all')")),
230 "--smoke", 352 # FIXME: Display default number of child processes that will run.
231 action="store_true", 353 optparse.make_option("-f",
232 help="Run just the SmokeTests"), 354 "--fully-parallel",
233 optparse.make_option( 355 action="store_true",
234 "--no-smoke", 356 help="run all tests in parallel"),
235 dest="smoke", 357 optparse.make_option("-i",
236 action="store_false", 358 "--ignore-tests",
237 help="Do not run just the SmokeTests"), 359 action="append",
238 ])) 360 default=[],
239 361 help="directories or test to ignore (may speci fy multiple times)"),
240 option_group_definitions.append( 362 optparse.make_option("-n",
241 ("Testing Options", [ 363 "--dry-run",
242 optparse.make_option( 364 action="store_true",
243 "--additional-env-var", 365 default=False,
244 type="string", 366 help="Do everything but actually run the tests or upload results."),
245 action="append", 367 ]))
246 default=[],
247 help=("Passes that environment variable to the tests "
248 "(--additional-env-var=NAME=VALUE)")),
249 optparse.make_option(
250 "--batch-size",
251 type="int",
252 default=None,
253 help=("Run a the tests in batches (n), after every n tests, the driver is "
254 "relaunched.")),
255 optparse.make_option(
256 "--build",
257 dest="build",
258 action="store_true",
259 default=True,
260 help=("Check to ensure the build is up-to-date (default).")),
261 optparse.make_option(
262 "--no-build",
263 dest="build",
264 action="store_false",
265 help="Don't check to see if the build is up-to-date."),
266 optparse.make_option(
267 "--child-processes",
268 help="Number of drivers to run in parallel."),
269 optparse.make_option(
270 "--enable-wptserve",
271 dest="enable_wptserve",
272 action="store_true",
273 default=False,
274 help="Enable running web-platform-tests using WPTserve instead o f Apache."),
275 optparse.make_option(
276 "--disable-breakpad",
277 action="store_true",
278 help="Don't use breakpad to symbolize unexpected crashes."),
279 optparse.make_option(
280 "--driver-logging",
281 action="store_true",
282 help="Print detailed logging of the driver/content_shell"),
283 optparse.make_option(
284 "--enable-leak-detection",
285 action="store_true",
286 help="Enable the leak detection of DOM objects."),
287 optparse.make_option(
288 "--enable-sanitizer",
289 action="store_true",
290 help="Only alert on sanitizer-related errors and crashes"),
291 optparse.make_option(
292 "--exit-after-n-crashes-or-timeouts",
293 type="int",
294 default=None,
295 help="Exit after the first N crashes instead of running all test s"),
296 optparse.make_option(
297 "--exit-after-n-failures",
298 type="int",
299 default=None,
300 help="Exit after the first N failures instead of running all tes ts"),
301 optparse.make_option(
302 "--ignore-builder-category",
303 action="store",
304 help=("The category of builders to use with the --ignore-flaky-t ests option "
305 "('layout' or 'deps').")),
306 optparse.make_option(
307 "--ignore-flaky-tests",
308 action="store",
309 help=("Control whether tests that are flaky on the bots get igno red. "
310 "'very-flaky' == Ignore any tests that flaked more than on ce on the bot. "
311 "'maybe-flaky' == Ignore any tests that flaked once on the bot. "
312 "'unexpected' == Ignore any tests that had unexpected resu lts on the bot.")),
313 optparse.make_option(
314 "--iterations",
315 type="int",
316 default=1,
317 help="Number of times to run the set of tests (e.g. ABCABCABC)") ,
318 optparse.make_option(
319 "--max-locked-shards",
320 type="int",
321 default=0,
322 help="Set the maximum number of locked shards"),
323 optparse.make_option(
324 "--nocheck-sys-deps",
325 action="store_true",
326 default=False,
327 help="Don't check the system dependencies (themes)"),
328 optparse.make_option(
329 "--order",
330 action="store",
331 default="natural",
332 help=("determine the order in which the test cases will be run. "
333 "'none' == use the order in which the tests were listed "
334 "either in arguments or test list, "
335 "'natural' == use the natural order (default), "
336 "'random-seeded' == randomize the test order using a fixed seed, "
337 "'random' == randomize the test order.")),
338 optparse.make_option(
339 "--profile",
340 action="store_true",
341 help="Output per-test profile information."),
342 optparse.make_option(
343 "--profiler",
344 action="store",
345 help="Output per-test profile information, using the specified p rofiler."),
346 optparse.make_option(
347 "--repeat-each",
348 type="int",
349 default=1,
350 help="Number of times to run each test (e.g. AAABBBCCC)"),
351 # TODO(joelo): Delete --retry-failures and --no-retry-failures as th ey
352 # are redundant with --num-retries.
353 optparse.make_option(
354 "--retry-failures",
355 action="store_true",
356 help=("Re-try any tests that produce unexpected results. Default is to not retry "
357 "if an explicit list of tests is passed to run-webkit-test s.")),
358 optparse.make_option(
359 "--no-retry-failures",
360 dest="retry_failures",
361 action="store_false",
362 help="Don't re-try any tests that produce unexpected results."),
363 optparse.make_option(
364 "--num-retries",
365 type="int",
366 default=3,
367 help=("Number of times to retry failures, default is 3. Only rel evant when "
368 "failure retries are enabled.")),
369 optparse.make_option(
370 "--run-chunk",
371 help="Run a specified chunk (n:l), the nth of len l, of the layo ut tests"),
372 optparse.make_option(
373 "--run-part",
374 help="Run a specified part (n:m), the nth of m parts, of the lay out tests"),
375 optparse.make_option(
376 "--run-singly",
377 action="store_true",
378 default=False,
379 help="DEPRECATED, same as --batch-size=1 --verbose"),
380 optparse.make_option(
381 "--skipped",
382 action="store",
383 default=None,
384 help=("control how tests marked SKIP are run. "
385 "'default' == Skip tests unless explicitly listed on the c ommand line, "
386 "'ignore' == Run them anyway, "
387 "'only' == only run the SKIP tests, "
388 "'always' == always skip, even if listed on the command li ne.")),
389 optparse.make_option(
390 "--fastest",
391 action="store",
392 type="float",
393 help="Run the N% fastest tests as well as any tests listed on th e command line"),
394 optparse.make_option(
395 "--test-list",
396 action="append",
397 metavar="FILE",
398 help="read list of tests to run from file"),
399 optparse.make_option(
400 "--time-out-ms",
401 help="Set the timeout for each test"),
402 optparse.make_option(
403 "--wrapper",
404 help=("wrapper command to insert before invocations of the drive r; option "
405 "is split on whitespace before running. (Example: --wrappe r='valgrind "
406 "--smc-check=all')")),
407 # FIXME: Display default number of child processes that will run.
408 optparse.make_option(
409 "-f", "--fully-parallel",
410 action="store_true",
411 help="run all tests in parallel"),
412 optparse.make_option(
413 "-i", "--ignore-tests",
414 action="append",
415 default=[],
416 help="directories or test to ignore (may specify multiple times) "),
417 optparse.make_option(
418 "-n", "--dry-run",
419 action="store_true",
420 default=False,
421 help="Do everything but actually run the tests or upload results ."),
422 ]))
423 368
424 # FIXME: Move these into json_results_generator.py. 369 # FIXME: Move these into json_results_generator.py.
425 option_group_definitions.append( 370 option_group_definitions.append(("Result JSON Options", [
426 ("Result JSON Options", [ 371 optparse.make_option("--build-name",
427 optparse.make_option( 372 default="DUMMY_BUILD_NAME",
428 "--build-name", 373 help="The name of the builder used in its path, e.g . webkit-rel."),
429 default="DUMMY_BUILD_NAME", 374 optparse.make_option("--step-name",
430 help="The name of the builder used in its path, e.g. webkit-rel. "), 375 default="webkit_tests",
431 optparse.make_option( 376 help="The name of the step in a build running this script."),
432 "--step-name", 377 optparse.make_option("--build-number",
433 default="webkit_tests", 378 default="DUMMY_BUILD_NUMBER",
434 help="The name of the step in a build running this script."), 379 help="The build number of the builder running this script."),
435 optparse.make_option( 380 optparse.make_option("--builder-name",
436 "--build-number", 381 default="",
437 default="DUMMY_BUILD_NUMBER", 382 help=("The name of the builder shown on the waterfa ll running this script "
438 help="The build number of the builder running this script."), 383 "e.g. WebKit.")),
439 optparse.make_option( 384 optparse.make_option("--master-name",
440 "--builder-name", 385 help="The name of the buildbot master."),
441 default="", 386 optparse.make_option("--test-results-server",
442 help=("The name of the builder shown on the waterfall running th is script " 387 default="",
443 "e.g. WebKit.")), 388 help="If specified, upload results json files to th is appengine server."),
444 optparse.make_option( 389 optparse.make_option("--write-full-results-to",
445 "--master-name", 390 help=("If specified, copy full_results.json from th e results dir to the "
446 help="The name of the buildbot master."), 391 "specified path.")),
447 optparse.make_option( 392 ]))
448 "--test-results-server",
449 default="",
450 help="If specified, upload results json files to this appengine server."),
451 optparse.make_option(
452 "--write-full-results-to",
453 help=("If specified, copy full_results.json from the results dir to the "
454 "specified path.")),
455 ]))
456 393
457 option_parser = optparse.OptionParser() 394 option_parser = optparse.OptionParser()
458 395
459 for group_name, group_options in option_group_definitions: 396 for group_name, group_options in option_group_definitions:
460 option_group = optparse.OptionGroup(option_parser, group_name) 397 option_group = optparse.OptionGroup(option_parser, group_name)
461 option_group.add_options(group_options) 398 option_group.add_options(group_options)
462 option_parser.add_option_group(option_group) 399 option_parser.add_option_group(option_group)
463 400
464 return option_parser.parse_args(args) 401 return option_parser.parse_args(args)
465 402
466 403
467 def _set_up_derived_options(port, options, args): 404 def _set_up_derived_options(port, options, args):
468 """Sets the options values that depend on other options values.""" 405 """Sets the options values that depend on other options values."""
469 if options.batch_size is None: 406 if options.batch_size is None:
470 options.batch_size = port.default_batch_size() 407 options.batch_size = port.default_batch_size()
471 408
472 if not options.child_processes: 409 if not options.child_processes:
473 options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES", 410 options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES", str(port.default_child_processes()))
474 str(port.default_child_processes()))
475 if not options.max_locked_shards: 411 if not options.max_locked_shards:
476 options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS", 412 options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS", str(port.default_max_locked_shards())))
477 str(port.default_max_locked_shards())))
478 413
479 if not options.configuration: 414 if not options.configuration:
480 options.configuration = port.default_configuration() 415 options.configuration = port.default_configuration()
481 416
482 if options.pixel_tests is None: 417 if options.pixel_tests is None:
483 options.pixel_tests = port.default_pixel_tests() 418 options.pixel_tests = port.default_pixel_tests()
484 419
485 if not options.time_out_ms: 420 if not options.time_out_ms:
486 options.time_out_ms = str(port.default_timeout_ms()) 421 options.time_out_ms = str(port.default_timeout_ms())
487 422
(...skipping 55 matching lines...)
543 478
544 def run(port, options, args, logging_stream, stdout): 479 def run(port, options, args, logging_stream, stdout):
545 logger = logging.getLogger() 480 logger = logging.getLogger()
546 logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO) 481 logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO)
547 482
548 printer = printing.Printer(port, options, logging_stream, logger=logger) 483 printer = printing.Printer(port, options, logging_stream, logger=logger)
549 try: 484 try:
550 run_details = _run_tests(port, options, args, printer) 485 run_details = _run_tests(port, options, args, printer)
551 printer.flush() 486 printer.flush()
552 487
553 if (not options.dry_run and 488 if (not options.dry_run and (run_details.exit_code not in test_run_results.ERROR_CODES or
554 (run_details.exit_code not in test_run_results.ERROR_CODES or 489 run_details.exit_code == test_run_results.EARLY_EXIT_STATUS) and
555 run_details.exit_code == test_run_results.EARLY_EXIT_STATUS) and
556 not run_details.initial_results.keyboard_interrupted): 490 not run_details.initial_results.keyboard_interrupted):
557 bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging) 491 bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging)
558 bot_printer.print_results(run_details) 492 bot_printer.print_results(run_details)
559 stdout.flush() 493 stdout.flush()
560 494
561 _log.debug("Generating dashboard...") 495 _log.debug("Generating dashboard...")
562 gen_dash_board = DashBoardGenerator(port) 496 gen_dash_board = DashBoardGenerator(port)
563 gen_dash_board.generate() 497 gen_dash_board.generate()
564 _log.debug("Dashboard generated.") 498 _log.debug("Dashboard generated.")
565 499
566 _log.debug("") 500 _log.debug("")
567 _log.debug("Testing completed, Exit status: %d" % run_details.exit_code) 501 _log.debug("Testing completed, Exit status: %d" % run_details.exit_code)
568 502
569 # Temporary process dump for debugging windows timeout issues, see crbug.com/522396. 503 # Temporary process dump for debugging windows timeout issues, see crbug.com/522396.
570 _log.debug("") 504 _log.debug("")
571 _log.debug("Process dump:") 505 _log.debug("Process dump:")
572 for process in port.host.executive.process_dump(): 506 for process in port.host.executive.process_dump():
573 _log.debug("\t%s" % process) 507 _log.debug("\t%s" % process)
574 508
575 return run_details 509 return run_details
576 510
577 finally: 511 finally:
578 printer.cleanup() 512 printer.cleanup()
579 513
514
580 if __name__ == '__main__': 515 if __name__ == '__main__':
581 exit_code = main(sys.argv[1:], sys.stdout, sys.stderr) 516 exit_code = main(sys.argv[1:], sys.stdout, sys.stderr)
582 sys.exit(exit_code) 517 sys.exit(exit_code)
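
For context on how the option groups defined above are consumed (this wiring is unchanged by the CL), parse_args builds one optparse.OptionGroup per (name, options) pair and registers each group on a single OptionParser. The sketch below is a simplified, self-contained reconstruction with a single placeholder option; it assumes nothing beyond the stdlib optparse module and is not the actual webkitpy code.

    import optparse

    def parse_args(args):
        # Each entry pairs a group name with a list of make_option objects.
        option_group_definitions = [
            ("Example options", [
                optparse.make_option("-n", "--dry-run", action="store_true", default=False,
                                     help="Do everything but actually run the tests or upload results."),
            ]),
        ]
        option_parser = optparse.OptionParser()
        for group_name, group_options in option_group_definitions:
            option_group = optparse.OptionGroup(option_parser, group_name)
            option_group.add_options(group_options)
            option_parser.add_option_group(option_group)
        return option_parser.parse_args(args)

    # Example: options.dry_run is True and the remaining args are ['fast/css'].
    options, args = parse_args(["--dry-run", "fast/css"])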
