OLD | NEW |
1 # Copyright (C) 2010 Google Inc. All rights reserved. | 1 # Copyright (C) 2010 Google Inc. All rights reserved. |
2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged | 2 # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged |
3 # Copyright (C) 2011 Apple Inc. All rights reserved. | 3 # Copyright (C) 2011 Apple Inc. All rights reserved. |
4 # | 4 # |
5 # Redistribution and use in source and binary forms, with or without | 5 # Redistribution and use in source and binary forms, with or without |
6 # modification, are permitted provided that the following conditions are | 6 # modification, are permitted provided that the following conditions are |
7 # met: | 7 # met: |
8 # | 8 # |
9 # * Redistributions of source code must retain the above copyright | 9 # * Redistributions of source code must retain the above copyright |
10 # notice, this list of conditions and the following disclaimer. | 10 # notice, this list of conditions and the following disclaimer. |
(...skipping 27 matching lines...) |
38 from webkitpy.layout_tests.controllers.manager import Manager | 38 from webkitpy.layout_tests.controllers.manager import Manager |
39 from webkitpy.layout_tests.models import test_run_results | 39 from webkitpy.layout_tests.models import test_run_results |
40 from webkitpy.layout_tests.port import configuration_options, platform_options | 40 from webkitpy.layout_tests.port import configuration_options, platform_options |
41 from webkitpy.layout_tests.views import buildbot_results | 41 from webkitpy.layout_tests.views import buildbot_results |
42 from webkitpy.layout_tests.views import printing | 42 from webkitpy.layout_tests.views import printing |
43 from webkitpy.layout_tests.generate_results_dashboard import GenerateDashBoard | 43 from webkitpy.layout_tests.generate_results_dashboard import GenerateDashBoard |
44 | 44 |
45 _log = logging.getLogger(__name__) | 45 _log = logging.getLogger(__name__) |
46 | 46 |
47 | 47 |
48 | |
49 def main(argv, stdout, stderr): | 48 def main(argv, stdout, stderr): |
50 options, args = parse_args(argv) | 49 options, args = parse_args(argv) |
51 | 50 |
52 if options.platform and 'test' in options.platform and not 'browser_test' in options.platform: | 51 if options.platform and 'test' in options.platform and not 'browser_test' in options.platform: |
53 # It's a bit lame to import mocks into real code, but this allows the user | 52 # It's a bit lame to import mocks into real code, but this allows the user |
54 # to run tests against the test platform interactively, which is useful for | 53 # to run tests against the test platform interactively, which is useful for |
55 # debugging test failures. | 54 # debugging test failures. |
56 from webkitpy.common.host_mock import MockHost | 55 from webkitpy.common.host_mock import MockHost |
57 host = MockHost() | 56 host = MockHost() |
58 else: | 57 else: |
59 host = Host() | 58 host = Host() |
60 | 59 |
61 if options.lint_test_files: | 60 if options.lint_test_files: |
62 from webkitpy.layout_tests.lint_test_expectations import run_checks | 61 from webkitpy.layout_tests.lint_test_expectations import run_checks |
63 return run_checks(host, options, stderr) | 62 return run_checks(host, options, stderr) |
64 | 63 |
65 try: | 64 try: |
66 port = host.port_factory.get(options.platform, options) | 65 port = host.port_factory.get(options.platform, options) |
67 except NotImplementedError, e: | 66 except NotImplementedError as e: |
68 # FIXME: is this the best way to handle unsupported port names? | 67 # FIXME: is this the best way to handle unsupported port names? |
69 print >> stderr, str(e) | 68 print >> stderr, str(e) |
70 return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS | 69 return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS |
71 | 70 |
72 try: | 71 try: |
73 run_details = run(port, options, args, stderr) | 72 run_details = run(port, options, args, stderr) |
74 if ((run_details.exit_code not in test_run_results.ERROR_CODES or | 73 if ((run_details.exit_code not in test_run_results.ERROR_CODES or |
75 run_details.exit_code == test_run_results.EARLY_EXIT_STATUS) and | 74 run_details.exit_code == test_run_results.EARLY_EXIT_STATUS) and |
76 not run_details.initial_results.keyboard_interrupted): | 75 not run_details.initial_results.keyboard_interrupted): |
77 bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging) | 76 bot_printer = buildbot_results.BuildBotPrinter(stdout, options.debug_rwt_logging) |
78 bot_printer.print_results(run_details) | 77 bot_printer.print_results(run_details) |
79 | 78 |
80 if options.enable_versioned_results: | 79 if options.enable_versioned_results: |
81 gen_dash_board = GenerateDashBoard(port) | 80 gen_dash_board = GenerateDashBoard(port) |
82 gen_dash_board.generate() | 81 gen_dash_board.generate() |
83 | 82 |
84 return run_details.exit_code | 83 return run_details.exit_code |
85 | 84 |
86 # We need to still handle KeyboardInterrupt, atleast for webkitpy unittest cases. | 85 # We need to still handle KeyboardInterrupt, atleast for webkitpy unittest cases. |
87 except KeyboardInterrupt: | 86 except KeyboardInterrupt: |
88 return test_run_results.INTERRUPTED_EXIT_STATUS | 87 return test_run_results.INTERRUPTED_EXIT_STATUS |
89 except test_run_results.TestRunException as e: | 88 except test_run_results.TestRunException as e: |
90 print >> stderr, e.msg | 89 print >> stderr, e.msg |
91 return e.code | 90 return e.code |
92 except BaseException as e: | 91 except BaseException as e: |
93 if isinstance(e, Exception): | 92 if isinstance(e, Exception): |
94 print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e)) | 93 print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e)) |
95 traceback.print_exc(file=stderr) | 94 traceback.print_exc(file=stderr) |
96 return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS | 95 return test_run_results.UNEXPECTED_ERROR_EXIT_STATUS |
97 | 96 |
98 | 97 |
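[Note: the change at old line 67 swaps the Python 2-only spelling except NotImplementedError, e: for except NotImplementedError as e:, which Python 2.6+ and Python 3 both accept. The neighbouring print >> stderr statements are still Python 2 print syntax, so the file as a whole remains Python 2. A minimal standalone sketch of the portable form, with a hypothetical error message that is not part of this CL:

    try:
        raise NotImplementedError('unsupported port name')  # stand-in error
    except NotImplementedError as e:
        # 'as' binds the exception in both Python 2.6+ and Python 3;
        # the old 'except NotImplementedError, e:' form is a SyntaxError in Python 3.
        print(str(e))
]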
99 def parse_args(args): | 98 def parse_args(args): |
100 option_group_definitions = [] | 99 option_group_definitions = [] |
101 | 100 |
102 option_group_definitions.append(("Platform options", platform_options())) | 101 option_group_definitions.append(('Platform options', platform_options())) |
103 option_group_definitions.append(("Configuration options", configuration_options())) | 102 option_group_definitions.append(('Configuration options', configuration_options())) |
104 option_group_definitions.append(("Printing Options", printing.print_options())) | 103 option_group_definitions.append(('Printing Options', printing.print_options())) |
105 | 104 |
106 option_group_definitions.append(("Android-specific Options", [ | 105 option_group_definitions.append(('Android-specific Options', [ |
107 optparse.make_option("--adb-device", | 106 optparse.make_option('--adb-device', |
108 action="append", default=[], | 107 action='append', default=[], |
109 help="Run Android layout tests on these devices."), | 108 help='Run Android layout tests on these devices.'), |
110 | 109 |
111 # FIXME: Flip this to be off by default once we can log the device setup more cleanly. | 110 # FIXME: Flip this to be off by default once we can log the device setup more cleanly. |
112 optparse.make_option("--no-android-logging", | 111 optparse.make_option('--no-android-logging', |
113 action="store_false", dest='android_logging', default=True, | 112 action='store_false', dest='android_logging', default=True, |
114 help="Do not log android-specific debug messages (default is to log as part of --debug-rwt-logging"), | 113 help='Do not log android-specific debug messages (default is to log as part of --debug-rwt-logging'), |
115 ])) | 114 ])) |
116 | 115 |
117 option_group_definitions.append(("Results Options", [ | 116 option_group_definitions.append(('Results Options', [ |
118 optparse.make_option("-p", "--pixel", "--pixel-tests", action="store_tru
e", | 117 optparse.make_option('-p', '--pixel', '--pixel-tests', action='store_tru
e', |
119 dest="pixel_tests", help="Enable pixel-to-pixel PNG comparisons"), | 118 dest='pixel_tests', help='Enable pixel-to-pixel PNG
comparisons'), |
120 optparse.make_option("--no-pixel", "--no-pixel-tests", action="store_fal
se", | 119 optparse.make_option('--no-pixel', '--no-pixel-tests', action='store_fal
se', |
121 dest="pixel_tests", help="Disable pixel-to-pixel PNG comparisons"), | 120 dest='pixel_tests', help='Disable pixel-to-pixel PN
G comparisons'), |
122 optparse.make_option("--results-directory", help="Location of test resul
ts"), | 121 optparse.make_option('--results-directory', help='Location of test resul
ts'), |
123 optparse.make_option("--build-directory", | 122 optparse.make_option('--build-directory', |
124 help="Path to the directory under which build files are kept (should
not include configuration)"), | 123 help='Path to the directory under which build files
are kept (should not include configuration)'), |
125 optparse.make_option("--add-platform-exceptions", action="store_true", d
efault=False, | 124 optparse.make_option('--add-platform-exceptions', action='store_true', d
efault=False, |
126 help="Save generated results into the *most-specific-platform* direc
tory rather than the *generic-platform* directory"), | 125 help='Save generated results into the *most-specifi
c-platform* directory rather than the *generic-platform* directory'), |
127 optparse.make_option("--new-baseline", action="store_true", | 126 optparse.make_option('--new-baseline', action='store_true', |
128 default=False, help="Save generated results as new baselines " | 127 default=False, help='Save generated results as new
baselines ' |
129 "into the *most-specific-platform* directory, overwriting whate
ver's " | 128 "into the *most-specific-platform* directory, overw
riting whatever's " |
130 "already there. Equivalent to --reset-results --add-platform-ex
ceptions"), | 129 'already there. Equivalent to --reset-results --add
-platform-exceptions'), |
131 optparse.make_option("--reset-results", action="store_true", | 130 optparse.make_option('--reset-results', action='store_true', |
132 default=False, help="Reset expectations to the " | 131 default=False, help='Reset expectations to the ' |
133 "generated results in their existing location."), | 132 'generated results in their existing location.'), |
134 optparse.make_option("--no-new-test-results", action="store_false", | 133 optparse.make_option('--no-new-test-results', action='store_false', |
135 dest="new_test_results", default=True, | 134 dest='new_test_results', default=True, |
136 help="Don't create new baselines when no expected results exist"), | 135 help="Don't create new baselines when no expected r
esults exist"), |
137 | 136 |
138 #FIXME: we should support a comma separated list with --pixel-test-direc
tory as well. | 137 # FIXME: we should support a comma separated list with --pixel-test-dire
ctory as well. |
139 optparse.make_option("--pixel-test-directory", action="append", default=
[], dest="pixel_test_directories", | 138 optparse.make_option('--pixel-test-directory', action='append', default=
[], dest='pixel_test_directories', |
140 help="A directory where it is allowed to execute tests as pixel test
s. " | 139 help='A directory where it is allowed to execute te
sts as pixel tests. ' |
141 "Specify multiple times to add multiple directories. " | 140 'Specify multiple times to add multiple directories
. ' |
142 "This option implies --pixel-tests. If specified, only those te
sts " | 141 'This option implies --pixel-tests. If specified, o
nly those tests ' |
143 "will be executed as pixel tests that are located in one of the
" | 142 'will be executed as pixel tests that are located i
n one of the ' |
144 "directories enumerated with the option. Some ports may ignore
this " | 143 'directories enumerated with the option. Some ports
may ignore this ' |
145 "option while others can have a default value that can be overr
idden here."), | 144 'option while others can have a default value that
can be overridden here.'), |
146 | 145 |
147 optparse.make_option("--skip-failing-tests", action="store_true", | 146 optparse.make_option('--skip-failing-tests', action='store_true', |
148 default=False, help="Skip tests that are expected to fail. " | 147 default=False, help='Skip tests that are expected t
o fail. ' |
149 "Note: When using this option, you might miss new crashes " | 148 'Note: When using this option, you might miss new c
rashes ' |
150 "in these tests."), | 149 'in these tests.'), |
151 optparse.make_option("--additional-drt-flag", action="append", | 150 optparse.make_option('--additional-drt-flag', action='append', |
152 default=[], help="Additional command line flag to pass to the driver
" | 151 default=[], help='Additional command line flag to p
ass to the driver ' |
153 "Specify multiple times to add multiple flags."), | 152 'Specify multiple times to add multiple flags.'), |
154 optparse.make_option("--driver-name", type="string", | 153 optparse.make_option('--driver-name', type='string', |
155 help="Alternative driver binary to use"), | 154 help='Alternative driver binary to use'), |
156 optparse.make_option("--additional-platform-directory", action="append", | 155 optparse.make_option('--additional-platform-directory', action='append', |
157 default=[], help="Additional directory where to look for test " | 156 default=[], help='Additional directory where to loo
k for test ' |
158 "baselines (will take precendence over platform baselines). " | 157 'baselines (will take precendence over platform bas
elines). ' |
159 "Specify multiple times to add multiple search path entries."), | 158 'Specify multiple times to add multiple search path
entries.'), |
160 optparse.make_option("--additional-expectations", action="append", defau
lt=[], | 159 optparse.make_option('--additional-expectations', action='append', defau
lt=[], |
161 help="Path to a test_expectations file that will override previous e
xpectations. " | 160 help='Path to a test_expectations file that will ov
erride previous expectations. ' |
162 "Specify multiple times for multiple sets of overrides."), | 161 'Specify multiple times for multiple sets of overri
des.'), |
163 optparse.make_option("--compare-port", action="store", default=None, | 162 optparse.make_option('--compare-port', action='store', default=None, |
164 help="Use the specified port's baselines first"), | 163 help="Use the specified port's baselines first"), |
165 optparse.make_option("--no-show-results", action="store_false", | 164 optparse.make_option('--no-show-results', action='store_false', |
166 default=True, dest="show_results", | 165 default=True, dest='show_results', |
167 help="Don't launch a browser with results after the tests " | 166 help="Don't launch a browser with results after the
tests " |
168 "are done"), | 167 'are done'), |
169 optparse.make_option("--full-results-html", action="store_true", | 168 optparse.make_option('--full-results-html', action='store_true', |
170 default=False, | 169 default=False, |
171 help="Show all failures in results.html, rather than only regression
s"), | 170 help='Show all failures in results.html, rather tha
n only regressions'), |
172 optparse.make_option("--clobber-old-results", action="store_true", | 171 optparse.make_option('--clobber-old-results', action='store_true', |
173 default=False, help="Clobbers test results from previous runs."), | 172 default=False, help='Clobbers test results from pre
vious runs.'), |
174 optparse.make_option("--enable-versioned-results", action="store_true", | 173 optparse.make_option('--enable-versioned-results', action='store_true', |
175 default=False, help="Archive the test results for later access."), | 174 default=False, help='Archive the test results for l
ater access.'), |
176 optparse.make_option("--smoke", action="store_true", | 175 optparse.make_option('--smoke', action='store_true', |
177 help="Run just the SmokeTests"), | 176 help='Run just the SmokeTests'), |
178 optparse.make_option("--no-smoke", dest="smoke", action="store_false", | 177 optparse.make_option('--no-smoke', dest='smoke', action='store_false', |
179 help="Do not run just the SmokeTests"), | 178 help='Do not run just the SmokeTests'), |
180 ])) | 179 ])) |
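[Note: several options above come in on/off pairs (--pixel/--no-pixel, --smoke/--no-smoke) whose store_true/store_false actions share one dest, so the last flag on the command line wins and the dest stays None when neither is passed; that None is what lets _set_up_derived_options later fall back to port.default_pixel_tests(). A minimal sketch of the pattern with a hypothetical parser, not code from this CL:

    import optparse

    parser = optparse.OptionParser()
    # Both flags write the same dest; the dest defaults to None when neither is given.
    parser.add_option('--pixel', action='store_true', dest='pixel_tests')
    parser.add_option('--no-pixel', action='store_false', dest='pixel_tests')

    options, _ = parser.parse_args(['--pixel', '--no-pixel'])
    print(options.pixel_tests)  # False: the last flag wins
]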
181 | 180 |
182 option_group_definitions.append(("Testing Options", [ | 181 option_group_definitions.append(('Testing Options', [ |
183 optparse.make_option("--build", dest="build", | 182 optparse.make_option('--build', dest='build', |
184 action="store_true", default=True, | 183 action='store_true', default=True, |
185 help="Check to ensure the build is up-to-date (default)."), | 184 help='Check to ensure the build is up-to-date (defa
ult).'), |
186 optparse.make_option("--no-build", dest="build", | 185 optparse.make_option('--no-build', dest='build', |
187 action="store_false", help="Don't check to see if the build is up-to
-date."), | 186 action='store_false', help="Don't check to see if t
he build is up-to-date."), |
188 optparse.make_option("-n", "--dry-run", action="store_true", | 187 optparse.make_option('-n', '--dry-run', action='store_true', |
189 default=False, | 188 default=False, |
190 help="Do everything but actually run the tests or upload results."), | 189 help='Do everything but actually run the tests or u
pload results.'), |
191 optparse.make_option("--nocheck-sys-deps", action="store_true", | 190 optparse.make_option('--nocheck-sys-deps', action='store_true', |
192 default=False, | 191 default=False, |
193 help="Don't check the system dependencies (themes)"), | 192 help="Don't check the system dependencies (themes)"
), |
194 optparse.make_option("--wrapper", | 193 optparse.make_option('--wrapper', |
195 help="wrapper command to insert before invocations of " | 194 help='wrapper command to insert before invocations
of ' |
196 "the driver; option is split on whitespace before " | 195 'the driver; option is split on whitespace before ' |
197 "running. (Example: --wrapper='valgrind --smc-check=all')"), | 196 "running. (Example: --wrapper='valgrind --smc-check
=all')"), |
198 optparse.make_option("-i", "--ignore-tests", action="append", default=[]
, | 197 optparse.make_option('-i', '--ignore-tests', action='append', default=[]
, |
199 help="directories or test to ignore (may specify multiple times)"), | 198 help='directories or test to ignore (may specify mu
ltiple times)'), |
200 optparse.make_option("--ignore-flaky-tests", action="store", | 199 optparse.make_option('--ignore-flaky-tests', action='store', |
201 help=("Control whether tests that are flaky on the bots get ignored.
" | 200 help=('Control whether tests that are flaky on the
bots get ignored.' |
202 "'very-flaky' == Ignore any tests that flaked more than once on
the bot." | 201 "'very-flaky' == Ignore any tests that flaked
more than once on the bot." |
203 "'maybe-flaky' == Ignore any tests that flaked once on the bot." | 202 "'maybe-flaky' == Ignore any tests that flake
d once on the bot." |
204 "'unexpected' == Ignore any tests that had unexpected results on
the bot.")), | 203 "'unexpected' == Ignore any tests that had un
expected results on the bot.")), |
205 optparse.make_option("--ignore-builder-category", action="store", | 204 optparse.make_option('--ignore-builder-category', action='store', |
206 help=("The category of builders to use with the --ignore-flaky-tests
" | 205 help=('The category of builders to use with the --i
gnore-flaky-tests ' |
207 "option ('layout' or 'deps').")), | 206 "option ('layout' or 'deps').")), |
208 optparse.make_option("--test-list", action="append", | 207 optparse.make_option('--test-list', action='append', |
209 help="read list of tests to run from file", metavar="FILE"), | 208 help='read list of tests to run from file', metavar
='FILE'), |
210 optparse.make_option("--skipped", action="store", default=None, | 209 optparse.make_option('--skipped', action='store', default=None, |
211 help=("control how tests marked SKIP are run. " | 210 help=('control how tests marked SKIP are run. ' |
212 "'default' == Skip tests unless explicitly listed on the comman
d line, " | 211 "'default' == Skip tests unless explicitly li
sted on the command line, " |
213 "'ignore' == Run them anyway, " | 212 "'ignore' == Run them anyway, " |
214 "'only' == only run the SKIP tests, " | 213 "'only' == only run the SKIP tests, " |
215 "'always' == always skip, even if listed on the command line.")
), | 214 "'always' == always skip, even if listed on t
he command line.")), |
216 optparse.make_option("--time-out-ms", | 215 optparse.make_option('--time-out-ms', |
217 help="Set the timeout for each test"), | 216 help='Set the timeout for each test'), |
218 optparse.make_option("--order", action="store", default="natural", | 217 optparse.make_option('--order', action='store', default='natural', |
219 help=("determine the order in which the test cases will be run. " | 218 help=('determine the order in which the test cases
will be run. ' |
220 "'none' == use the order in which the tests were listed either
in arguments or test list, " | 219 "'none' == use the order in which the tests w
ere listed either in arguments or test list, " |
221 "'natural' == use the natural order (default), " | 220 "'natural' == use the natural order (default)
, " |
222 "'random-seeded' == randomize the test order using a fixed see
d, " | 221 "'random-seeded' == randomize the test order
using a fixed seed, " |
223 "'random' == randomize the test order.")), | 222 "'random' == randomize the test order.")), |
224 optparse.make_option("--run-chunk", | 223 optparse.make_option('--run-chunk', |
225 help=("Run a specified chunk (n:l), the nth of len l, " | 224 help=('Run a specified chunk (n:l), the nth of len
l, ' |
226 "of the layout tests")), | 225 'of the layout tests')), |
227 optparse.make_option("--run-part", help=("Run a specified part (n:m), " | 226 optparse.make_option('--run-part', help=('Run a specified part (n:m), ' |
228 "the nth of m parts, of the layout tests")), | 227 'the nth of m parts, of the lay
out tests')), |
229 optparse.make_option("--batch-size", | 228 optparse.make_option('--batch-size', |
230 help=("Run a the tests in batches (n), after every n tests, " | 229 help=('Run a the tests in batches (n), after every
n tests, ' |
231 "the driver is relaunched."), type="int", default=None), | 230 'the driver is relaunched.'), type='int', def
ault=None), |
232 optparse.make_option("--run-singly", action="store_true", | 231 optparse.make_option('--run-singly', action='store_true', |
233 default=False, help="DEPRECATED, same as --batch-size=1 --verbose"), | 232 default=False, help='DEPRECATED, same as --batch-size=1 --verbose'), |
234 optparse.make_option("--child-processes", | 233 optparse.make_option('--child-processes', |
235 help="Number of drivers to run in parallel."), | 234 help='Number of drivers to run in parallel.'), |
236 # FIXME: Display default number of child processes that will run. | 235 # FIXME: Display default number of child processes that will run. |
237 optparse.make_option("-f", "--fully-parallel", action="store_true", | 236 optparse.make_option('-f', '--fully-parallel', action='store_true', |
238 help="run all tests in parallel"), | 237 help='run all tests in parallel'), |
239 optparse.make_option("--exit-after-n-failures", type="int", default=None, | 238 optparse.make_option('--exit-after-n-failures', type='int', default=None, |
240 help="Exit after the first N failures instead of running all " | 239 help='Exit after the first N failures instead of running all ' |
241 "tests"), | 240 'tests'), |
242 optparse.make_option("--exit-after-n-crashes-or-timeouts", type="int", | 241 optparse.make_option('--exit-after-n-crashes-or-timeouts', type='int', |
243 default=None, help="Exit after the first N crashes instead of " | 242 default=None, help='Exit after the first N crashes instead of ' |
244 "running all tests"), | 243 'running all tests'), |
245 optparse.make_option("--iterations", type="int", default=1, help="Number of times to run the set of tests (e.g. ABCABCABC)"), | 244 optparse.make_option( |
246 optparse.make_option("--repeat-each", type="int", default=1, help="Number of times to run each test (e.g. AAABBBCCC)"), | 245 '--iterations', |
247 optparse.make_option("--retry-failures", action="store_true", | 246 type='int', |
248 help="Re-try any tests that produce unexpected results. Default is to not retry if an explicit list of tests is passed to run-webkit-tests."), | 247 default=1, |
249 optparse.make_option("--no-retry-failures", action="store_false", | 248 help='Number of times to run the set of tests (e.g. ABCABCABC)'), |
250 dest="retry_failures", | 249 optparse.make_option('--repeat-each', type='int', default=1, help='Number of times to run each test (e.g. AAABBBCCC)'), |
251 help="Don't re-try any tests that produce unexpected results."), | 250 optparse.make_option('--retry-failures', action='store_true', |
252 | 251 help='Re-try any tests that produce unexpected results. Default is to not retry if an explicit list of tests is passed to run-webkit-tests.'), |
253 optparse.make_option("--max-locked-shards", type="int", default=0, | 252 optparse.make_option('--no-retry-failures', action='store_false', |
254 help="Set the maximum number of locked shards"), | 253 dest='retry_failures', |
255 optparse.make_option("--additional-env-var", type="string", action="append", default=[], | 254 help="Don't re-try any tests that produce unexpected results."), |
256 help="Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)"), | 255 |
257 optparse.make_option("--profile", action="store_true", | 256 optparse.make_option('--max-locked-shards', type='int', default=0, |
258 help="Output per-test profile information."), | 257 help='Set the maximum number of locked shards'), |
259 optparse.make_option("--profiler", action="store", | 258 optparse.make_option('--additional-env-var', type='string', action='append', default=[], |
260 help="Output per-test profile information, using the specified profiler."), | 259 help='Passes that environment variable to the tests (--additional-env-var=NAME=VALUE)'), |
261 optparse.make_option("--driver-logging", action="store_true", | 260 optparse.make_option('--profile', action='store_true', |
262 help="Print detailed logging of the driver/content_shell"), | 261 help='Output per-test profile information.'), |
263 optparse.make_option("--disable-breakpad", action="store_true", | 262 optparse.make_option('--profiler', action='store', |
264 help="Don't use breakpad to symbolize unexpected crashes."), | 263 help='Output per-test profile information, using th
e specified profiler.'), |
265 optparse.make_option("--enable-leak-detection", action="store_true", | 264 optparse.make_option('--driver-logging', action='store_true', |
266 help="Enable the leak detection of DOM objects."), | 265 help='Print detailed logging of the driver/content_
shell'), |
267 optparse.make_option("--enable-sanitizer", action="store_true", | 266 optparse.make_option('--disable-breakpad', action='store_true', |
268 help="Only alert on sanitizer-related errors and crashes"), | 267 help="Don't use breakpad to symbolize unexpected cr
ashes."), |
269 ])) | 268 optparse.make_option('--enable-leak-detection', action='store_true', |
270 | 269 help='Enable the leak detection of DOM objects.'), |
271 option_group_definitions.append(("Miscellaneous Options", [ | 270 optparse.make_option('--enable-sanitizer', action='store_true', |
272 optparse.make_option("--lint-test-files", action="store_true", | 271 help='Only alert on sanitizer-related errors and crashes'), |
273 default=False, help=("Makes sure the test files parse for all " | 272 ])) |
274 "configurations. Does not run any tests.")), | 273 |
| 274 option_group_definitions.append(('Miscellaneous Options', [ |
| 275 optparse.make_option('--lint-test-files', action='store_true', |
 | 276 default=False, help=('Makes sure the test files parse for all ' |
 | 277 'configurations. Does not run any tests.')), |
275 ])) | 278 ])) |
276 | 279 |
277 # FIXME: Move these into json_results_generator.py | 280 # FIXME: Move these into json_results_generator.py |
278 option_group_definitions.append(("Result JSON Options", [ | 281 option_group_definitions.append(('Result JSON Options', [ |
279 optparse.make_option("--master-name", help="The name of the buildbot master."), | 282 optparse.make_option('--master-name', help='The name of the buildbot master.'), |
280 optparse.make_option("--builder-name", default="", | 283 optparse.make_option('--builder-name', default='', |
281 help=("The name of the builder shown on the waterfall running " | 284 help=('The name of the builder shown on the waterfa
ll running ' |
282 "this script e.g. WebKit.")), | 285 'this script e.g. WebKit.')), |
283 optparse.make_option("--build-name", default="DUMMY_BUILD_NAME", | 286 optparse.make_option('--build-name', default='DUMMY_BUILD_NAME', |
284 help=("The name of the builder used in its path, e.g. " | 287 help=('The name of the builder used in its path, e.
g. ' |
285 "webkit-rel.")), | 288 'webkit-rel.')), |
286 optparse.make_option("--build-number", default="DUMMY_BUILD_NUMBER", | 289 optparse.make_option('--build-number', default='DUMMY_BUILD_NUMBER', |
287 help=("The build number of the builder running this script.")), | 290 help=('The build number of the builder running this
script.')), |
288 optparse.make_option("--test-results-server", default="", | 291 optparse.make_option('--test-results-server', default='', |
289 help=("If specified, upload results json files to this appengine " | 292 help=('If specified, upload results json files to t
his appengine ' |
290 "server.")), | 293 'server.')), |
291 optparse.make_option("--write-full-results-to", | 294 optparse.make_option('--write-full-results-to', |
292 help=("If specified, copy full_results.json from the results dir " | 295 help=('If specified, copy full_results.json from th
e results dir ' |
293 "to the specified path.")), | 296 'to the specified path.')), |
294 ])) | 297 ])) |
295 | 298 |
296 option_parser = optparse.OptionParser() | 299 option_parser = optparse.OptionParser() |
297 | 300 |
298 for group_name, group_options in option_group_definitions: | 301 for group_name, group_options in option_group_definitions: |
299 option_group = optparse.OptionGroup(option_parser, group_name) | 302 option_group = optparse.OptionGroup(option_parser, group_name) |
300 option_group.add_options(group_options) | 303 option_group.add_options(group_options) |
301 option_parser.add_option_group(option_group) | 304 option_parser.add_option_group(option_group) |
302 | 305 |
303 return option_parser.parse_args(args) | 306 return option_parser.parse_args(args) |
304 | 307 |
305 | 308 |
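[Note: parse_args registers each (name, options) pair from option_group_definitions as an optparse.OptionGroup, which groups related flags under their own heading in --help output. A condensed, self-contained sketch of that loop, with one hypothetical group standing in for the full list:

    import optparse

    group_definitions = [
        ('Results Options', [
            optparse.make_option('--results-directory', help='Location of test results'),
        ]),
    ]
    parser = optparse.OptionParser()
    for group_name, group_options in group_definitions:
        group = optparse.OptionGroup(parser, group_name)  # becomes a heading in --help
        group.add_options(group_options)
        parser.add_option_group(group)

    options, args = parser.parse_args(['--results-directory', '/tmp/results'])
    print(options.results_directory)  # -> /tmp/results
]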
306 def _set_up_derived_options(port, options, args): | 309 def _set_up_derived_options(port, options, args): |
307 """Sets the options values that depend on other options values.""" | 310 """Sets the options values that depend on other options values.""" |
308 if not options.child_processes: | 311 if not options.child_processes: |
309 options.child_processes = os.environ.get("WEBKIT_TEST_CHILD_PROCESSES", | 312 options.child_processes = os.environ.get('WEBKIT_TEST_CHILD_PROCESSES', |
310 str(port.default_child_processes())) | 313 str(port.default_child_processes())) |
311 if not options.max_locked_shards: | 314 if not options.max_locked_shards: |
312 options.max_locked_shards = int(os.environ.get("WEBKIT_TEST_MAX_LOCKED_SHARDS", | 315 options.max_locked_shards = int(os.environ.get('WEBKIT_TEST_MAX_LOCKED_SHARDS', |
313 str(port.default_max_locked_shards()))) | 316 str(port.default_max_locked_shards()))) |
314 | 317 |
315 if not options.configuration: | 318 if not options.configuration: |
316 options.configuration = port.default_configuration() | 319 options.configuration = port.default_configuration() |
317 | 320 |
318 if options.pixel_tests is None: | 321 if options.pixel_tests is None: |
319 options.pixel_tests = port.default_pixel_tests() | 322 options.pixel_tests = port.default_pixel_tests() |
320 | 323 |
321 if not options.time_out_ms: | 324 if not options.time_out_ms: |
322 options.time_out_ms = str(port.default_timeout_ms()) | 325 options.time_out_ms = str(port.default_timeout_ms()) |
(...skipping 39 matching lines...) |
362 | 365 |
363 if not options.test_list: | 366 if not options.test_list: |
364 options.test_list = [] | 367 options.test_list = [] |
365 options.test_list.append(port.host.filesystem.join(port.layout_tests_dir(), 'SmokeTests')) | 368 options.test_list.append(port.host.filesystem.join(port.layout_tests_dir(), 'SmokeTests')) |
366 if not options.skipped: | 369 if not options.skipped: |
367 options.skipped = 'always' | 370 options.skipped = 'always' |
368 | 371 |
369 if not options.skipped: | 372 if not options.skipped: |
370 options.skipped = 'default' | 373 options.skipped = 'default' |
371 | 374 |
| 375 |
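[Note: _set_up_derived_options lets environment variables override the port's computed parallelism defaults; WEBKIT_TEST_CHILD_PROCESSES and WEBKIT_TEST_MAX_LOCKED_SHARDS are consulted only when the corresponding flag was not passed. A minimal sketch of that fallback chain, using a hypothetical stand-in for port.default_child_processes():

    import os

    child_processes = None  # as if --child-processes was not passed
    if not child_processes:
        # Flag wins if set; else the environment; else the port default (4 is a stand-in).
        child_processes = os.environ.get('WEBKIT_TEST_CHILD_PROCESSES', str(4))
    print(int(child_processes))
]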
372 def run(port, options, args, logging_stream): | 376 def run(port, options, args, logging_stream): |
373 logger = logging.getLogger() | 377 logger = logging.getLogger() |
374 logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO) | 378 logger.setLevel(logging.DEBUG if options.debug_rwt_logging else logging.INFO) |
375 | 379 |
376 try: | 380 try: |
377 printer = printing.Printer(port, options, logging_stream, logger=logger) | 381 printer = printing.Printer(port, options, logging_stream, logger=logger) |
378 | 382 |
379 _set_up_derived_options(port, options, args) | 383 _set_up_derived_options(port, options, args) |
380 manager = Manager(port, options, printer) | 384 manager = Manager(port, options, printer) |
381 printer.print_config(port.results_directory()) | 385 printer.print_config(port.results_directory()) |
382 | 386 |
383 run_details = manager.run(args) | 387 run_details = manager.run(args) |
384 _log.debug("Testing completed, Exit status: %d" % run_details.exit_code) | 388 _log.debug('Testing completed, Exit status: %d' % run_details.exit_code) |
385 return run_details | 389 return run_details |
386 finally: | 390 finally: |
387 printer.cleanup() | 391 printer.cleanup() |
388 | 392 |
389 if __name__ == '__main__': | 393 if __name__ == '__main__': |
390 sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr)) | 394 sys.exit(main(sys.argv[1:], sys.stdout, sys.stderr)) |