OLD | NEW |
| (Empty) |
1 #!/usr/bin/env python | |
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | |
3 # Use of this source code is governed by a BSD-style license that can be | |
4 # found in the LICENSE file. | |
5 | |
6 import collections | |
7 import glob | |
8 import hashlib | |
9 import json | |
10 import os | |
11 import random | |
12 import re | |
13 import shutil | |
14 import sys | |
15 | |
16 import bb_utils | |
17 import bb_annotations | |
18 | |
19 sys.path.append(os.path.join(os.path.dirname(__file__), '..')) | |
20 import provision_devices | |
21 from pylib import constants | |
22 from pylib.device import device_utils | |
23 from pylib.gtest import gtest_config | |
24 | |
# Path roots used throughout the device steps.
CHROME_SRC_DIR = bb_utils.CHROME_SRC
DIR_BUILD_ROOT = os.path.dirname(CHROME_SRC_DIR)
# NOTE: CHROME_OUT_DIR and LOGCAT_DIR are rebound in main() when
# --chrome-output-dir is passed (perf bisect builds).
CHROME_OUT_DIR = bb_utils.CHROME_OUT_DIR
BLINK_SCRIPTS_DIR = 'third_party/WebKit/Tools/Scripts'

SLAVE_SCRIPTS_DIR = os.path.join(bb_utils.BB_BUILD_DIR, 'scripts', 'slave')
LOGCAT_DIR = os.path.join(bb_utils.CHROME_OUT_DIR, 'logcat')
# Public and authenticated Google Storage URL prefixes for result links.
GS_URL = 'https://storage.googleapis.com'
GS_AUTH_URL = 'https://storage.cloud.google.com'
34 | |
# Describes an instrumentation test suite:
#   test: Name of test we're running.
#   apk: apk to be installed.
#   apk_package: package for the apk to be installed.
#   test_apk: apk to run tests on.
#   test_data: data folder in format destination:source.
#   isolate_file_path: .isolate file listing the suite's data dependencies.
#   host_driven_root: The host-driven test root directory.
#   annotation: Annotation of the tests to include.
#   exclude_annotation: The annotation of the tests to exclude.
#   extra_flags: Extra command line flags passed through to test_runner.py.
I_TEST = collections.namedtuple('InstrumentationTest', [
    'name', 'apk', 'apk_package', 'test_apk', 'test_data', 'isolate_file_path',
    'host_driven_root', 'annotation', 'exclude_annotation', 'extra_flags'])
47 | |
48 | |
def SrcPath(*path):
  """Return an absolute path to |path| components inside the src checkout."""
  return os.path.join(CHROME_SRC_DIR, *path)
51 | |
52 | |
def I(name, apk, apk_package, test_apk, test_data, isolate_file_path=None,
      host_driven_root=None, annotation=None, exclude_annotation=None,
      extra_flags=None):
  """Shorthand constructor for an I_TEST instrumentation-suite tuple."""
  return I_TEST(
      name=name,
      apk=apk,
      apk_package=apk_package,
      test_apk=test_apk,
      test_data=test_data,
      isolate_file_path=isolate_file_path,
      host_driven_root=host_driven_root,
      annotation=annotation,
      exclude_annotation=exclude_annotation,
      extra_flags=extra_flags)
58 | |
# Registry of all instrumentation suites, keyed by suite name.
INSTRUMENTATION_TESTS = {suite.name: suite for suite in [
    I('ContentShell',
      'ContentShell.apk',
      'org.chromium.content_shell_apk',
      'ContentShellTest',
      'content:content/test/data/android/device_files',
      isolate_file_path='content/content_shell_test_apk.isolate'),
    I('ChromeShell',
      'ChromeShell.apk',
      'org.chromium.chrome.shell',
      'ChromeShellTest',
      'chrome:chrome/test/data/android/device_files',
      isolate_file_path='chrome/chrome_shell_test_apk.isolate',
      host_driven_root=constants.CHROME_SHELL_HOST_DRIVEN_DIR),
    I('AndroidWebView',
      'AndroidWebView.apk',
      'org.chromium.android_webview.shell',
      'AndroidWebViewTest',
      'webview:android_webview/test/data/device_files',
      isolate_file_path='android_webview/android_webview_test_apk.isolate'),
    I('ChromeSyncShell',
      'ChromeSyncShell.apk',
      'org.chromium.chrome.browser.sync',
      'ChromeSyncShellTest',
      None),
]}
85 | |
# A package that can be installed on devices via the --install flag.
InstallablePackage = collections.namedtuple('InstallablePackage', [
    'name', 'apk', 'apk_package'])

# Everything installable: each instrumentation suite's apk plus the
# standalone ChromeDriver WebView shell.
# Uses .values() (not Python 2-only .itervalues()) so the file stays
# portable; behavior is identical under Python 2.
INSTALLABLE_PACKAGES = dict((package.name, package) for package in (
    [InstallablePackage(i.name, i.apk, i.apk_package)
     for i in INSTRUMENTATION_TESTS.values()] +
    [InstallablePackage('ChromeDriverWebViewShell',
                        'ChromeDriverWebViewShell.apk',
                        'org.chromium.chromedriver_webview_shell')]))
95 | |
# Names accepted by the -f/--test-filter flag; see GetTestStepCmds().
VALID_TESTS = {
    'base_junit_tests',
    'chromedriver',
    'chrome_proxy',
    'components_browsertests',
    'gfx_unittests',
    'gl_unittests',
    'gpu',
    'python_unittests',
    'telemetry_unittests',
    'telemetry_perf_unittests',
    'ui',
    'unit',
    'webkit',
    'webkit_layout',
}
112 | |
# Convenience alias; every step below shells out through bb_utils.RunCmd.
RunCmd = bb_utils.RunCmd
114 | |
115 | |
116 def _GetRevision(options): | |
117 """Get the SVN revision number. | |
118 | |
119 Args: | |
120 options: options object. | |
121 | |
122 Returns: | |
123 The revision number. | |
124 """ | |
125 revision = options.build_properties.get('got_revision') | |
126 if not revision: | |
127 revision = options.build_properties.get('revision', 'testing') | |
128 return revision | |
129 | |
130 | |
def _RunTest(options, cmd, suite):
  """Run one gtest suite through the buildbot runtest.py wrapper.

  Args:
    options: options object.
    cmd: the test_runner.py command to forward.
    suite: test name (used to name the JSON results file).
  """
  args = [os.path.join(SLAVE_SCRIPTS_DIR, 'runtest.py')]
  args.extend(bb_utils.EncodeProperties(options))
  args.extend(['--test-platform', 'android'])
  if options.factory_properties.get('generate_gtest_json'):
    build_props = options.build_properties
    args.append('--generate-json-file')
    args.extend(['-o', 'gtest-results/%s' % suite,
                 '--annotate', 'gtest',
                 '--build-number', str(build_props.get('buildnumber', '')),
                 '--builder-name', build_props.get('buildername', '')])
  args.extend(
      ['--target', 'Release' if options.target == 'Release' else 'Debug'])
  if options.flakiness_server:
    args.append('--flakiness-dashboard-server=%s' % options.flakiness_server)
  args.extend(cmd)
  RunCmd(args, cwd=DIR_BUILD_ROOT)
158 | |
159 | |
def RunTestSuites(options, suites, suites_options=None):
  """Manages an invocation of test_runner.py for gtests.

  Args:
    options: options object.
    suites: List of suite names to run.
    suites_options: Optional dict mapping a suite name to extra command
      line options for that suite only. For example,
      {'content_browsertests': ['--num_retries=1', '--release']}
      adds the options only to content_browsertests.
  """
  suites_options = suites_options or {}

  common_args = ['--verbose']
  if options.target == 'Release':
    common_args.append('--release')
  if options.asan:
    common_args.append('--tool=asan')
  if options.gtest_filter:
    common_args.append('--gtest-filter=%s' % options.gtest_filter)

  for suite in suites:
    bb_annotations.PrintNamedStep(suite)
    cmd = [suite] + common_args + suites_options.get(suite, [])
    # Browser tests are retried once to absorb device flakiness.
    if suite in ('content_browsertests', 'components_browsertests'):
      cmd.append('--num_retries=1')
    _RunTest(options, cmd, suite)
190 | |
191 | |
def RunJunitSuite(suite):
  """Run a host-side JUnit suite via test_runner.py.

  Args:
    suite: JUnit suite name.
  """
  bb_annotations.PrintNamedStep(suite)
  RunCmd(['build/android/test_runner.py', 'junit', '-s', suite])
195 | |
196 | |
def RunChromeDriverTests(options):
  """Run all the steps for running chromedriver tests."""
  bb_annotations.PrintNamedStep('chromedriver_annotation')
  android_packages = ','.join([
      'chrome_shell',
      'chrome_stable',
      'chrome_beta',
      'chromedriver_webview_shell'])
  RunCmd(['chrome/test/chromedriver/run_buildbot_steps.py',
          '--android-packages=%s' % android_packages,
          '--revision=%s' % _GetRevision(options),
          '--update-log'])
208 | |
def RunChromeProxyTests(options):
  """Run the chrome_proxy tests against a ChromeShell install.

  Args:
    options: options object.
  """
  InstallApk(options, INSTRUMENTATION_TESTS['ChromeShell'], False)
  cmd = ['tools/chrome_proxy/run_tests', '--browser', 'android-chrome-shell']
  devices = device_utils.DeviceUtils.HealthyDevices()
  if devices:
    # Pin the run to the first healthy device's serial.
    cmd.extend(['--device', devices[0].adb.GetDeviceSerial()])
  bb_annotations.PrintNamedStep('chrome_proxy')
  RunCmd(cmd)
222 | |
223 | |
def RunTelemetryTests(options, step_name, run_tests_path):
  """Runs either telemetry_perf_unittests or telemetry_unittests.

  Args:
    options: options object.
    step_name: either 'telemetry_unittests' or 'telemetry_perf_unittests'
    run_tests_path: path to run_tests script (tools/perf/run_tests for
                    perf_unittests and tools/telemetry/run_tests for
                    telemetry_unittests)
  """
  InstallApk(options, INSTRUMENTATION_TESTS['ChromeShell'], False)
  args = ['--browser', 'android-chrome-shell']
  devices = device_utils.DeviceUtils.HealthyDevices()
  if devices:
    # NOTE(review): unlike RunChromeProxyTests this passes the literal
    # string 'android' rather than a device serial — confirm intended.
    args = args + ['--device', 'android']
  bb_annotations.PrintNamedStep(step_name)
  RunCmd([run_tests_path] + args)
241 | |
242 | |
def InstallApk(options, test, print_step=False):
  """Install an apk to all phones.

  Args:
    options: options object
    test: An I_TEST namedtuple
    print_step: If True, emit an install_<name> buildbot step annotation.
  """
  if print_step:
    bb_annotations.PrintNamedStep('install_%s' % test.name.lower())

  cmd = ['build/android/adb_install_apk.py',
         '--apk_package', test.apk_package]
  if options.target == 'Release':
    cmd.append('--release')
  cmd.append(test.apk)
  # A failed install makes every later test step meaningless; halt.
  RunCmd(cmd, halt_on_failure=True)
260 | |
261 | |
def RunInstrumentationSuite(options, test, flunk_on_failure=True,
                            python_only=False, official_build=False):
  """Manages an invocation of test_runner.py for instrumentation tests.

  Args:
    options: options object
    test: An I_TEST namedtuple
    flunk_on_failure: Flunk the step if tests fail.
    python_only: Run only host driven Python tests.
    official_build: Run official-build tests.
  """
  bb_annotations.PrintNamedStep('%s_instrumentation_tests' % test.name.lower())

  # Some suites (e.g. ChromeSyncShell) define no apk under test.
  if test.apk:
    InstallApk(options, test)
  args = ['--test-apk', test.test_apk, '--verbose']
  if test.test_data:
    args.extend(['--test_data', test.test_data])
  if options.target == 'Release':
    args.append('--release')
  if options.asan:
    args.append('--tool=asan')
  if options.flakiness_server:
    args.append('--flakiness-dashboard-server=%s' %
                options.flakiness_server)
  if options.coverage_bucket:
    args.append('--coverage-dir=%s' % options.coverage_dir)
  if test.isolate_file_path:
    args.append('--isolate-file-path=%s' % test.isolate_file_path)
  if test.host_driven_root:
    args.append('--host-driven-root=%s' % test.host_driven_root)
  if test.annotation:
    args.extend(['-A', test.annotation])
  if test.exclude_annotation:
    args.extend(['-E', test.exclude_annotation])
  if test.extra_flags:
    args.extend(test.extra_flags)
  if python_only:
    args.append('-p')
  if official_build:
    # The option needs to be assigned 'True' as it does not have an action
    # associated with it.
    args.append('--official-build')

  RunCmd(['build/android/test_runner.py', 'instrumentation'] + args,
         flunk_on_failure=flunk_on_failure)
308 | |
309 | |
def RunWebkitLint():
  """Lint Blink's TestExpectations files."""
  bb_annotations.PrintNamedStep('webkit_lint')
  lint_tool = SrcPath(os.path.join(BLINK_SCRIPTS_DIR, 'lint-test-expectations'))
  RunCmd([lint_tool])
314 | |
315 | |
def RunWebkitLayoutTests(options):
  """Run layout tests on an actual device.

  Runs run-webkit-tests, interprets its special exit codes, links any
  unexpected results to the flakiness dashboard, and optionally archives
  the results to Google Storage.
  """
  bb_annotations.PrintNamedStep('webkit_tests')
  cmd_args = [
      '--no-show-results',
      '--no-new-test-results',
      '--full-results-html',
      '--clobber-old-results',
      '--exit-after-n-failures', '5000',
      '--exit-after-n-crashes-or-timeouts', '100',
      '--debug-rwt-logging',
      '--results-directory', '../layout-test-results',
      '--target', options.target,
      '--builder-name', options.build_properties.get('buildername', ''),
      '--build-number', str(options.build_properties.get('buildnumber', '')),
      '--master-name', 'ChromiumWebkit', # TODO: Get this from the cfg.
      '--build-name', options.build_properties.get('buildername', ''),
      '--platform=android']

  # Forward selected factory properties as --flag-style arguments.
  for flag in 'test_results_server', 'driver_name', 'additional_driver_flag':
    if flag in options.factory_properties:
      cmd_args.extend(['--%s' % flag.replace('_', '-'),
                       options.factory_properties.get(flag)])

  # Each entry is a path-component list joined under the src checkout.
  for f in options.factory_properties.get('additional_expectations', []):
    cmd_args.extend(
        ['--additional-expectations=%s' % os.path.join(CHROME_SRC_DIR, *f)])

  # TODO(dpranke): Remove this block after
  # https://codereview.chromium.org/12927002/ lands.
  for f in options.factory_properties.get('additional_expectations_files', []):
    cmd_args.extend(
        ['--additional-expectations=%s' % os.path.join(CHROME_SRC_DIR, *f)])

  exit_code = RunCmd(
      [SrcPath(os.path.join(BLINK_SCRIPTS_DIR, 'run-webkit-tests'))] + cmd_args)
  # The high exit codes are sentinels from the layout-test harness.
  if exit_code == 255: # test_run_results.UNEXPECTED_ERROR_EXIT_STATUS
    bb_annotations.PrintMsg('?? (crashed or hung)')
  elif exit_code == 254: # test_run_results.NO_DEVICES_EXIT_STATUS
    bb_annotations.PrintMsg('?? (no devices found)')
  elif exit_code == 253: # test_run_results.NO_TESTS_EXIT_STATUS
    bb_annotations.PrintMsg('?? (no tests found)')
  else:
    full_results_path = os.path.join('..', 'layout-test-results',
                                     'full_results.json')
    if os.path.exists(full_results_path):
      # NOTE(review): file handle is left to be closed by GC; consider a
      # with-statement.
      full_results = json.load(open(full_results_path))
      unexpected_passes, unexpected_failures, unexpected_flakes = (
          _ParseLayoutTestResults(full_results))
      # Failures take precedence over unexpected passes on the step link.
      if unexpected_failures:
        _PrintDashboardLink('failed', unexpected_failures.keys(),
                            max_tests=25)
      elif unexpected_passes:
        _PrintDashboardLink('unexpected passes', unexpected_passes.keys(),
                            max_tests=10)
      if unexpected_flakes:
        _PrintDashboardLink('unexpected flakes', unexpected_flakes.keys(),
                            max_tests=10)

      if exit_code == 0 and (unexpected_passes or unexpected_flakes):
        # If exit_code != 0, RunCmd() will have already printed an error.
        bb_annotations.PrintWarning()
    else:
      bb_annotations.PrintError()
      bb_annotations.PrintMsg('?? (results missing)')

  if options.factory_properties.get('archive_webkit_results', False):
    bb_annotations.PrintNamedStep('archive_webkit_results')
    base = 'https://storage.googleapis.com/chromium-layout-test-archives'
    builder_name = options.build_properties.get('buildername', '')
    build_number = str(options.build_properties.get('buildnumber', ''))
    results_link = '%s/%s/%s/layout-test-results/results.html' % (
        base, EscapeBuilderName(builder_name), build_number)
    bb_annotations.PrintLink('results', results_link)
    bb_annotations.PrintLink('(zip)', '%s/%s/%s/layout-test-results.zip' % (
        base, EscapeBuilderName(builder_name), build_number))
    gs_bucket = 'gs://chromium-layout-test-archives'
    RunCmd([os.path.join(SLAVE_SCRIPTS_DIR, 'chromium',
                         'archive_layout_test_results.py'),
            '--results-dir', '../../layout-test-results',
            '--build-number', build_number,
            '--builder-name', builder_name,
            '--gs-bucket', gs_bucket],
           cwd=DIR_BUILD_ROOT)
400 | |
401 | |
def _ParseLayoutTestResults(results):
  """Extract the failures from the test run.

  Cloned from third_party/WebKit/Tools/Scripts/print-json-test-results.

  Args:
    results: Parsed full_results.json dict containing a 'tests' trie.

  Returns:
    (passes, failures, flakes) dicts keyed by flattened test path.
  """
  tests = _ConvertTrieToFlatPaths(results['tests'])
  failures = {}
  flakes = {}
  passes = {}
  # .items() (not Python 2-only .iteritems()) keeps this portable; the
  # iteration behavior is identical under Python 2.
  for (test, result) in tests.items():
    if result.get('is_unexpected'):
      actual_results = result['actual'].split()
      expected_results = result['expected'].split()
      if len(actual_results) > 1:
        # We report the first failure type back, even if the second
        # was more severe.
        if actual_results[1] in expected_results:
          flakes[test] = actual_results[0]
        else:
          failures[test] = actual_results[0]
      elif actual_results[0] == 'PASS':
        passes[test] = result
      else:
        failures[test] = actual_results[0]

  return (passes, failures, flakes)
426 | |
427 | |
428 def _ConvertTrieToFlatPaths(trie, prefix=None): | |
429 """Flatten the trie of failures into a list.""" | |
430 # Cloned from third_party/WebKit/Tools/Scripts/print-json-test-results | |
431 result = {} | |
432 for name, data in trie.iteritems(): | |
433 if prefix: | |
434 name = prefix + '/' + name | |
435 | |
436 if len(data) and 'actual' not in data and 'expected' not in data: | |
437 result.update(_ConvertTrieToFlatPaths(data, name)) | |
438 else: | |
439 result[name] = data | |
440 | |
441 return result | |
442 | |
443 | |
def _PrintDashboardLink(link_text, tests, max_tests):
  """Add a link to the flakiness dashboard in the step annotations.

  Args:
    link_text: Label describing the test category (e.g. 'failed').
    tests: List of test names.
    max_tests: Truncate the displayed list after this many names.
  """
  if len(tests) > max_tests:
    shown = ' '.join(tests[:max_tests]) + ' and more'
  else:
    shown = ' '.join(tests)

  dashboard_base = ('http://test-results.appspot.com'
                    '/dashboards/flakiness_dashboard.html#'
                    'master=ChromiumWebkit&tests=')

  bb_annotations.PrintLink(
      '%d %s: %s' % (len(tests), link_text, shown),
      dashboard_base + ','.join(tests))
458 | |
459 | |
def EscapeBuilderName(builder_name):
  """Replace characters GS paths dislike (spaces, parens) with underscores."""
  return re.sub(r'[ ()]', '_', builder_name)
462 | |
463 | |
def SpawnLogcatMonitor():
  """Start the adb logcat monitor in the background with a clean log dir."""
  shutil.rmtree(LOGCAT_DIR, ignore_errors=True)
  monitor = os.path.join(
      CHROME_SRC_DIR, 'build', 'android', 'adb_logcat_monitor.py')
  bb_utils.SpawnCmd([monitor, LOGCAT_DIR])

  # Wait for logcat_monitor to pull existing logcat
  RunCmd(['sleep', '5'])
472 | |
473 | |
def ProvisionDevices(options):
  """Wipe/configure attached devices via provision_devices.py."""
  bb_annotations.PrintNamedStep('provision_devices')

  if not bb_utils.TESTING:
    # Restart adb to work around bugs, sleep to wait for usb discovery.
    device_utils.RestartServer()
    RunCmd(['sleep', '1'])
  cmd = ['build/android/provision_devices.py', '-t', options.target]
  # Forward the boolean provisioning options as plain flags.
  for flag, enabled in (('--auto-reconnect', options.auto_reconnect),
                        ('--skip-wipe', options.skip_wipe),
                        ('--disable-location', options.disable_location)):
    if enabled:
      cmd.append(flag)
  RunCmd(cmd, halt_on_failure=True)
489 | |
490 | |
def DeviceStatusCheck(options):
  """Run bb_device_status_check.py; halt the build on unhealthy devices."""
  bb_annotations.PrintNamedStep('device_status_check')
  cmd = ['build/android/buildbot/bb_device_status_check.py']
  if options.restart_usb:
    cmd += ['--restart-usb']
  RunCmd(cmd, halt_on_failure=True)
497 | |
498 | |
def GetDeviceSetupStepCmds():
  """Return ordered (step_name, function) pairs for device setup steps."""
  return [
      ('device_status_check', DeviceStatusCheck),
      ('provision_devices', ProvisionDevices),
  ]
504 | |
505 | |
def RunUnitTests(options):
  """Run the stable gtest suites (minus the ASan-excluded ones under ASan)."""
  suites = gtest_config.STABLE_TEST_SUITES
  if options.asan:
    excluded = gtest_config.ASAN_EXCLUDED_TEST_SUITES
    suites = [s for s in suites if s not in excluded]
  RunTestSuites(options, suites)
512 | |
513 | |
def RunTelemetryUnitTests(options):
  """Run telemetry_unittests via tools/telemetry/run_tests."""
  RunTelemetryTests(options, 'telemetry_unittests', 'tools/telemetry/run_tests')
516 | |
517 | |
def RunTelemetryPerfUnitTests(options):
  """Run telemetry_perf_unittests via tools/perf/run_tests."""
  RunTelemetryTests(options, 'telemetry_perf_unittests', 'tools/perf/run_tests')
520 | |
521 | |
def RunInstrumentationTests(options):
  """Run every registered instrumentation suite.

  Args:
    options: options object.
  """
  # .values() (not Python 2-only .itervalues()) keeps this portable; the
  # iteration behavior is identical under Python 2.
  for test in INSTRUMENTATION_TESTS.values():
    RunInstrumentationSuite(options, test)
525 | |
526 | |
def RunWebkitTests(options):
  """Run the Blink unit-test gtest suites, then lint TestExpectations."""
  suites = ['webkit_unit_tests', 'blink_heap_unittests']
  RunTestSuites(options, suites)
  RunWebkitLint()
530 | |
531 | |
def RunGPUTests(options):
  """Run the GPU pixel, WebGL conformance and GPU rasterization steps."""
  revision = _GetRevision(options)
  builder_name = options.build_properties.get('buildername', 'noname')

  # Pixel tests upload reference images to cloud storage.
  bb_annotations.PrintNamedStep('pixel_tests')
  RunCmd(['content/test/gpu/run_gpu_test.py',
          'pixel', '-v',
          '--browser',
          'android-content-shell',
          '--build-revision',
          str(revision),
          '--upload-refimg-to-cloud-storage',
          '--refimg-cloud-storage-bucket',
          'chromium-gpu-archive/reference-images',
          '--os-type',
          'android',
          '--test-machine-name',
          EscapeBuilderName(builder_name)])

  # WebGL conformance, first against content shell...
  bb_annotations.PrintNamedStep('webgl_conformance_tests')
  RunCmd(['content/test/gpu/run_gpu_test.py', '-v',
          '--browser=android-content-shell', 'webgl_conformance',
          '--webgl-conformance-version=1.0.1'])

  # ...then against the Android WebView shell.
  bb_annotations.PrintNamedStep('android_webview_webgl_conformance_tests')
  RunCmd(['content/test/gpu/run_gpu_test.py', '-v',
          '--browser=android-webview-shell', 'webgl_conformance',
          '--webgl-conformance-version=1.0.1'])

  bb_annotations.PrintNamedStep('gpu_rasterization_tests')
  RunCmd(['content/test/gpu/run_gpu_test.py',
          'gpu_rasterization', '-v',
          '--browser',
          'android-content-shell',
          '--build-revision',
          str(revision),
          '--test-machine-name',
          EscapeBuilderName(builder_name)])
570 | |
571 | |
def RunPythonUnitTests(_options):
  """Run each configured Python unit-test suite via test_runner.py."""
  for suite in constants.PYTHON_UNIT_TEST_SUITES:
    bb_annotations.PrintNamedStep(suite)
    RunCmd(['build/android/test_runner.py', 'python', '-s', suite])
576 | |
577 | |
def GetTestStepCmds():
  """Return (name, function) pairs mapping each VALID_TESTS entry to its step."""
  return [
      ('base_junit_tests',
          lambda _options: RunJunitSuite('base_junit_tests')),
      ('chromedriver', RunChromeDriverTests),
      ('chrome_proxy', RunChromeProxyTests),
      ('components_browsertests',
          lambda options: RunTestSuites(options, ['components_browsertests'])),
      ('gfx_unittests',
          lambda options: RunTestSuites(options, ['gfx_unittests'])),
      ('gl_unittests',
          lambda options: RunTestSuites(options, ['gl_unittests'])),
      ('gpu', RunGPUTests),
      ('python_unittests', RunPythonUnitTests),
      ('telemetry_unittests', RunTelemetryUnitTests),
      ('telemetry_perf_unittests', RunTelemetryPerfUnitTests),
      ('ui', RunInstrumentationTests),
      ('unit', RunUnitTests),
      ('webkit', RunWebkitTests),
      ('webkit_layout', RunWebkitLayoutTests),
  ]
599 | |
600 | |
def MakeGSPath(options, gs_base_dir):
  """Build a unique Google Storage object path for this build.

  Args:
    options: options object (provides revision and builder name).
    gs_base_dir: Base bucket/directory, e.g. 'chromium-android/logcat_dumps'.

  Returns:
    '<gs_base_dir>/<builder>/<revision>/<random sha1>' with duplicate
    slashes collapsed.
  """
  revision = _GetRevision(options)
  bot_id = options.build_properties.get('buildername', 'testing')
  # Random component keeps repeated uploads for the same build distinct.
  # NOTE(review): hashlib.sha1(str(...)) requires bytes on Python 3; fine
  # under the Python 2 this script targets.
  randhash = hashlib.sha1(str(random.random())).hexdigest()
  gs_path = '%s/%s/%s/%s' % (gs_base_dir, bot_id, revision, randhash)
  # remove double slashes, happens with blank revisions and confuses gsutil
  gs_path = re.sub('/+', '/', gs_path)
  return gs_path
609 | |
def UploadHTML(options, gs_base_dir, dir_to_upload, link_text,
               link_rel_path='index.html', gs_url=GS_URL):
  """Uploads directory at |dir_to_upload| to Google Storage and output a link.

  Args:
    options: Command line options.
    gs_base_dir: The Google Storage base directory (e.g.
      'chromium-code-coverage/java')
    dir_to_upload: Absolute path to the directory to be uploaded.
    link_text: Link text to be displayed on the step.
    link_rel_path: Link path relative to |dir_to_upload|.
    gs_url: Google storage URL.
  """
  gs_path = MakeGSPath(options, gs_base_dir)
  RunCmd([bb_utils.GSUTIL_PATH, 'cp', '-R', dir_to_upload, 'gs://%s' % gs_path])
  link = '%s/%s/%s' % (gs_url, gs_path, link_rel_path)
  bb_annotations.PrintLink(link_text, link)
627 | |
628 | |
def GenerateJavaCoverageReport(options):
  """Generates an HTML coverage report using EMMA and uploads it.

  Returns:
    Path to the generated HTML report directory.
  """
  bb_annotations.PrintNamedStep('java_coverage_report')

  coverage_html = os.path.join(options.coverage_dir, 'coverage_html')
  cmd = ['build/android/generate_emma_html.py',
         '--coverage-dir', options.coverage_dir,
         '--metadata-dir', os.path.join(CHROME_OUT_DIR, options.target),
         '--cleanup',
         '--output', os.path.join(coverage_html, 'index.html')]
  RunCmd(cmd)
  return coverage_html
640 | |
641 | |
def LogcatDump(options):
  """Dump the accumulated logcat, upload it to GS, and link it on the step."""
  # Print logcat, kill logcat monitor
  bb_annotations.PrintNamedStep('logcat_dump')
  logcat_file = os.path.join(CHROME_OUT_DIR, options.target, 'full_log.txt')
  printer = SrcPath('build', 'android', 'adb_logcat_printer.py')
  RunCmd([printer, '--output-path', logcat_file, LOGCAT_DIR])
  gs_path = MakeGSPath(options, 'chromium-android/logcat_dumps')
  RunCmd([bb_utils.GSUTIL_PATH, 'cp', '-z', 'txt', logcat_file,
          'gs://%s' % gs_path])
  bb_annotations.PrintLink('logcat dump', '%s/%s' % (GS_AUTH_URL, gs_path))
652 | |
653 | |
def RunStackToolSteps(options):
  """Run stack tool steps.

  Stack tool is run for logcat dump, optionally for ASAN.
  """
  bb_annotations.PrintNamedStep('Run stack tool with logcat dump')
  logcat_file = os.path.join(CHROME_OUT_DIR, options.target, 'full_log.txt')
  stack_tool = os.path.join(CHROME_SRC_DIR, 'third_party', 'android_platform',
                            'development', 'scripts', 'stack')
  RunCmd([stack_tool, '--more-info', logcat_file])
  if options.asan_symbolize:
    bb_annotations.PrintNamedStep('Run stack tool for ASAN')
    asan_tool = os.path.join(
        CHROME_SRC_DIR, 'build', 'android', 'asan_symbolize.py')
    RunCmd([asan_tool, '-l', logcat_file])
669 | |
670 | |
def GenerateTestReport(options):
  """Cat each accumulated *.log test log to the build output, then delete it."""
  bb_annotations.PrintNamedStep('test_report')
  log_glob = os.path.join(CHROME_OUT_DIR, options.target, 'test_logs', '*.log')
  for report in glob.glob(log_glob):
    RunCmd(['cat', report])
    os.remove(report)
677 | |
678 | |
def MainTestWrapper(options):
  """Run device setup, the selected test steps, and mandatory teardown.

  The finally-block teardown (logcat dump, stack tool, test report,
  heartbeat kill, optional out-dir cleanup) runs even if a step raises.
  """
  try:
    # Spawn logcat monitor
    SpawnLogcatMonitor()

    # Run all device setup steps
    for _, cmd in GetDeviceSetupStepCmds():
      cmd(options)

    if options.install:
      for i in options.install:
        install_obj = INSTALLABLE_PACKAGES[i]
        InstallApk(options, install_obj, print_step=True)

    if options.test_filter:
      bb_utils.RunSteps(options.test_filter, GetTestStepCmds(), options)

    if options.coverage_bucket:
      coverage_html = GenerateJavaCoverageReport(options)
      UploadHTML(options, '%s/java' % options.coverage_bucket, coverage_html,
                 'Coverage Report')
      shutil.rmtree(coverage_html, ignore_errors=True)

    if options.experimental:
      RunTestSuites(options, gtest_config.EXPERIMENTAL_TEST_SUITES)

  finally:
    # Run all post test steps
    LogcatDump(options)
    if not options.disable_stack_tool:
      RunStackToolSteps(options)
    GenerateTestReport(options)
    # KillHostHeartbeat() has logic to check if heartbeat process is running,
    # and kills only if it finds the process is running on the host.
    provision_devices.KillHostHeartbeat()
    if options.cleanup:
      shutil.rmtree(os.path.join(CHROME_OUT_DIR, options.target),
                    ignore_errors=True)
717 | |
718 | |
def GetDeviceStepsOptParser():
  """Build the option parser for the device-steps entry point.

  Returns:
    An option parser (from bb_utils.GetParser) extended with every flag
    consumed by main() and MainTestWrapper().
  """
  parser = bb_utils.GetParser()
  # Fixed help-string typo ('experiemental').
  parser.add_option('--experimental', action='store_true',
                    help='Run experimental tests')
  parser.add_option('-f', '--test-filter', metavar='<filter>', default=[],
                    action='append',
                    help=('Run a test suite. Test suites: "%s"' %
                          '", "'.join(VALID_TESTS)))
  parser.add_option('--gtest-filter',
                    help='Filter for running a subset of tests of a gtest test')
  parser.add_option('--asan', action='store_true', help='Run tests with asan.')
  parser.add_option('--install', metavar='<apk name>', action='append',
                    help='Install an apk by name')
  parser.add_option('--no-reboot', action='store_true',
                    help='Do not reboot devices during provisioning.')
  parser.add_option('--coverage-bucket',
                    help=('Bucket name to store coverage results. Coverage is '
                          'only run if this is set.'))
  parser.add_option('--restart-usb', action='store_true',
                    help='Restart usb ports before device status check.')
  parser.add_option(
      '--flakiness-server',
      help=('The flakiness dashboard server to which the results should be '
            'uploaded.'))
  parser.add_option(
      '--auto-reconnect', action='store_true',
      help='Push script to device which restarts adbd on disconnections.')
  parser.add_option('--skip-wipe', action='store_true',
                    help='Do not wipe devices during provisioning.')
  parser.add_option('--disable-location', action='store_true',
                    help='Disable location settings.')
  parser.add_option(
      '--logcat-dump-output',
      help='The logcat dump output will be "tee"-ed into this file')
  # During perf bisect processing, a separate working directory is created
  # under which builds are produced. Therefore we should look for relevant
  # output files under this directory.
  # (/b/build/slave/<slave_name>/build/bisect/src/out)
  parser.add_option(
      '--chrome-output-dir',
      help='Chrome output directory to be used while bisecting.')

  parser.add_option('--disable-stack-tool', action='store_true',
                    help='Do not run stack tool.')
  parser.add_option('--asan-symbolize', action='store_true',
                    help='Run stack tool for ASAN')
  parser.add_option('--cleanup', action='store_true',
                    help='Delete out/<target> directory at the end of the run.')
  return parser
767 | |
768 | |
def main(argv):
  """Entry point: parse arguments, validate test filters, run the steps."""
  parser = GetDeviceStepsOptParser()
  options, args = parser.parse_args(argv[1:])

  if args:
    return sys.exit('Unused args %s' % args)

  unknown_tests = set(options.test_filter) - VALID_TESTS
  if unknown_tests:
    return sys.exit('Unknown tests %s' % list(unknown_tests))

  # The build target ('Debug'/'Release') comes from factory properties.
  setattr(options, 'target', options.factory_properties.get('target', 'Debug'))

  # Perf bisects build in a separate working directory; redirect the
  # module-level output/logcat paths there when requested.
  if options.chrome_output_dir:
    global CHROME_OUT_DIR
    global LOGCAT_DIR
    CHROME_OUT_DIR = options.chrome_output_dir
    LOGCAT_DIR = os.path.join(CHROME_OUT_DIR, 'logcat')

  if options.coverage_bucket:
    setattr(options, 'coverage_dir',
            os.path.join(CHROME_OUT_DIR, options.target, 'coverage'))

  MainTestWrapper(options)
793 | |
794 | |
# Script entry point when invoked directly by the buildbot.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
OLD | NEW |