Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 import logging | 5 import logging |
| 6 import os | 6 import os |
| 7 import posixpath | 7 import posixpath |
| 8 import re | 8 import re |
| 9 import sys | |
| 10 import tempfile | |
| 9 import time | 11 import time |
| 10 | 12 |
| 11 from devil.android import device_errors | 13 from devil.android import device_errors |
| 12 from devil.android import flag_changer | 14 from devil.android import flag_changer |
| 13 from devil.android.sdk import shared_prefs | 15 from devil.android.sdk import shared_prefs |
| 14 from devil.utils import reraiser_thread | 16 from devil.utils import reraiser_thread |
| 15 from pylib import valgrind_tools | 17 from pylib import valgrind_tools |
| 16 from pylib.android import logdog_logcat_monitor | 18 from pylib.android import logdog_logcat_monitor |
| 17 from pylib.base import base_test_result | 19 from pylib.base import base_test_result |
| 20 from pylib.constants import host_paths | |
| 18 from pylib.instrumentation import instrumentation_test_instance | 21 from pylib.instrumentation import instrumentation_test_instance |
| 19 from pylib.local.device import local_device_environment | 22 from pylib.local.device import local_device_environment |
| 20 from pylib.local.device import local_device_test_run | 23 from pylib.local.device import local_device_test_run |
| 21 from pylib.utils import google_storage_helper | 24 from pylib.utils import google_storage_helper |
| 22 from pylib.utils import logdog_helper | 25 from pylib.utils import logdog_helper |
| 23 from py_trace_event import trace_event | 26 from py_trace_event import trace_event |
| 24 from py_utils import contextlib_ext | 27 from py_utils import contextlib_ext |
| 25 from py_utils import tempfile_ext | 28 from py_utils import tempfile_ext |
| 26 import tombstones | 29 import tombstones |
| 27 | 30 |
| 31 sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party')) | |
| 32 import jinja2 # pylint: disable=import-error | |
| 33 | |
| 34 | |
| 35 _JINJA_TEMPLATE_DIR = os.path.join( | |
| 36 host_paths.DIR_SOURCE_ROOT, 'build', 'android', 'pylib', 'instrumentation') | |
| 37 _JINJA_TEMPLATE_FILENAME = 'render_test.html.jinja' | |
| 38 | |
| 28 _TAG = 'test_runner_py' | 39 _TAG = 'test_runner_py' |
| 29 | 40 |
| 30 TIMEOUT_ANNOTATIONS = [ | 41 TIMEOUT_ANNOTATIONS = [ |
| 31 ('Manual', 10 * 60 * 60), | 42 ('Manual', 10 * 60 * 60), |
| 32 ('IntegrationTest', 30 * 60), | 43 ('IntegrationTest', 30 * 60), |
| 33 ('External', 10 * 60), | 44 ('External', 10 * 60), |
| 34 ('EnormousTest', 10 * 60), | 45 ('EnormousTest', 10 * 60), |
| 35 ('LargeTest', 5 * 60), | 46 ('LargeTest', 5 * 60), |
| 36 ('MediumTest', 3 * 60), | 47 ('MediumTest', 3 * 60), |
| 37 ('SmallTest', 1 * 60), | 48 ('SmallTest', 1 * 60), |
| 38 ] | 49 ] |
| 39 | 50 |
| 40 LOGCAT_FILTERS = ['*:e', 'chromium:v', 'cr_*:v'] | 51 LOGCAT_FILTERS = ['*:e', 'chromium:v', 'cr_*:v'] |
| 41 | 52 |
| 53 FEATURE_ANNOTATION = 'Feature' | |
| 54 RENDER_TEST_FEATURE_ANNOTATION = 'RenderTest' | |
| 55 | |
| 56 # This needs to be kept in sync with formatting in |RenderUtils.imageName| | |
| 57 RE_RENDER_IMAGE_NAME = re.compile( | |
| 58 r'(?P<test_class>\w+)\.' | |
| 59 r'(?P<description>\w+)\.' | |
| 60 r'(?P<device_model>\w+)\.' | |
| 61 r'(?P<orientation>port|land)\.png') | |
| 42 | 62 |
| 43 # TODO(jbudorick): Make this private once the instrumentation test_runner is | 63 # TODO(jbudorick): Make this private once the instrumentation test_runner is |
| 44 # deprecated. | 64 # deprecated. |
| 45 def DidPackageCrashOnDevice(package_name, device): | 65 def DidPackageCrashOnDevice(package_name, device): |
| 46 # Dismiss any error dialogs. Limit the number in case we have an error | 66 # Dismiss any error dialogs. Limit the number in case we have an error |
| 47 # loop or we are failing to dismiss. | 67 # loop or we are failing to dismiss. |
| 48 try: | 68 try: |
| 49 for _ in xrange(10): | 69 for _ in xrange(10): |
| 50 package = device.DismissCrashDialogIfNeeded() | 70 package = device.DismissCrashDialogIfNeeded() |
| 51 if not package: | 71 if not package: |
| (...skipping 187 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 239 return tests | 259 return tests |
| 240 | 260 |
#override
def _GetUniqueTestName(self, test):
  """Returns the unique display name for |test|.

  Delegates to instrumentation_test_instance.GetUniqueTestName, which
  accounts for per-test flags so flag-variant runs get distinct names.
  """
  return instrumentation_test_instance.GetUniqueTestName(test)
| 244 | 264 |
| 245 #override | 265 #override |
| 246 def _RunTest(self, device, test): | 266 def _RunTest(self, device, test): |
| 247 extras = {} | 267 extras = {} |
| 248 | 268 |
| 249 flags = None | 269 flags_to_add = [] |
| 270 flags_to_remove = [] | |
| 250 test_timeout_scale = None | 271 test_timeout_scale = None |
| 251 if self._test_instance.coverage_directory: | 272 if self._test_instance.coverage_directory: |
| 252 coverage_basename = '%s.ec' % ('%s_group' % test[0]['method'] | 273 coverage_basename = '%s.ec' % ('%s_group' % test[0]['method'] |
| 253 if isinstance(test, list) else test['method']) | 274 if isinstance(test, list) else test['method']) |
| 254 extras['coverage'] = 'true' | 275 extras['coverage'] = 'true' |
| 255 coverage_directory = os.path.join( | 276 coverage_directory = os.path.join( |
| 256 device.GetExternalStoragePath(), 'chrome', 'test', 'coverage') | 277 device.GetExternalStoragePath(), 'chrome', 'test', 'coverage') |
| 257 coverage_device_file = os.path.join( | 278 coverage_device_file = os.path.join( |
| 258 coverage_directory, coverage_basename) | 279 coverage_directory, coverage_basename) |
| 259 extras['coverageFile'] = coverage_device_file | 280 extras['coverageFile'] = coverage_device_file |
| (...skipping 26 matching lines...) Expand all Loading... | |
| 286 test_display_name = self._GetUniqueTestName(test) | 307 test_display_name = self._GetUniqueTestName(test) |
| 287 if test['is_junit4']: | 308 if test['is_junit4']: |
| 288 target = '%s/%s' % ( | 309 target = '%s/%s' % ( |
| 289 self._test_instance.test_package, | 310 self._test_instance.test_package, |
| 290 self._test_instance.test_runner_junit4) | 311 self._test_instance.test_runner_junit4) |
| 291 else: | 312 else: |
| 292 target = '%s/%s' % ( | 313 target = '%s/%s' % ( |
| 293 self._test_instance.test_package, self._test_instance.test_runner) | 314 self._test_instance.test_package, self._test_instance.test_runner) |
| 294 extras['class'] = test_name | 315 extras['class'] = test_name |
| 295 if 'flags' in test: | 316 if 'flags' in test: |
| 296 flags = test['flags'] | 317 flags_to_add.extend(test['flags'].add) |
| 318 flags_to_remove.extend(test['flags'].remove) | |
| 297 timeout = self._GetTimeoutFromAnnotations( | 319 timeout = self._GetTimeoutFromAnnotations( |
| 298 test['annotations'], test_display_name) | 320 test['annotations'], test_display_name) |
| 299 | 321 |
| 300 test_timeout_scale = self._GetTimeoutScaleFromAnnotations( | 322 test_timeout_scale = self._GetTimeoutScaleFromAnnotations( |
| 301 test['annotations']) | 323 test['annotations']) |
| 302 if test_timeout_scale and test_timeout_scale != 1: | 324 if test_timeout_scale and test_timeout_scale != 1: |
| 303 valgrind_tools.SetChromeTimeoutScale( | 325 valgrind_tools.SetChromeTimeoutScale( |
| 304 device, test_timeout_scale * self._test_instance.timeout_scale) | 326 device, test_timeout_scale * self._test_instance.timeout_scale) |
| 305 | 327 |
| 306 logging.info('preparing to run %s: %s', test_display_name, test) | 328 logging.info('preparing to run %s: %s', test_display_name, test) |
| 307 | 329 |
| 308 if flags: | 330 render_tests_device_output_dir = None |
| 331 if _IsRenderTest(test): | |
| 332 # TODO(mikecase): Add DeviceTempDirectory class and use that instead. | |
|
jbudorick
2017/05/10 15:10:05
This would be a pretty nice addition to device_temp_file (in devil), alongside the existing temp-file helper.
| |
| 333 render_tests_device_output_dir = posixpath.join( | |
| 334 device.GetExternalStoragePath(), | |
| 335 'render_test_output_dir') | |
| 336 flags_to_add.append('--render-test-output-dir=%s' % | |
| 337 render_tests_device_output_dir) | |
| 338 | |
| 339 if flags_to_add or flags_to_remove: | |
| 309 self._CreateFlagChangerIfNeeded(device) | 340 self._CreateFlagChangerIfNeeded(device) |
| 310 self._flag_changers[str(device)].PushFlags( | 341 self._flag_changers[str(device)].PushFlags( |
| 311 add=flags.add, remove=flags.remove) | 342 add=flags_to_add, remove=flags_to_remove) |
| 312 | 343 |
| 313 try: | 344 try: |
| 314 device.RunShellCommand( | 345 device.RunShellCommand( |
| 315 ['log', '-p', 'i', '-t', _TAG, 'START %s' % test_name], | 346 ['log', '-p', 'i', '-t', _TAG, 'START %s' % test_name], |
| 316 check_return=True) | 347 check_return=True) |
| 317 time_ms = lambda: int(time.time() * 1e3) | 348 time_ms = lambda: int(time.time() * 1e3) |
| 318 start_ms = time_ms() | 349 start_ms = time_ms() |
| 319 | 350 |
| 320 stream_name = 'logcat_%s_%s_%s' % ( | 351 stream_name = 'logcat_%s_%s_%s' % ( |
| 321 test_name.replace('#', '.'), | 352 test_name.replace('#', '.'), |
| 322 time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), | 353 time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), |
| 323 device.serial) | 354 device.serial) |
| 324 logmon = logdog_logcat_monitor.LogdogLogcatMonitor( | 355 logmon = logdog_logcat_monitor.LogdogLogcatMonitor( |
| 325 device.adb, stream_name, filter_specs=LOGCAT_FILTERS) | 356 device.adb, stream_name, filter_specs=LOGCAT_FILTERS) |
| 326 | 357 |
| 327 with contextlib_ext.Optional( | 358 with contextlib_ext.Optional( |
| 328 logmon, self._test_instance.should_save_logcat): | 359 logmon, self._test_instance.should_save_logcat): |
| 329 with contextlib_ext.Optional( | 360 with contextlib_ext.Optional( |
| 330 trace_event.trace(test_name), | 361 trace_event.trace(test_name), |
| 331 self._env.trace_output): | 362 self._env.trace_output): |
| 332 output = device.StartInstrumentation( | 363 output = device.StartInstrumentation( |
| 333 target, raw=True, extras=extras, timeout=timeout, retries=0) | 364 target, raw=True, extras=extras, timeout=timeout, retries=0) |
| 334 logcat_url = logmon.GetLogcatURL() | 365 logcat_url = logmon.GetLogcatURL() |
| 335 finally: | 366 finally: |
| 336 device.RunShellCommand( | 367 device.RunShellCommand( |
| 337 ['log', '-p', 'i', '-t', _TAG, 'END %s' % test_name], | 368 ['log', '-p', 'i', '-t', _TAG, 'END %s' % test_name], |
| 338 check_return=True) | 369 check_return=True) |
| 339 duration_ms = time_ms() - start_ms | 370 duration_ms = time_ms() - start_ms |
| 340 if flags: | 371 if flags_to_add or flags_to_remove: |
| 341 self._flag_changers[str(device)].Restore() | 372 self._flag_changers[str(device)].Restore() |
| 342 if test_timeout_scale: | 373 if test_timeout_scale: |
| 343 valgrind_tools.SetChromeTimeoutScale( | 374 valgrind_tools.SetChromeTimeoutScale( |
| 344 device, self._test_instance.timeout_scale) | 375 device, self._test_instance.timeout_scale) |
| 345 | 376 |
| 346 # TODO(jbudorick): Make instrumentation tests output a JSON so this | 377 # TODO(jbudorick): Make instrumentation tests output a JSON so this |
| 347 # doesn't have to parse the output. | 378 # doesn't have to parse the output. |
| 348 result_code, result_bundle, statuses = ( | 379 result_code, result_bundle, statuses = ( |
| 349 self._test_instance.ParseAmInstrumentRawOutput(output)) | 380 self._test_instance.ParseAmInstrumentRawOutput(output)) |
| 350 results = self._test_instance.GenerateTestResults( | 381 results = self._test_instance.GenerateTestResults( |
| 351 result_code, result_bundle, statuses, start_ms, duration_ms) | 382 result_code, result_bundle, statuses, start_ms, duration_ms) |
| 352 for result in results: | 383 for result in results: |
| 353 if logcat_url: | 384 if logcat_url: |
| 354 result.SetLink('logcat', logcat_url) | 385 result.SetLink('logcat', logcat_url) |
| 355 | 386 |
| 387 if _IsRenderTest(test): | |
| 388 # Render tests do not cause test failure by default. So we have to check | |
| 389 # to see if any failure images were generated even if the test does not | |
| 390 # fail. | |
| 391 try: | |
| 392 self._ProcessRenderTestResults( | |
| 393 device, render_tests_device_output_dir, results) | |
| 394 finally: | |
| 395 device.RemovePath(render_tests_device_output_dir, | |
| 396 recursive=True, force=True) | |
| 397 | |
| 356 # Update the result name if the test used flags. | 398 # Update the result name if the test used flags. |
| 357 if flags: | 399 if flags_to_add or flags_to_remove: |
| 358 for r in results: | 400 for r in results: |
| 359 if r.GetName() == test_name: | 401 if r.GetName() == test_name: |
| 360 r.SetName(test_display_name) | 402 r.SetName(test_display_name) |
| 361 | 403 |
| 362 # Add UNKNOWN results for any missing tests. | 404 # Add UNKNOWN results for any missing tests. |
| 363 iterable_test = test if isinstance(test, list) else [test] | 405 iterable_test = test if isinstance(test, list) else [test] |
| 364 test_names = set(self._GetUniqueTestName(t) for t in iterable_test) | 406 test_names = set(self._GetUniqueTestName(t) for t in iterable_test) |
| 365 results_names = set(r.GetName() for r in results) | 407 results_names = set(r.GetName() for r in results) |
| 366 results.extend( | 408 results.extend( |
| 367 base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN) | 409 base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN) |
| (...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 435 include_stack_symbols=False, | 477 include_stack_symbols=False, |
| 436 wipe_tombstones=True) | 478 wipe_tombstones=True) |
| 437 stream_name = 'tombstones_%s_%s' % ( | 479 stream_name = 'tombstones_%s_%s' % ( |
| 438 time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), | 480 time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), |
| 439 device.serial) | 481 device.serial) |
| 440 tombstones_url = logdog_helper.text( | 482 tombstones_url = logdog_helper.text( |
| 441 stream_name, '\n'.join(resolved_tombstones)) | 483 stream_name, '\n'.join(resolved_tombstones)) |
| 442 result.SetLink('tombstones', tombstones_url) | 484 result.SetLink('tombstones', tombstones_url) |
| 443 return results, None | 485 return results, None |
| 444 | 486 |
def _ProcessRenderTestResults(
    self, device, render_tests_device_output_dir, results):
  """Archives render-test failure images and links them to |results|.

  Pulls failure, golden, and diff images from the device, uploads them to
  the configured Google Storage results bucket, renders a comparison HTML
  page per failure image, and attaches a link to that page to every result
  in |results|.

  Args:
    device: The DeviceUtils instance the test ran on.
    render_tests_device_output_dir: On-device directory the render test
        wrote its output images into.
    results: Iterable of BaseTestResult objects to attach links to.

  This is a no-op unless a GS results bucket is configured and at least one
  failure image exists on the device.
  """
  if not self._test_instance.gs_results_bucket:
    return

  failure_images_device_dir = posixpath.join(
      render_tests_device_output_dir, 'failures')
  # No failures directory means no failure images were generated.
  if not device.FileExists(failure_images_device_dir):
    return

  render_tests_bucket = (
      self._test_instance.gs_results_bucket + '/render_tests')

  diff_images_device_dir = posixpath.join(
      render_tests_device_output_dir, 'diffs')
  golden_images_device_dir = posixpath.join(
      render_tests_device_output_dir, 'goldens')

  with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
    device.PullFile(failure_images_device_dir, temp_dir)

    # Diffs/goldens may legitimately be missing (e.g. a brand-new test);
    # log and continue rather than fail.
    if device.FileExists(diff_images_device_dir):
      device.PullFile(diff_images_device_dir, temp_dir)
    else:
      logging.error('Diff images not found on device.')

    if device.FileExists(golden_images_device_dir):
      device.PullFile(golden_images_device_dir, temp_dir)
    else:
      logging.error('Golden images not found on device.')

    # The template is loop-invariant; build the jinja2 environment once
    # instead of once per failure image.
    jinja2_env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(_JINJA_TEMPLATE_DIR),
        trim_blocks=True)
    template = jinja2_env.get_template(_JINJA_TEMPLATE_FILENAME)

    for failure_filename in os.listdir(os.path.join(temp_dir, 'failures')):
      m = RE_RENDER_IMAGE_NAME.match(failure_filename)
      if not m:
        logging.warning('Unexpected file in render test failures: %s',
                        failure_filename)
        continue

      failure_filepath = os.path.join(temp_dir, 'failures', failure_filename)
      failure_link = google_storage_helper.upload(
          google_storage_helper.unique_name(
              'failure_%s' % failure_filename, device=device),
          failure_filepath,
          bucket=render_tests_bucket)

      golden_filepath = os.path.join(temp_dir, 'goldens', failure_filename)
      if os.path.exists(golden_filepath):
        golden_link = google_storage_helper.upload(
            google_storage_helper.unique_name(
                'golden_%s' % failure_filename, device=device),
            golden_filepath,
            bucket=render_tests_bucket)
      else:
        # Missing golden image: the HTML page shows an empty link.
        golden_link = ''

      diff_filepath = os.path.join(temp_dir, 'diffs', failure_filename)
      if os.path.exists(diff_filepath):
        diff_link = google_storage_helper.upload(
            google_storage_helper.unique_name(
                'diff_%s' % failure_filename, device=device),
            diff_filepath,
            bucket=render_tests_bucket)
      else:
        diff_link = ''

      with tempfile.NamedTemporaryFile(suffix='.html') as temp_html:
        # pylint: disable=no-member
        processed_template_output = template.render(
            failure_link=failure_link,
            golden_link=golden_link,
            diff_link=diff_link)

        temp_html.write(processed_template_output)
        temp_html.flush()
        html_results_link = google_storage_helper.upload(
            google_storage_helper.unique_name('render_html', device=device),
            temp_html.name,
            bucket=render_tests_bucket,
            content_type='text/html')
        # Attach the comparison page to every result so it is visible
        # regardless of which result entry the viewer inspects.
        for result in results:
          result.SetLink(failure_filename, html_results_link)
| 577 | |
| 445 #override | 578 #override |
| 446 def _ShouldRetry(self, test): | 579 def _ShouldRetry(self, test): |
| 447 if 'RetryOnFailure' in test.get('annotations', {}): | 580 if 'RetryOnFailure' in test.get('annotations', {}): |
| 448 return True | 581 return True |
| 449 | 582 |
| 450 # TODO(jbudorick): Remove this log message once @RetryOnFailure has been | 583 # TODO(jbudorick): Remove this log message once @RetryOnFailure has been |
| 451 # enabled for a while. See crbug.com/619055 for more details. | 584 # enabled for a while. See crbug.com/619055 for more details. |
| 452 logging.error('Default retries are being phased out. crbug.com/619055') | 585 logging.error('Default retries are being phased out. crbug.com/619055') |
| 453 return False | 586 return False |
| 454 | 587 |
| (...skipping 15 matching lines...) Expand all Loading... | |
| 470 if k in annotations: | 603 if k in annotations: |
| 471 timeout = v | 604 timeout = v |
| 472 break | 605 break |
| 473 else: | 606 else: |
| 474 logging.warning('Using default 1 minute timeout for %s', test_name) | 607 logging.warning('Using default 1 minute timeout for %s', test_name) |
| 475 timeout = 60 | 608 timeout = 60 |
| 476 | 609 |
| 477 timeout *= cls._GetTimeoutScaleFromAnnotations(annotations) | 610 timeout *= cls._GetTimeoutScaleFromAnnotations(annotations) |
| 478 | 611 |
| 479 return timeout | 612 return timeout |
| 613 | |
def _IsRenderTest(test):
  """Determines if a test or list of tests has a RenderTest amongst them.

  Args:
    test: A single test dict or a list of test dicts.

  Returns:
    True iff any of the tests carries the RenderTest value in its
    Feature annotation.
  """
  if not isinstance(test, list):
    test = [test]
  # Bug fix: the previous expression was any([cond] for t in test) -- the
  # condition was wrapped in a one-element list literal, which is truthy
  # for every t, so the function returned True for any non-empty input.
  return any(RENDER_TEST_FEATURE_ANNOTATION in t['annotations'].get(
      FEATURE_ANNOTATION, ()) for t in test)
| OLD | NEW |