Chromium Code Reviews

Side by Side Diff: build/android/pylib/local/device/local_device_instrumentation_test_run.py

Issue 2866103002: Add render test results to the results_details webpage. (Closed)
Patch Set: (Reland) Add failure screenshots and images to results detail. Created 3 years, 7 months ago
1 # Copyright 2015 The Chromium Authors. All rights reserved. 1 # Copyright 2015 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 import logging 5 import logging
6 import os 6 import os
7 import posixpath 7 import posixpath
8 import re 8 import re
9 import tempfile
9 import time 10 import time
10 11
11 from devil.android import device_errors 12 from devil.android import device_errors
12 from devil.android import flag_changer 13 from devil.android import flag_changer
13 from devil.android.sdk import shared_prefs 14 from devil.android.sdk import shared_prefs
14 from devil.utils import reraiser_thread 15 from devil.utils import reraiser_thread
15 from pylib import valgrind_tools 16 from pylib import valgrind_tools
16 from pylib.android import logdog_logcat_monitor 17 from pylib.android import logdog_logcat_monitor
17 from pylib.base import base_test_result 18 from pylib.base import base_test_result
18 from pylib.instrumentation import instrumentation_test_instance 19 from pylib.instrumentation import instrumentation_test_instance
(...skipping 13 matching lines...)
32 ('IntegrationTest', 30 * 60), 33 ('IntegrationTest', 30 * 60),
33 ('External', 10 * 60), 34 ('External', 10 * 60),
34 ('EnormousTest', 10 * 60), 35 ('EnormousTest', 10 * 60),
35 ('LargeTest', 5 * 60), 36 ('LargeTest', 5 * 60),
36 ('MediumTest', 3 * 60), 37 ('MediumTest', 3 * 60),
37 ('SmallTest', 1 * 60), 38 ('SmallTest', 1 * 60),
38 ] 39 ]
39 40
40 LOGCAT_FILTERS = ['*:e', 'chromium:v', 'cr_*:v'] 41 LOGCAT_FILTERS = ['*:e', 'chromium:v', 'cr_*:v']
41 42
43 FEATURE_ANNOTATION = 'Feature'
44 RENDER_TEST_FEATURE_ANNOTATION = 'RenderTest'
45
46 RE_RENDER_IMAGE_NAME = re.compile(
PEConn 2017/05/09 08:52:39 Could you please put a comment here pointing to Re
jbudorick 2017/05/09 14:33:26 Might be good to add a few tests on both sides as
mikecase (-- gone --) 2017/05/10 01:05:00 Done
47 r'(?P<test_class>\w+)\.'
48 r'(?P<description>\w+)\.'
49 r'(?P<device_model>\w+)\.'
50 r'(?P<orientation>port|land)\.png')
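For orientation, and as a possible starting point for the unit tests suggested above, a minimal sketch (not part of the patch) of the filename shape RE_RENDER_IMAGE_NAME expects; the example filename is made up:

import re

RE_RENDER_IMAGE_NAME = re.compile(
    r'(?P<test_class>\w+)\.'
    r'(?P<description>\w+)\.'
    r'(?P<device_model>\w+)\.'
    r'(?P<orientation>port|land)\.png')

# Hypothetical failure image name written by a render test.
match = RE_RENDER_IMAGE_NAME.match('RenderTest.blue_button.Nexus5.port.png')
assert match is not None
assert match.groupdict() == {
    'test_class': 'RenderTest',
    'description': 'blue_button',
    'device_model': 'Nexus5',
    'orientation': 'port',
}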
51
52 RENDER_TESTS_HTML_TEMPLATE = '''
PEConn 2017/05/09 08:52:39 Could you move this into a separate file (or Jinja
mikecase (-- gone --) 2017/05/10 01:05:00 Done. Moved into own file so it will be easier to
53 <html>
54 <table>
55 <tr>
56 <th>Failure</th>
57 <th>Golden</th>
58 <th>Diff</th>
59 </tr>
60 <tr>
61 <td><img src="%s"/></td>
jbudorick 2017/05/09 14:33:26 Are these ok with an empty URL? (It's been a while
mikecase (-- gone --) 2017/05/10 01:05:00 Yeah, I double-checked, it just doesn't load anything
62 <td><img src="%s"/></td>
63 <td><img src="%s"/></td>
64 </tr>
65 </table>
66 </html>
67 '''
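For reference, the template is filled by plain %-substitution in _ProcessRenderTestResults below. A short sketch with placeholder URLs, which also shows that an empty string (the missing-golden case discussed above) simply yields an <img> with a blank src:

failure_link = 'https://storage.googleapis.com/render_tests/failure.png'  # placeholder
golden_link = ''  # no golden image: the cell renders an <img> with an empty src
diff_link = 'https://storage.googleapis.com/render_tests/diff.png'  # placeholder

html = RENDER_TESTS_HTML_TEMPLATE % (failure_link, golden_link, diff_link)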
42 68
43 # TODO(jbudorick): Make this private once the instrumentation test_runner is 69 # TODO(jbudorick): Make this private once the instrumentation test_runner is
44 # deprecated. 70 # deprecated.
45 def DidPackageCrashOnDevice(package_name, device): 71 def DidPackageCrashOnDevice(package_name, device):
46 # Dismiss any error dialogs. Limit the number in case we have an error 72 # Dismiss any error dialogs. Limit the number in case we have an error
47 # loop or we are failing to dismiss. 73 # loop or we are failing to dismiss.
48 try: 74 try:
49 for _ in xrange(10): 75 for _ in xrange(10):
50 package = device.DismissCrashDialogIfNeeded() 76 package = device.DismissCrashDialogIfNeeded()
51 if not package: 77 if not package:
(...skipping 187 matching lines...)
239 return tests 265 return tests
240 266
241 #override 267 #override
242 def _GetUniqueTestName(self, test): 268 def _GetUniqueTestName(self, test):
243 return instrumentation_test_instance.GetUniqueTestName(test) 269 return instrumentation_test_instance.GetUniqueTestName(test)
244 270
245 #override 271 #override
246 def _RunTest(self, device, test): 272 def _RunTest(self, device, test):
247 extras = {} 273 extras = {}
248 274
249 flags = None 275 flags_to_add = []
jbudorick 2017/05/09 14:33:26 This change was in your other CL?
mikecase (-- gone --) 2017/05/10 01:05:00 Yeah, have to make the same change here as well. W
276 flags_to_remove = []
250 test_timeout_scale = None 277 test_timeout_scale = None
251 if self._test_instance.coverage_directory: 278 if self._test_instance.coverage_directory:
252 coverage_basename = '%s.ec' % ('%s_group' % test[0]['method'] 279 coverage_basename = '%s.ec' % ('%s_group' % test[0]['method']
253 if isinstance(test, list) else test['method']) 280 if isinstance(test, list) else test['method'])
254 extras['coverage'] = 'true' 281 extras['coverage'] = 'true'
255 coverage_directory = os.path.join( 282 coverage_directory = os.path.join(
256 device.GetExternalStoragePath(), 'chrome', 'test', 'coverage') 283 device.GetExternalStoragePath(), 'chrome', 'test', 'coverage')
257 coverage_device_file = os.path.join( 284 coverage_device_file = os.path.join(
258 coverage_directory, coverage_basename) 285 coverage_directory, coverage_basename)
259 extras['coverageFile'] = coverage_device_file 286 extras['coverageFile'] = coverage_device_file
(...skipping 26 matching lines...)
286 test_display_name = self._GetUniqueTestName(test) 313 test_display_name = self._GetUniqueTestName(test)
287 if test['is_junit4']: 314 if test['is_junit4']:
288 target = '%s/%s' % ( 315 target = '%s/%s' % (
289 self._test_instance.test_package, 316 self._test_instance.test_package,
290 self._test_instance.test_runner_junit4) 317 self._test_instance.test_runner_junit4)
291 else: 318 else:
292 target = '%s/%s' % ( 319 target = '%s/%s' % (
293 self._test_instance.test_package, self._test_instance.test_runner) 320 self._test_instance.test_package, self._test_instance.test_runner)
294 extras['class'] = test_name 321 extras['class'] = test_name
295 if 'flags' in test: 322 if 'flags' in test:
296 flags = test['flags'] 323 flags_to_add.extend(test['flags'].add)
324 flags_to_remove.extend(test['flags'].remove)
297 timeout = self._GetTimeoutFromAnnotations( 325 timeout = self._GetTimeoutFromAnnotations(
298 test['annotations'], test_display_name) 326 test['annotations'], test_display_name)
299 327
300 test_timeout_scale = self._GetTimeoutScaleFromAnnotations( 328 test_timeout_scale = self._GetTimeoutScaleFromAnnotations(
301 test['annotations']) 329 test['annotations'])
302 if test_timeout_scale and test_timeout_scale != 1: 330 if test_timeout_scale and test_timeout_scale != 1:
303 valgrind_tools.SetChromeTimeoutScale( 331 valgrind_tools.SetChromeTimeoutScale(
304 device, test_timeout_scale * self._test_instance.timeout_scale) 332 device, test_timeout_scale * self._test_instance.timeout_scale)
305 333
306 logging.info('preparing to run %s: %s', test_display_name, test) 334 logging.info('preparing to run %s: %s', test_display_name, test)
307 335
308 if flags: 336 render_tests_device_output_dir = None
337 if _IsRenderTest(test):
338 # TODO(mikecase): Add DeviceTempDirectory class and use that instead.
339 render_tests_device_output_dir = posixpath.join(
340 device.GetExternalStoragePath(),
341 'render_test_output_dir')
342 flags_to_add.append('--render-test-output-dir=%s' %
343 render_tests_device_output_dir)
344
345 if flags_to_add or flags_to_remove:
309 self._CreateFlagChangerIfNeeded(device) 346 self._CreateFlagChangerIfNeeded(device)
310 self._flag_changers[str(device)].PushFlags( 347 self._flag_changers[str(device)].PushFlags(
311 add=flags.add, remove=flags.remove) 348 add=flags_to_add, remove=flags_to_remove)
312 349
313 try: 350 try:
314 device.RunShellCommand( 351 device.RunShellCommand(
315 ['log', '-p', 'i', '-t', _TAG, 'START %s' % test_name], 352 ['log', '-p', 'i', '-t', _TAG, 'START %s' % test_name],
316 check_return=True) 353 check_return=True)
317 time_ms = lambda: int(time.time() * 1e3) 354 time_ms = lambda: int(time.time() * 1e3)
318 start_ms = time_ms() 355 start_ms = time_ms()
319 356
320 stream_name = 'logcat_%s_%s_%s' % ( 357 stream_name = 'logcat_%s_%s_%s' % (
321 test_name.replace('#', '.'), 358 test_name.replace('#', '.'),
322 time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), 359 time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
323 device.serial) 360 device.serial)
324 logmon = logdog_logcat_monitor.LogdogLogcatMonitor( 361 logmon = logdog_logcat_monitor.LogdogLogcatMonitor(
325 device.adb, stream_name, filter_specs=LOGCAT_FILTERS) 362 device.adb, stream_name, filter_specs=LOGCAT_FILTERS)
326 363
327 with contextlib_ext.Optional( 364 with contextlib_ext.Optional(
328 logmon, self._test_instance.should_save_logcat): 365 logmon, self._test_instance.should_save_logcat):
329 with contextlib_ext.Optional( 366 with contextlib_ext.Optional(
330 trace_event.trace(test_name), 367 trace_event.trace(test_name),
331 self._env.trace_output): 368 self._env.trace_output):
332 output = device.StartInstrumentation( 369 output = device.StartInstrumentation(
333 target, raw=True, extras=extras, timeout=timeout, retries=0) 370 target, raw=True, extras=extras, timeout=timeout, retries=0)
334 logcat_url = logmon.GetLogcatURL() 371 logcat_url = logmon.GetLogcatURL()
335 finally: 372 finally:
336 device.RunShellCommand( 373 device.RunShellCommand(
337 ['log', '-p', 'i', '-t', _TAG, 'END %s' % test_name], 374 ['log', '-p', 'i', '-t', _TAG, 'END %s' % test_name],
338 check_return=True) 375 check_return=True)
339 duration_ms = time_ms() - start_ms 376 duration_ms = time_ms() - start_ms
340 if flags: 377 if flags_to_add or flags_to_remove:
341 self._flag_changers[str(device)].Restore() 378 self._flag_changers[str(device)].Restore()
342 if test_timeout_scale: 379 if test_timeout_scale:
343 valgrind_tools.SetChromeTimeoutScale( 380 valgrind_tools.SetChromeTimeoutScale(
344 device, self._test_instance.timeout_scale) 381 device, self._test_instance.timeout_scale)
345 382
346 # TODO(jbudorick): Make instrumentation tests output a JSON so this 383 # TODO(jbudorick): Make instrumentation tests output a JSON so this
347 # doesn't have to parse the output. 384 # doesn't have to parse the output.
348 result_code, result_bundle, statuses = ( 385 result_code, result_bundle, statuses = (
349 self._test_instance.ParseAmInstrumentRawOutput(output)) 386 self._test_instance.ParseAmInstrumentRawOutput(output))
350 results = self._test_instance.GenerateTestResults( 387 results = self._test_instance.GenerateTestResults(
351 result_code, result_bundle, statuses, start_ms, duration_ms) 388 result_code, result_bundle, statuses, start_ms, duration_ms)
352 for result in results: 389 for result in results:
353 if logcat_url: 390 if logcat_url:
354 result.SetLink('logcat', logcat_url) 391 result.SetLink('logcat', logcat_url)
355 392
393 if _IsRenderTest(test):
394 # Render tests do not cause test failure by default. So we have to check
395 # to see if any failure images were generated even if the test does not
396 # fail.
397 self._ProcessRenderTestResults(
398 device, render_tests_device_output_dir, results)
399
356 # Update the result name if the test used flags. 400 # Update the result name if the test used flags.
357 if flags: 401 if flags_to_add or flags_to_remove:
358 for r in results: 402 for r in results:
359 if r.GetName() == test_name: 403 if r.GetName() == test_name:
360 r.SetName(test_display_name) 404 r.SetName(test_display_name)
361 405
362 # Add UNKNOWN results for any missing tests. 406 # Add UNKNOWN results for any missing tests.
363 iterable_test = test if isinstance(test, list) else [test] 407 iterable_test = test if isinstance(test, list) else [test]
364 test_names = set(self._GetUniqueTestName(t) for t in iterable_test) 408 test_names = set(self._GetUniqueTestName(t) for t in iterable_test)
365 results_names = set(r.GetName() for r in results) 409 results_names = set(r.GetName() for r in results)
366 results.extend( 410 results.extend(
367 base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN) 411 base_test_result.BaseTestResult(u, base_test_result.ResultType.UNKNOWN)
(...skipping 67 matching lines...)
435 include_stack_symbols=False, 479 include_stack_symbols=False,
436 wipe_tombstones=True) 480 wipe_tombstones=True)
437 stream_name = 'tombstones_%s_%s' % ( 481 stream_name = 'tombstones_%s_%s' % (
438 time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()), 482 time.strftime('%Y%m%dT%H%M%S-UTC', time.gmtime()),
439 device.serial) 483 device.serial)
440 tombstones_url = logdog_helper.text( 484 tombstones_url = logdog_helper.text(
441 stream_name, '\n'.join(resolved_tombstones)) 485 stream_name, '\n'.join(resolved_tombstones))
442 result.SetLink('tombstones', tombstones_url) 486 result.SetLink('tombstones', tombstones_url)
443 return results, None 487 return results, None
444 488
489 def _ProcessRenderTestResults(
490 self, device, render_tests_device_output_dir, results):
491 # Will archive test images if we are given a GS bucket to store the results
492 # in and are given a results file to output the links to.
493 if not bool(self._test_instance.gs_results_bucket):
494 return
495
496 failure_images_device_dir = posixpath.join(
497 render_tests_device_output_dir, 'failures')
498
499 if not device.FileExists(failure_images_device_dir):
500 return
501
502 render_tests_bucket = (
503 self._test_instance.gs_results_bucket + '/render_tests')
504
505 diff_images_device_dir = posixpath.join(
506 render_tests_device_output_dir, 'diffs')
507
508 golden_images_device_dir = posixpath.join(
509 render_tests_device_output_dir, 'goldens')
510
511 with tempfile_ext.NamedTemporaryDirectory() as temp_dir:
512 try:
513 device.PullFile(failure_images_device_dir, temp_dir)
514
515 if device.FileExists(diff_images_device_dir):
516 device.PullFile(diff_images_device_dir, temp_dir)
517 else:
518 logging.error('Diff images not found on device.')
519
520 if device.FileExists(golden_images_device_dir):
521 device.PullFile(golden_images_device_dir, temp_dir)
522 else:
523 logging.error('Golden images not found on device.')
524 finally:
525 device.RemovePath(render_tests_device_output_dir, recursive=True)
jbudorick 2017/05/09 14:33:26 I think this should be the responsibility of _RunTest
mikecase (-- gone --) 2017/05/10 01:05:00 Done
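As a sketch of what moving that cleanup into _RunTest could look like (the exact placement in the landed patch is not shown here, so this is an assumption), the removal could sit next to the existing flag/timeout restoration in _RunTest's finally block, for example via a small helper:

def _RemoveRenderTestOutputDir(device, render_tests_device_output_dir):
  # Hypothetical helper: _RunTest owns the device output dir it created,
  # so it also removes it once results have been pulled.
  if render_tests_device_output_dir:
    device.RemovePath(render_tests_device_output_dir, recursive=True)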
526
527 for failure_filename in os.listdir(os.path.join(temp_dir, 'failures')):
528
529 m = RE_RENDER_IMAGE_NAME.match(failure_filename)
530 if not m:
531 logging.warning('Unexpected file in render test failures: %s',
532 failure_filename)
533 continue
534
535 failure_filepath = os.path.join(temp_dir, 'failures', failure_filename)
536 failure_link = google_storage_helper.upload(
537 google_storage_helper.unique_name(failure_filename, device=device),
538 failure_filepath,
539 bucket=render_tests_bucket)
540
541 golden_filepath = os.path.join(temp_dir, 'goldens', failure_filename)
542 if os.path.exists(golden_filepath):
543 golden_link = google_storage_helper.upload(
544 google_storage_helper.unique_name(
545 failure_filename, device=device),
546 golden_filepath,
547 bucket=render_tests_bucket)
548 else:
549 golden_link = ''
550
551 diff_filepath = os.path.join(temp_dir, 'diffs', failure_filename)
552 if os.path.exists(diff_filepath):
553 diff_link = google_storage_helper.upload(
554 google_storage_helper.unique_name(
555 failure_filename, device=device),
556 diff_filepath,
557 bucket=render_tests_bucket)
558 else:
559 diff_link = ''
560
561 with tempfile.NamedTemporaryFile(suffix='.html') as temp_html:
562 temp_html.write(RENDER_TESTS_HTML_TEMPLATE %
563 (failure_link, golden_link, diff_link))
564 temp_html.flush()
565 html_results_link = google_storage_helper.upload(
566 google_storage_helper.unique_name('render_html', device=device),
567 temp_html.name,
568 bucket=render_tests_bucket,
569 content_type='text/html')
570 for result in results:
571 result.SetLink(failure_filename, html_results_link)
572
445 #override 573 #override
446 def _ShouldRetry(self, test): 574 def _ShouldRetry(self, test):
447 if 'RetryOnFailure' in test.get('annotations', {}): 575 if 'RetryOnFailure' in test.get('annotations', {}):
448 return True 576 return True
449 577
450 # TODO(jbudorick): Remove this log message once @RetryOnFailure has been 578 # TODO(jbudorick): Remove this log message once @RetryOnFailure has been
451 # enabled for a while. See crbug.com/619055 for more details. 579 # enabled for a while. See crbug.com/619055 for more details.
452 logging.error('Default retries are being phased out. crbug.com/619055') 580 logging.error('Default retries are being phased out. crbug.com/619055')
453 return False 581 return False
454 582
(...skipping 15 matching lines...)
470 if k in annotations: 598 if k in annotations:
471 timeout = v 599 timeout = v
472 break 600 break
473 else: 601 else:
474 logging.warning('Using default 1 minute timeout for %s', test_name) 602 logging.warning('Using default 1 minute timeout for %s', test_name)
475 timeout = 60 603 timeout = 60
476 604
477 timeout *= cls._GetTimeoutScaleFromAnnotations(annotations) 605 timeout *= cls._GetTimeoutScaleFromAnnotations(annotations)
478 606
479 return timeout 607 return timeout
608
609 def _IsRenderTest(test):
610 """Determines if a test or list of tests has a RenderTest amongst them."""
611 if not isinstance(test, list):
612 test = [test]
613 return any(RENDER_TEST_FEATURE_ANNOTATION in t['annotations'].get(
614 FEATURE_ANNOTATION, ()) for t in test)
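A quick illustration, not part of the patch, of how _IsRenderTest behaves for a single test and for a grouped list; the test dicts are hypothetical and shaped to match what this helper reads:

render_test = {'annotations': {FEATURE_ANNOTATION: [RENDER_TEST_FEATURE_ANNOTATION]}}
plain_test = {'annotations': {'MediumTest': None}}

assert _IsRenderTest(render_test)
assert _IsRenderTest([plain_test, render_test])
assert not _IsRenderTest(plain_test)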
