Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(239)

Side by Side Diff: scripts/slave/recipe_modules/chromium/steps.py

Issue 1185693002: Move builders.py and steps.py to chromium_tests. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Address review comments. Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 # Copyright 2014 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
4
5 import re
6 import string
7
8
class Test(object):
  """Base class for a test that can be re-run after a patch is deapplied.

  Subclasses implement the actual execution; this class provides shared
  bookkeeping (per-suffix step results) and default behavior.
  """

  def __init__(self):
    super(Test, self).__init__()
    # Maps suffix ('with patch' / 'without patch') to the recorded step run.
    self._test_runs = {}

  @property
  def abort_on_failure(self):
    """Whether a failure of this test should abort the whole build."""
    return False

  @property
  def name(self):  # pragma: no cover
    """Display name of the test."""
    raise NotImplementedError()

  @property
  def isolate_target(self):
    """Isolate target name; defaults to the test name."""
    return self.name  # pragma: no cover

  @staticmethod
  def compile_targets(api):
    """Compile targets this test needs built."""
    raise NotImplementedError()  # pragma: no cover

  def pre_run(self, api, suffix):  # pragma: no cover
    """Steps executed before the test itself runs."""
    return []

  def run(self, api, suffix):  # pragma: no cover
    """Runs the test; suffix is 'with patch' or 'without patch'."""
    raise NotImplementedError()

  def post_run(self, api, suffix):  # pragma: no cover
    """Steps executed after the test has run."""
    return []

  def has_valid_results(self, api, suffix):  # pragma: no cover
    """Whether machine-readable results are available for this suffix.

    Distinguishes 'ran with no failures' from 'failed to even report
    results in machine-readable format'.
    """
    raise NotImplementedError()

  def failures(self, api, suffix):  # pragma: no cover
    """List of failed test names (strings) for the given suffix."""
    raise NotImplementedError()

  @property
  def uses_swarming(self):
    """Whether the test runs on swarming."""
    return False

  def _step_name(self, suffix):
    """Combines the test's name with an optional suffix into a step name."""
    return '%s (%s)' % (self.name, suffix) if suffix else self.name
76
class ArchiveBuildStep(Test):
  """Archives the finished build into a Google Storage bucket."""

  def __init__(self, gs_bucket, gs_acl=None):
    self.gs_bucket = gs_bucket
    self.gs_acl = gs_acl

  def run(self, api, suffix):
    """Uploads the build via api.chromium.archive_build."""
    return api.chromium.archive_build(
        'archive build', self.gs_bucket, gs_acl=self.gs_acl)

  @staticmethod
  def compile_targets(_):
    """Archiving requires no extra compile targets."""
    return []
93
class SizesStep(Test):
  """Runs the sizes step, passing results_url and perf_id through."""

  def __init__(self, results_url, perf_id):
    self.results_url = results_url
    self.perf_id = perf_id

  def run(self, api, suffix):
    """Invokes api.chromium.sizes with the configured URL and perf id."""
    return api.chromium.sizes(self.results_url, self.perf_id)

  @staticmethod
  def compile_targets(_):
    """Sizes operates on the 'chrome' target."""
    return ['chrome']
106
class ScriptTest(Test):  # pylint: disable=W0232
  """Test whose logic lives in a script inside the chromium repo.

  This makes it possible to keep the logic src-side as opposed
  to the build repo most Chromium developers are unfamiliar with.

  Another advantage is being able to test changes to these scripts
  on trybots.

  All new tests are strongly encouraged to use this infrastructure.
  """

  def __init__(self, name, script, all_compile_targets, script_args=None):
    """
    Args:
      name: Displayed name of the test.
      script: Script filename under src/testing/scripts.
      all_compile_targets: Mapping from script name to its compile targets;
          entries may use ${name} template substitution.
      script_args: Optional extra arguments passed to the script via --args.
    """
    super(ScriptTest, self).__init__()
    self._name = name
    self._script = script
    self._all_compile_targets = all_compile_targets
    self._script_args = script_args

  @property
  def name(self):
    return self._name

  def compile_targets(self, api):
    """Returns compile targets for this script, with ${name} expanded."""
    try:
      substitutions = {'name': self._name}

      return [string.Template(s).safe_substitute(substitutions)
              for s in self._all_compile_targets[self._script]]
    except KeyError:
      # Not all scripts will have test data inside recipes,
      # so return a default value.
      # TODO(phajdan.jr): Revisit this when all script tests
      # lists move src-side. We should be able to provide
      # test data then.
      if api.chromium._test_data.enabled:
        return []

      raise  # pragma: no cover

  def run(self, api, suffix):
    # Use the shared helper instead of re-deriving the step name here,
    # keeping naming consistent with all other Test subclasses.
    name = self._step_name(suffix)

    run_args = []
    if suffix == 'without patch':
      run_args.extend([
          '--filter-file', api.json.input(self.failures(api, 'with patch'))
      ])  # pragma: no cover

    try:
      script_args = []
      if self._script_args:
        script_args = ['--args', api.json.input(self._script_args)]
      api.python(
          name,
          # Enforce that all scripts are in the specified directory
          # for consistency.
          api.path['checkout'].join(
              'testing', 'scripts', api.path.basename(self._script)),
          args=(api.chromium.get_common_args_for_scripts() +
                script_args +
                ['run', '--output', api.json.output()] +
                run_args),
          step_test_data=lambda: api.json.test_api.output(
              {'valid': True, 'failures': []}))
    finally:
      # Record the step result even when the step fails.
      self._test_runs[suffix] = api.step.active_result

    return self._test_runs[suffix]

  def has_valid_results(self, api, suffix):
    """True if the script emitted well-formed JSON with a 'valid' flag."""
    try:
      # Make sure the JSON includes all necessary data.
      self.failures(api, suffix)

      return self._test_runs[suffix].json.output['valid']
    except Exception:  # pragma: no cover
      return False

  def failures(self, api, suffix):
    return self._test_runs[suffix].json.output['failures']
192
class LocalGTestTest(Test):
  """Runs a gtest suite locally on the bot (or via the Android launcher)."""

  def __init__(self, name, args=None, target_name=None, use_isolate=False,
               revision=None, webkit_revision=None, android_isolate_path=None,
               override_compile_targets=None, **runtest_kwargs):
    """Constructs an instance of LocalGTestTest.

    Args:
      name: Displayed name of the test. May be modified by suffixes.
      args: Arguments to be passed to the test.
      target_name: Actual name of the test. Defaults to name.
      use_isolate: When set, uses api.isolate.runtest to invoke the test.
          Calling recipe should have isolate in their DEPS.
      revision: Revision of the Chrome checkout.
      webkit_revision: Revision of the WebKit checkout.
      android_isolate_path: Checkout-relative path to an .isolate file,
          forwarded to the Android test launcher.
      override_compile_targets: List of compile targets for this test
          (for tests that don't follow target naming conventions).
      runtest_kwargs: Additional keyword args forwarded to the runtest.
    """
    super(LocalGTestTest, self).__init__()
    self._name = name
    self._args = args or []
    self._target_name = target_name
    self._use_isolate = use_isolate
    self._revision = revision
    self._webkit_revision = webkit_revision
    self._android_isolate_path = android_isolate_path
    self._override_compile_targets = override_compile_targets
    self._runtest_kwargs = runtest_kwargs

  @property
  def name(self):
    return self._name

  @property
  def target_name(self):
    # Build/target name; falls back to the display name.
    return self._target_name or self._name

  @property
  def isolate_target(self):
    return self.target_name  # pragma: no cover

  def compile_targets(self, api):
    """Derives compile targets from the target name and platform."""
    if self._override_compile_targets:
      return self._override_compile_targets

    # Android gtests are built and packaged as APKs.
    if api.chromium.c.TARGET_PLATFORM == 'android':
      return [self.target_name + '_apk']

    # On iOS we rely on 'All' target being compiled instead of using
    # individual targets.
    if api.chromium.c.TARGET_PLATFORM == 'ios':
      return []

    return [self.target_name]

  def run(self, api, suffix):
    """Runs the gtest and records the step result under the given suffix."""
    # Copy the list because run can be invoked multiple times and we modify
    # the local copy.
    args = self._args[:]
    is_android = api.chromium.c.TARGET_PLATFORM == 'android'

    kwargs = {}
    # When re-running without the patch, restrict the run to the tests that
    # failed with the patch applied.
    if suffix == 'without patch':
      failures = self.failures(api, 'with patch')
      if is_android:
        kwargs['gtest_filter'] = ':'.join(failures)  # pragma: no cover
      else:
        args.append(api.chromium.test_launcher_filter(failures))

    gtest_results_file = api.test_utils.gtest_results(add_json_log=False)
    step_test_data = lambda: api.test_utils.test_api.canned_gtest_output(True)

    kwargs['name'] = self._step_name(suffix)
    kwargs['args'] = args
    kwargs['step_test_data'] = step_test_data

    if is_android:
      # TODO(sergiyb): Figure out if we can reuse isolate module for running
      # isolated Android tests, rather than using custom solution in Android
      # test launcher.
      if self._android_isolate_path:
        isolate_path = api.path['checkout'].join(self._android_isolate_path)
        kwargs['isolate_file_path'] = isolate_path
      kwargs['json_results_file'] = gtest_results_file
      kwargs['flakiness_dashboard'] = 'http://test-results.appspot.com'
    else:
      kwargs['xvfb'] = True
      kwargs['test_type'] = self.name
      kwargs['annotate'] = 'gtest'
      kwargs['test_launcher_summary_output'] = gtest_results_file
      # Caller-provided runtest kwargs take precedence over the defaults
      # set above.
      kwargs.update(self._runtest_kwargs)

    try:
      if is_android:
        api.chromium_android.run_test_suite(self.target_name, **kwargs)
      elif self._use_isolate:
        api.isolate.runtest(self.target_name, self._revision,
                            self._webkit_revision, **kwargs)
      else:
        api.chromium.runtest(self.target_name, revision=self._revision,
                             webkit_revision=self._webkit_revision, **kwargs)
    finally:
      # Record the step result (and annotate failures) even when the test
      # step itself fails.
      step_result = api.step.active_result
      self._test_runs[suffix] = step_result

      if hasattr(step_result, 'test_utils'):
        r = step_result.test_utils.gtest_results
        p = step_result.presentation

        if r.valid:
          p.step_text += api.test_utils.format_step_text([
              ['failures:', r.failures]
          ])

    return step_result

  def has_valid_results(self, api, suffix):
    """Valid iff the gtest JSON parsed and is not flagged unreliable."""
    if suffix not in self._test_runs:
      return False  # pragma: no cover
    if not hasattr(self._test_runs[suffix], 'test_utils'):
      return False  # pragma: no cover
    gtest_results = self._test_runs[suffix].test_utils.gtest_results
    if not gtest_results.valid:  # pragma: no cover
      return False
    # Runs flagged UNRELIABLE_RESULTS are treated as invalid.
    global_tags = gtest_results.raw.get('global_tags', [])
    return 'UNRELIABLE_RESULTS' not in global_tags

  def failures(self, api, suffix):
    return self._test_runs[suffix].test_utils.gtest_results.failures
323
def generate_gtest(api, mastername, buildername, test_spec,
                   enable_swarming=False, scripts_compile_targets=None):
  """Yields a GTestTest for every gtest entry configured for this builder."""

  def canonicalize_test(test):
    # Bare strings are shorthand for {'test': <name>}.
    if isinstance(test, basestring):
      canonical_test = {'test': test}
    else:
      canonical_test = test.copy()

    canonical_test.setdefault('shard_index', 0)
    canonical_test.setdefault('total_shards', 1)
    return canonical_test

  def get_tests(api):
    return [canonicalize_test(t) for t in
            test_spec.get(buildername, {}).get('gtest_tests', [])]

  for test in get_tests(api):
    args = test.get('args', [])
    shard_index = test['shard_index']
    total_shards = test['total_shards']
    # Pass sharding flags only when sharding is actually in effect.
    if (shard_index, total_shards) != (0, 1):
      args.extend(['--test-launcher-shard-index=%d' % shard_index,
                   '--test-launcher-total-shards=%d' % total_shards])

    use_swarming = False
    swarming_shards = 1
    if enable_swarming:
      swarming_spec = test.get('swarming', {})
      if swarming_spec.get('can_use_on_swarming_builders'):
        use_swarming = True
        swarming_shards = swarming_spec.get('shards', 1)

    yield GTestTest(str(test['test']), args=args, flakiness_dash=True,
                    enable_swarming=use_swarming,
                    swarming_shards=swarming_shards)
355
356
def generate_script(api, mastername, buildername, test_spec,
                    enable_swarming=False, scripts_compile_targets=None):
  """Yields a ScriptTest for every script entry configured for this builder."""
  builder_spec = test_spec.get(buildername, {})
  for script_spec in builder_spec.get('scripts', []):
    yield ScriptTest(
        str(script_spec['name']),
        script_spec['script'],
        scripts_compile_targets,  # pragma: no cover
        script_spec.get('args', []))
365
366
class DynamicPerfTests(Test):
  """Runs the perf tests assigned to this shard by device affinity."""

  def __init__(self, browser, perf_id, shard_index, num_shards):
    self.browser = browser
    self.perf_id = perf_id
    self.shard_index = shard_index
    self.num_shards = num_shards

  @property
  def name(self):
    return 'dynamic_perf_tests'

  def run(self, api, suffix):
    # NOTE(review): if several tests fail, only the last StepFailure is
    # re-raised after all tests have been attempted — confirm intended.
    exception = None
    tests = api.chromium.list_perf_tests(self.browser, self.num_shards)
    # Keep only the tests whose device affinity matches this shard.
    tests = dict((k, v) for k, v in tests.json.output['steps'].iteritems()
                 if v['device_affinity'] == self.shard_index)
    for test_name, test in sorted(tests.iteritems()):
      test_name = str(test_name)
      annotate = api.chromium.get_annotate_by_test_name(test_name)
      cmd = test['cmd'].split()
      try:
        api.chromium.runtest(
            cmd[1] if len(cmd) > 1 else cmd[0],
            args=cmd[2:],
            name=test_name,
            annotate=annotate,
            python_mode=True,
            results_url='https://chromeperf.appspot.com',
            perf_dashboard_id=test.get('perf_dashboard_id', test_name),
            perf_id=self.perf_id,
            test_type=test.get('perf_dashboard_id', test_name),
            xvfb=True,
            chartjson_file=True)
      except api.step.StepFailure as f:
        # Defer the failure so the remaining tests still run.
        exception = f
    if exception:
      raise exception

  @staticmethod
  def compile_targets(_):
    return []
409
class AndroidPerfTests(Test):
  """Runs sharded Android perf tests on the first attached device."""

  def __init__(self, perf_id, num_shards):
    self.perf_id = perf_id
    self.num_shards = num_shards

  def run(self, api, suffix):
    """Lists perf tests for the device and runs them sharded."""
    deferred_failure = None
    api.adb.list_devices(step_test_data=api.adb.test_api.two_devices)
    perf_tests = api.chromium.list_perf_tests(
        browser='android-chrome-shell',
        num_shards=self.num_shards,
        devices=api.adb.devices[0:1]).json.output
    try:
      api.chromium_android.run_sharded_perf_tests(
          config=api.json.input(data=perf_tests),
          perf_id=self.perf_id)
    except api.step.StepFailure as f:  # pragma: no cover
      deferred_failure = f
    if deferred_failure:
      raise deferred_failure  # pragma: no cover

  @staticmethod
  def compile_targets(_):
    """No extra compile targets are required."""
    return []
435
class SwarmingTest(Test):
  """Base class for tests triggered and collected via swarming.

  Subclasses implement create_task() and validate_task_results(); this class
  handles triggering (pre_run), collection (post_run) and bookkeeping.
  """

  def __init__(self, name, dimensions=None, tags=None, target_name=None,
               extra_suffix=None):
    """
    Args:
      name: Displayed name of the test.
      dimensions: Extra swarming dimensions added to the task.
      tags: Extra swarming tags added to the task.
      target_name: Actual target name. Defaults to name.
      extra_suffix: Optional suffix appended to the displayed name.
    """
    # Chain to Test.__init__ so base-class state (_test_runs) is initialized
    # consistently with the other Test subclasses in this file.
    super(SwarmingTest, self).__init__()
    self._name = name
    self._tasks = {}    # suffix -> triggered swarming task.
    self._results = {}  # suffix -> {'valid': bool, 'failures': list}.
    self._target_name = target_name
    self._dimensions = dimensions
    self._tags = tags
    self._extra_suffix = extra_suffix

  @property
  def name(self):
    if self._extra_suffix:
      return '%s %s' % (self._name, self._extra_suffix)
    else:
      return self._name

  @property
  def target_name(self):
    return self._target_name or self._name

  @property
  def isolate_target(self):
    return self.target_name

  def create_task(self, api, suffix, isolated_hash):
    """Creates a swarming task. Must be overridden in subclasses.

    Args:
      api: Caller's API.
      suffix: Suffix added to the test name.
      isolated_hash: Hash of the isolated test to be run.

    Returns:
      A SwarmingTask object.
    """
    raise NotImplementedError()  # pragma: no cover

  def pre_run(self, api, suffix):
    """Launches the test on Swarming."""
    assert suffix not in self._tasks, (
        'Test %s was already triggered' % self._step_name(suffix))

    # *.isolated may be missing if *_run target is misconfigured. It's an
    # error in gyp, not a recipe failure. So carry on with recipe execution.
    isolated_hash = api.isolate.isolated_tests.get(self.target_name)
    if not isolated_hash:
      return api.python.inline(
          '[error] %s' % self._step_name(suffix),
          r"""
          import sys
          print '*.isolated file for target %s is missing' % sys.argv[1]
          sys.exit(1)
          """,
          args=[self.target_name])

    # Create task.
    self._tasks[suffix] = self.create_task(api, suffix, isolated_hash)

    # Add custom dimensions.
    if self._dimensions:
      self._tasks[suffix].dimensions.update(self._dimensions)

    # Add config-specific tags.
    self._tasks[suffix].tags.update(api.chromium.c.runtests.swarming_tags)

    # Add custom tags.
    if self._tags:
      self._tasks[suffix].tags.update(self._tags)

    # Set default value.
    if 'os' not in self._tasks[suffix].dimensions:
      self._tasks[suffix].dimensions['os'] = api.swarming.prefered_os_dimension(
          api.platform.name)

    return api.swarming.trigger_task(self._tasks[suffix])

  def run(self, api, suffix):  # pylint: disable=R0201
    """Not used. All logic in pre_run, post_run."""
    return []

  def validate_task_results(self, api, step_result):
    """Interprets output of a task (provided as StepResult object).

    Called for successful and failed tasks.

    Args:
      api: Caller's API.
      step_result: StepResult object to examine.

    Returns:
      A tuple (valid, failures), where valid is True if valid results are
      available and failures is a list of names of failed tests (ignored if
      valid is False).
    """
    raise NotImplementedError()  # pragma: no cover

  def post_run(self, api, suffix):
    """Waits for launched test to finish and collects the results."""
    assert suffix not in self._results, (
        'Results of %s were already collected' % self._step_name(suffix))

    # Emit error if test wasn't triggered. This happens if *.isolated is not
    # found. (The build is already red by this moment anyway).
    if suffix not in self._tasks:
      return api.python.inline(
          '[collect error] %s' % self._step_name(suffix),
          r"""
          import sys
          print '%s wasn\'t triggered' % sys.argv[1]
          sys.exit(1)
          """,
          args=[self.target_name])

    try:
      api.swarming.collect_task(self._tasks[suffix])
    finally:
      # Record results even when collection fails.
      valid, failures = self.validate_task_results(api, api.step.active_result)
      self._results[suffix] = {'valid': valid, 'failures': failures}

  def has_valid_results(self, api, suffix):
    # Test wasn't triggered or wasn't collected.
    if suffix not in self._tasks or not suffix in self._results:
      return False
    return self._results[suffix]['valid']

  def failures(self, api, suffix):
    assert self.has_valid_results(api, suffix)
    return self._results[suffix]['failures']

  @property
  def uses_swarming(self):
    return True
571
class SwarmingGTestTest(SwarmingTest):
  """Runs an isolated gtest suite on swarming."""

  def __init__(self, name, args=None, target_name=None, shards=1,
               dimensions=None, tags=None, extra_suffix=None):
    super(SwarmingGTestTest, self).__init__(name, dimensions, tags, target_name,
                                            extra_suffix)
    self._args = args or []
    self._shards = shards

  def compile_targets(self, api):
    # <X>_run target depends on <X>, and then isolates it invoking isolate.py.
    # It is a convention, not a hard coded rule.
    # Also include name without the _run suffix to help recipes correctly
    # interpret results returned by "analyze".
    return [self.target_name, self.target_name + '_run']

  def create_task(self, api, suffix, isolated_hash):
    """Builds the swarming gtest task for this run (see SwarmingTest)."""
    # For local tests test_args are added inside api.chromium.runtest.
    args = self._args[:]
    args.extend(api.chromium.c.runtests.test_args)

    # If rerunning without a patch, run only tests that failed.
    if suffix == 'without patch':
      failed_tests = sorted(self.failures(api, 'with patch'))
      args.append('--gtest_filter=%s' % ':'.join(failed_tests))

    args.extend(api.chromium.c.runtests.swarming_extra_args)

    return api.swarming.gtest_task(
        title=self._step_name(suffix),
        isolated_hash=isolated_hash,
        shards=self._shards,
        test_launcher_summary_output=api.test_utils.gtest_results(
            add_json_log=False),
        extra_args=args)

  def validate_task_results(self, api, step_result):
    """Extracts (valid, failures) from the collected gtest results."""
    if not hasattr(step_result, 'test_utils'):
      return False, None  # pragma: no cover

    gtest_results = step_result.test_utils.gtest_results
    if not gtest_results:
      return False, None  # pragma: no cover

    # Runs flagged UNRELIABLE_RESULTS are treated as invalid.
    global_tags = gtest_results.raw.get('global_tags', [])
    if 'UNRELIABLE_RESULTS' in global_tags:
      return False, None  # pragma: no cover

    return True, gtest_results.failures
620
class GTestTest(Test):
  """Facade over a gtest that runs either locally or on swarming.

  The concrete implementation (SwarmingGTestTest or LocalGTestTest) is
  selected once at construction time; every operation is forwarded to it.
  """

  def __init__(self, name, args=None, target_name=None, enable_swarming=False,
               swarming_shards=1, swarming_dimensions=None, swarming_tags=None,
               swarming_extra_suffix=None, **runtest_kwargs):
    super(GTestTest, self).__init__()
    if enable_swarming:
      self._test = SwarmingGTestTest(
          name, args, target_name, swarming_shards, swarming_dimensions,
          swarming_tags, swarming_extra_suffix)
    else:
      self._test = LocalGTestTest(name, args, target_name, **runtest_kwargs)

  @property
  def name(self):
    """Display name of the wrapped test."""
    return self._test.name

  @property
  def isolate_target(self):
    """Isolate target of the wrapped test."""
    return self._test.isolate_target

  def compile_targets(self, api):
    """Forwards to the wrapped implementation."""
    return self._test.compile_targets(api)

  def pre_run(self, api, suffix):
    """Forwards to the wrapped implementation."""
    return self._test.pre_run(api, suffix)

  def run(self, api, suffix):
    """Forwards to the wrapped implementation."""
    return self._test.run(api, suffix)

  def post_run(self, api, suffix):
    """Forwards to the wrapped implementation."""
    return self._test.post_run(api, suffix)

  def has_valid_results(self, api, suffix):
    """Forwards to the wrapped implementation."""
    return self._test.has_valid_results(api, suffix)

  def failures(self, api, suffix):
    """Forwards to the wrapped implementation."""
    return self._test.failures(api, suffix)

  @property
  def uses_swarming(self):
    """Whether the wrapped implementation runs on swarming."""
    return self._test.uses_swarming
663
664 # TODO(sergiyb): GPU Tests do not always follow the Chromium convention to have
665 # a 'test' target for each 'test_run' target. Instead they use gyp dependencies.
# Chromium tests return both 'test' and 'test_run' to circumvent an issue with
667 # analyze step, while GPU tests do not require this.
class GPUGTestTest(GTestTest):
  """GTestTest variant for GPU tests; always tags swarming tasks as GPU."""

  def __init__(self, name, **kwargs):
    tags = set(kwargs.get('swarming_tags') or [])
    tags.add('gpu_test:1')
    kwargs['swarming_tags'] = tags
    super(GPUGTestTest, self).__init__(name, **kwargs)

  def compile_targets(self, api):
    # GPU tests only need the <target>_run isolate target; they rely on gyp
    # dependencies rather than a separate 'test' target.
    return ['%s_run' % self._test.target_name]
677
class PythonBasedTest(Test):
  """Base class for tests driven by a Python harness emitting test_results."""

  @staticmethod
  def compile_targets(_):
    return []  # pragma: no cover

  def run_step(self, api, suffix, cmd_args, **kwargs):
    """Launches the harness; must be overridden by subclasses."""
    raise NotImplementedError()  # pragma: no cover

  def run(self, api, suffix):
    """Runs the harness and records the step result under the suffix."""
    cmd_args = ['--write-full-results-to',
                api.test_utils.test_results(add_json_log=False)]
    if suffix == 'without patch':
      cmd_args.extend(self.failures(api, 'with patch'))  # pragma: no cover

    try:
      self.run_step(
          api,
          suffix,
          cmd_args,
          step_test_data=lambda: api.test_utils.test_api.canned_test_output(True))
    finally:
      # Record the step result (and annotate failures) even when the step
      # itself fails.
      step_result = api.step.active_result
      self._test_runs[suffix] = step_result

      if hasattr(step_result, 'test_utils'):
        r = step_result.test_utils.test_results
        p = step_result.presentation
        p.step_text += api.test_utils.format_step_text([
            ['unexpected_failures:', r.unexpected_failures.keys()],
        ])

    return step_result

  def has_valid_results(self, api, suffix):
    # TODO(dpranke): we should just return zero/nonzero for success/fail.
    # crbug.com/357866
    step = self._test_runs[suffix]
    if not hasattr(step, 'test_utils'):
      return False  # pragma: no cover
    # NOTE(review): due to and/or precedence this evaluates as
    # (valid and retcode <= MAX and retcode == 0) or failures(...), so a run
    # with recorded failures is considered valid regardless of retcode —
    # confirm this is the intended grouping.
    return (step.test_utils.test_results.valid and
            step.retcode <= step.test_utils.test_results.MAX_FAILURES_EXIT_STATUS and
            (step.retcode == 0) or self.failures(api, suffix))

  def failures(self, api, suffix):
    return self._test_runs[suffix].test_utils.test_results.unexpected_failures
724
class PrintPreviewTests(PythonBasedTest):  # pylint: disable=W0232
  """Runs the print-preview browser tests via run-webkit-tests."""

  # Display name used to build step names.
  name = 'print_preview_tests'

  def run_step(self, api, suffix, cmd_args, **kwargs):
    """Invokes run-webkit-tests for the current platform."""
    platform_arg = '.'.join(['browser_test',
        api.platform.normalize_platform_name(api.platform.name)])
    args = cmd_args
    path = api.path['checkout'].join(
        'third_party', 'WebKit', 'Tools', 'Scripts', 'run-webkit-tests')
    args.extend(['--platform', platform_arg])

    # This is similar to how api.chromium.run_telemetry_test() sets the
    # environment variable for the sandbox.
    env = {}
    if api.platform.is_linux:
      env['CHROME_DEVEL_SANDBOX'] = api.path.join(
          '/opt', 'chromium', 'chrome_sandbox')

    return api.chromium.runtest(
        test=path,
        args=args,
        xvfb=True,
        name=self._step_name(suffix),
        python_mode=True,
        env=env,
        **kwargs)

  @staticmethod
  def compile_targets(api):
    targets = ['browser_tests', 'blink_tests']
    # crash_service is only built on Windows.
    if api.platform.is_win:
      targets.append('crash_service')

    return targets
760
class TelemetryGPUTest(Test):  # pylint: disable=W0232
  """Facade that runs a Telemetry GPU test locally or on swarming.

  The concrete implementation (SwarmingTelemetryGPUTest or
  LocalTelemetryGPUTest) is chosen at construction time based on
  enable_swarming; every operation is forwarded to it.
  """

  def __init__(self, name, revision=None, webkit_revision=None,
               target_name=None, args=None, enable_swarming=False,
               swarming_dimensions=None, swarming_extra_suffix=None,
               **runtest_kwargs):
    if enable_swarming:
      self._test = SwarmingTelemetryGPUTest(
          name, args=args, dimensions=swarming_dimensions,
          target_name=target_name, extra_suffix=swarming_extra_suffix)
    else:
      self._test = LocalTelemetryGPUTest(
          name, revision, webkit_revision, args=args, target_name=target_name,
          **runtest_kwargs)

  @property
  def name(self):
    """Display name of the wrapped test."""
    return self._test.name

  @property
  def isolate_target(self):
    """Isolate target of the wrapped test."""
    return self._test.isolate_target

  def compile_targets(self, api):
    """Forwards to the wrapped implementation."""
    return self._test.compile_targets(api)

  def pre_run(self, api, suffix):
    """Forwards to the wrapped implementation."""
    return self._test.pre_run(api, suffix)

  def run(self, api, suffix):
    """Forwards to the wrapped implementation."""
    return self._test.run(api, suffix)

  def post_run(self, api, suffix):
    """Forwards to the wrapped implementation."""
    return self._test.post_run(api, suffix)

  def has_valid_results(self, api, suffix):
    """Forwards to the wrapped implementation."""
    return self._test.has_valid_results(api, suffix)

  def failures(self, api, suffix):
    """Forwards to the wrapped implementation."""
    return self._test.failures(api, suffix)

  @property
  def uses_swarming(self):
    """Whether the wrapped implementation runs on swarming."""
    return self._test.uses_swarming
805
class BisectTest(Test):  # pylint: disable=W0232
  """Runs a bisect job via the bisect_tester recipe module."""

  name = 'bisect_test'

  @property
  def abort_on_failure(self):
    return True  # pragma: no cover

  @staticmethod
  def compile_targets(_):  # pragma: no cover
    return ['chrome']  # Bisect always uses a separate bot for building.

  def pre_run(self, api, _):
    # Loads the bisect configuration from the build properties.
    self.test_config = api.bisect_tester.load_config_from_dict(
        api.properties.get('bisect_config'))

  def run(self, api, _):
    self._run_results, self.test_output, self.retcodes = (
        api.bisect_tester.run_test(self.test_config))

  def post_run(self, api, _):
    # Digests raw run results into values and uploads them.
    self.values = api.bisect_tester.digest_run_results(self._run_results,
                                                       self.test_config)
    api.bisect_tester.upload_results(self.test_output, self.values,
                                     self.retcodes)

  def has_valid_results(self, *_):
    return len(getattr(self, 'values', [])) > 0  # pragma: no cover

  def failures(self, *_):
    # NOTE(review): self._failures is never assigned anywhere in this class,
    # so this would raise AttributeError if reached — confirm intended.
    return self._failures  # pragma: no cover
837
class LocalTelemetryGPUTest(Test):  # pylint: disable=W0232
  def __init__(self, name, revision, webkit_revision,
               target_name=None, **runtest_kwargs):
    """Constructs an instance of LocalTelemetryGPUTest.

    Args:
      name: Displayed name of the test. May be modified by suffixes.
      revision: Revision of the Chrome checkout.
      webkit_revision: Revision of the WebKit checkout.
      target_name: Actual name of the test. Defaults to name.
      runtest_kwargs: Additional keyword args forwarded to the runtest.
    """
    super(LocalTelemetryGPUTest, self).__init__()
    self._name = name
    self._target_name = target_name
    self._revision = revision
    self._webkit_revision = webkit_revision
    self._failures = {}  # Maps suffix to a list of failed page names.
    self._valid = {}     # Maps suffix to whether results parsed successfully.
    self._runtest_kwargs = runtest_kwargs

  @property
  def name(self):
    return self._name

  @property
  def target_name(self):
    return self._target_name or self._name

  @property
  def isolate_target(self):
    return self.target_name  # pragma: no cover

  def compile_targets(self, _):
    # TODO(sergiyb): Build 'chrome_shell_apk' instead of 'chrome' on Android.
    return ['chrome', 'telemetry_gpu_test_run']  # pragma: no cover

  def run(self, api, suffix):  # pylint: disable=R0201
    """Runs the telemetry GPU test and parses JSON results."""
    # Build per-run kwargs on a fresh 'args' list. Previously the code did a
    # shallow dict.copy() and extended the 'args' list in place, mutating the
    # list shared with self._runtest_kwargs so every re-run appended another
    # '--output-format ...' pair; it then passed **self._runtest_kwargs,
    # which only worked because of that shared mutation.
    kwargs = self._runtest_kwargs.copy()
    kwargs['args'] = list(self._runtest_kwargs.get('args', [])) + [
        '--output-format', 'json',
        '--output-dir', api.raw_io.output_dir()]
    step_test_data = lambda: api.test_utils.test_api.canned_telemetry_gpu_output(
        passing=False, is_win=api.platform.is_win)
    try:
      api.isolate.run_telemetry_test(
          'telemetry_gpu_test',
          self.target_name,
          self._revision,
          self._webkit_revision,
          name=self._step_name(suffix),
          spawn_dbus=True,
          step_test_data=step_test_data,
          **kwargs)
    finally:
      # Record the step result even when the test step fails.
      step_result = api.step.active_result
      self._test_runs[suffix] = step_result

      try:
        # A page is failed if any of its per-page values has type 'failure'.
        res = api.json.loads(step_result.raw_io.output_dir['results.json'])
        self._failures[suffix] = [res['pages'][str(value['page_id'])]['name']
                                  for value in res['per_page_values']
                                  if value['type'] == 'failure']

        self._valid[suffix] = True
      except (ValueError, KeyError, AttributeError):  # pragma: no cover
        self._valid[suffix] = False

      if self._valid[suffix]:
        step_result.presentation.step_text += api.test_utils.format_step_text([
            ['failures:', self._failures[suffix]]
        ])

  def has_valid_results(self, api, suffix):
    return suffix in self._valid and self._valid[suffix]  # pragma: no cover

  def failures(self, api, suffix):  # pragma: no cover
    assert self.has_valid_results(api, suffix)
    assert suffix in self._failures
    return self._failures[suffix]
918
class SwarmingTelemetryGPUTest(SwarmingTest):
  """Runs a Telemetry-based GPU test on swarming."""

  def __init__(self, name, args=None, dimensions=None, target_name=None,
               extra_suffix=None):
    # All Telemetry GPU tests share the 'telemetry_gpu_test' isolate target;
    # the benchmark to run is passed as the first command-line argument.
    super(SwarmingTelemetryGPUTest, self).__init__(
        name, dimensions, {'gpu_test:1'}, 'telemetry_gpu_test', extra_suffix)
    # NOTE(review): if args is None, 'args + self._args' in create_task will
    # raise TypeError — confirm callers always pass a list.
    self._args = args
    self._telemetry_target_name = target_name or name

  def compile_targets(self, _):
    # TODO(sergiyb): Build 'chrome_shell_apk' instead of 'chrome' on Android.
    return ['chrome', 'telemetry_gpu_test_run']

  def create_task(self, api, suffix, isolated_hash):
    """Builds the swarming task that runs the telemetry benchmark."""
    # For local tests args are added inside api.chromium.run_telemetry_test.
    browser_config = api.chromium.c.BUILD_CONFIG.lower()
    args = [self._telemetry_target_name, '--show-stdout',
            '--browser=%s' % browser_config] + self._args

    # If rerunning without a patch, run only tests that failed.
    if suffix == 'without patch':
      failed_tests = sorted(self.failures(api, 'with patch'))
      # Telemetry test launcher uses re.compile to parse --page-filter
      # argument, therefore we escape any special characters in test names.
      failed_tests = [re.escape(test_name) for test_name in failed_tests]
      args.append('--page-filter=%s' % '|'.join(failed_tests))

    return api.swarming.telemetry_gpu_task(
        title=self._step_name(suffix), isolated_hash=isolated_hash,
        extra_args=args)

  def validate_task_results(self, api, step_result):
    """Extracts (valid, failures) from collected telemetry results."""
    results = getattr(step_result, 'telemetry_results', None) or {}

    try:
      # A page is failed if any of its per-page values has type 'failure'.
      failures = [results['pages'][str(value['page_id'])]['name']
                  for value in results['per_page_values']
                  if value['type'] == 'failure']

      valid = True
    except (ValueError, KeyError):  # pragma: no cover
      valid = False
      failures = None

    if valid:
      step_result.presentation.step_text += api.test_utils.format_step_text([
          ['failures:', failures]
      ])

    return valid, failures
969
class AndroidTest(Test):
  """Base class for tests run through the Android test launcher."""

  def __init__(self, name, compile_target, adb_install_apk=None,
               isolate_file_path=None):
    """
    Args:
      name: Displayed name of the test.
      compile_target: Compile target for this test suite.
      adb_install_apk: If set, APK to install before running; run() indexes
          it as a pair (apk, package) — confirm exact shape with callers.
      isolate_file_path: Optional path to an .isolate file for the suite.
    """
    super(AndroidTest, self).__init__()

    self._name = name
    self.compile_target = compile_target

    self.adb_install_apk = adb_install_apk
    self.isolate_file_path = isolate_file_path
980
981 @property
982 def name(self):
983 return self._name
984
985 def _get_failing_tests(self, step_result):
986 """Parses test results and returns a list of failed tests.
987
988 Args:
989 step_result: Result returned from the test.
990
991 Returns:
992 None if results are invalid, a list of failures otherwise (may be empty).
993 """
994 try:
995 # Extract test results.
996 json_results = step_result.json.output
997 test_results = {test_name: test_data[0]['status']
998 for result_dict in json_results['per_iteration_data']
999 for test_name, test_data in result_dict.iteritems()}
1000
1001 # TODO(sergiyb): Figure out how to handle status UNKNOWN.
1002 return [test_name for test_name, test_status in test_results.iteritems()
1003 if test_status not in ['SUCCESS', 'SKIPPED']]
1004 except (KeyError, IndexError, TypeError,
1005 AttributeError): # pragma: no cover
1006 return None
1007
1008 def run_tests(self, api, suffix, json_results_file):
1009 """Runs the Android test suite and outputs the json results to a file.
1010
1011 Args:
1012 api: Caller's API.
1013 suffix: Suffix added to the test name.
1014 json_results_file: File to output the test results.
1015 """
1016 raise NotImplementedError() # pragma: no cover
1017
1018 def run(self, api, suffix):
1019 assert api.chromium.c.TARGET_PLATFORM == 'android'
1020 if self.adb_install_apk:
1021 api.chromium_android.adb_install_apk(
1022 self.adb_install_apk[0], self.adb_install_apk[1])
1023 try:
1024 json_results_file = api.json.output(add_json_log=False)
1025 self.run_tests(api, suffix, json_results_file)
1026 finally:
1027 step_result = api.step.active_result
1028 failures = self._get_failing_tests(step_result)
1029
1030 if failures is None:
1031 self._test_runs[suffix] = {'valid': False} # pragma: no cover
1032 else:
1033 self._test_runs[suffix] = {'valid': True, 'failures': failures}
1034
1035 step_result.presentation.step_text += api.test_utils.format_step_text([
1036 ['failures:', failures]
1037 ])
1038
1039 def compile_targets(self, _):
1040 return [self.compile_target]
1041
1042 def has_valid_results(self, api, suffix):
1043 if suffix not in self._test_runs:
1044 return False # pragma: no cover
1045 return self._test_runs[suffix]['valid']
1046
1047 def failures(self, api, suffix):
1048 assert self.has_valid_results(api, suffix)
1049 return self._test_runs[suffix]['failures']
1050
1051
class AndroidJunitTest(AndroidTest):
  """Runs a Java unit-test suite on the host (no APK install needed)."""

  def __init__(self, name):
    super(AndroidJunitTest, self).__init__(
        name, compile_target=name, adb_install_apk=None,
        isolate_file_path=None)

  #override
  def run_tests(self, api, suffix, json_results_file):
    # Canned output used only during recipe simulation.
    simulated_results = {
        'per_iteration_data': [{'TestA': [{'status': 'SUCCESS'}]},
                               {'TestB': [{'status': 'FAILURE'}]}]
    }
    api.chromium_android.run_java_unit_test_suite(
        self.name, verbose=True, suffix=suffix,
        json_results_file=json_results_file,
        step_test_data=lambda: api.json.test_api.output(simulated_results))
1067
1068
class AndroidInstrumentationTest(AndroidTest):
  """Runs an Android instrumentation test suite on a device.

  Uses the AndroidTest constructor unchanged; the previous __init__ override
  merely forwarded every argument with identical defaults, so it was removed.
  """

  #override
  def run_tests(self, api, suffix, json_results_file):
    # Canned output used only during recipe simulation.
    mock_test_results = {
        'per_iteration_data': [{'TestA': [{'status': 'SUCCESS'}]},
                               {'TestB': [{'status': 'FAILURE'}]}]
    }
    api.chromium_android.run_instrumentation_suite(
        self.name, suffix=suffix,
        flakiness_dashboard='http://test-results.appspot.com',
        verbose=True, isolate_file_path=self.isolate_file_path,
        json_results_file=json_results_file,
        step_test_data=lambda: api.json.test_api.output(mock_test_results))
1087
1088
class BlinkTest(Test):
  """Runs the Blink layout (webkit) tests and archives their results.

  On a 'with patch' (or suffix-less) run the layout-test results are also
  uploaded to Google Storage and linked from the build page.
  """
  # TODO(dpranke): This should be converted to a PythonBasedTest, although it
  # will need custom behavior because we archive the results as well.

  def __init__(self, extra_args=None):
    super(BlinkTest, self).__init__()
    self._extra_args = extra_args

  name = 'webkit_tests'

  @staticmethod
  def compile_targets(api):
    return ['blink_tests']

  def run(self, api, suffix):
    """Runs the layout tests; 'without patch' retries only prior failures."""
    results_dir = api.path['slave_build'].join('layout-test-results')

    args = [
        '--target', api.chromium.c.BUILD_CONFIG,
        '-o', results_dir,
        '--build-dir', api.chromium.c.build_dir,
        '--json-test-results', api.test_utils.test_results(add_json_log=False),
        '--test-results-server', 'test-results.appspot.com',
        '--build-number', str(api.properties['buildnumber']),
        '--builder-name', api.properties['buildername'],
    ]
    if api.chromium.c.TARGET_PLATFORM == 'android':
      args.extend(['--platform', 'android'])
    if self._extra_args:
      args.extend(self._extra_args)
    if suffix == 'without patch':
      # Restrict the run to the tests that failed with the patch applied,
      # including tests normally skipped.
      test_list = "\n".join(self.failures(api, 'with patch'))
      args.extend(['--test-list', api.raw_io.input(test_list),
                   '--skipped', 'always'])

    try:
      step_result = api.chromium.runtest(
          api.path['build'].join('scripts', 'slave', 'chromium',
                                 'layout_test_wrapper.py'),
          args, name=self._step_name(suffix),
          step_test_data=lambda: api.test_utils.test_api.canned_test_output(
              passing=True, minimal=True))

      # Mark steps with unexpected flakes as warnings. Do this here instead of
      # "finally" blocks because we only want to do this if step was successful.
      # We don't want to possibly change failing steps to warnings.
      if step_result and step_result.test_utils.test_results.unexpected_flakes:
        step_result.presentation.status = api.step.WARNING
    finally:
      # Record the step result even when runtest raised, for has_valid_results.
      step_result = api.step.active_result
      self._test_runs[suffix] = step_result

      if step_result:
        r = step_result.test_utils.test_results
        p = step_result.presentation

        if r.valid:
          p.step_text += api.test_utils.format_step_text([
            ['unexpected_flakes:', r.unexpected_flakes.keys()],
            ['unexpected_failures:', r.unexpected_failures.keys()],
            ['Total executed: %s' % r.num_passes],
          ])

    if suffix in ('', 'with patch'):
      buildername = api.properties['buildername']
      buildnumber = api.properties['buildnumber']

      archive_layout_test_results = api.path['build'].join(
          'scripts', 'slave', 'chromium', 'archive_layout_test_results.py')

      archive_result = api.python(
        'archive_webkit_tests_results',
        archive_layout_test_results,
        [
          '--results-dir', results_dir,
          '--build-dir', api.chromium.c.build_dir,
          '--build-number', buildnumber,
          '--builder-name', buildername,
          '--gs-bucket', 'gs://chromium-layout-test-archives',
        ] + api.json.property_args(),
      )

      # TODO(infra): http://crbug.com/418946 .
      sanitized_buildername = re.sub('[ .()]', '_', buildername)
      base = (
        "https://storage.googleapis.com/chromium-layout-test-archives/%s/%s"
        % (sanitized_buildername, buildnumber))

      archive_result.presentation.links['layout_test_results'] = (
          base + '/layout-test-results/results.html')
      archive_result.presentation.links['(zip)'] = (
          base + '/layout-test-results.zip')

  def has_valid_results(self, api, suffix):
    step = self._test_runs[suffix]
    # TODO(dpranke): crbug.com/357866 - note that all comparing against
    # MAX_FAILURES_EXIT_STATUS tells us is that we did not exit early
    # or abnormally; it does not tell us how many failures there actually
    # were, which might be much higher (up to 5000 diffs, where we
    # would bail out early with --exit-after-n-failures) or lower
    # if we bailed out after 100 crashes w/ -exit-after-n-crashes, in
    # which case the retcode is actually 130
    return (step.test_utils.test_results.valid and
            step.retcode <=
                step.test_utils.test_results.MAX_FAILURES_EXIT_STATUS)

  def failures(self, api, suffix):
    return self._test_runs[suffix].test_utils.test_results.unexpected_failures
1195
1196
class MiniInstallerTest(PythonBasedTest):  # pylint: disable=W0232
  """Drives chrome/test/mini_installer/test_installer.py."""

  name = 'test_installer'

  @staticmethod
  def compile_targets(_):
    return ['mini_installer']

  def run_step(self, api, suffix, cmd_args, **kwargs):
    installer_dir = api.path['checkout'].join('chrome', 'test',
                                              'mini_installer')
    script_args = [
        '--build-dir', api.chromium.c.build_dir,
        '--target', api.chromium.c.build_config_fs,
        '--force-clean',
        '--config', installer_dir.join('config', 'config.config'),
    ] + list(cmd_args)
    return api.python(
        self._step_name(suffix),
        installer_dir.join('test_installer.py'),
        script_args,
        **kwargs)
1218
1219
class DiagnoseGomaTest(Test):
  """Runs goma's diagnose_goma_log.py over the build's goma logs."""
  # NOTE: the previous `name = 'diagnose_goma'` class attribute was dead
  # code — it was immediately shadowed by the `name` property below, which
  # is what the class dict ends up holding. Removed for clarity.

  @property
  def name(self):
    return 'diagnose_goma'

  def run(self, api, suffix):
    diagnose_goma_log_py = api.path['build'].join('goma',
                                                  'diagnose_goma_log.py')
    api.python('diagnose_goma', diagnose_goma_log_py, [])
1231
1232
# Test suites run on the iOS bots; GTestTest is defined earlier in this file.
IOS_TESTS = [
    GTestTest(suite) for suite in (
        'ios_chrome_unittests',
        'base_unittests',
        'components_unittests',
        'crypto_unittests',
        'gfx_unittests',
        'url_unittests',
        'content_unittests',
        'net_unittests',
        'ui_base_unittests',
        'ui_ios_unittests',
        'sync_unit_tests',
        'sql_unittests',
    )
]
1247
# Small suites used to sanity-check goma-enabled builds.
GOMA_TESTS = [
    GTestTest(suite) for suite in ('base_unittests', 'content_unittests')
]
OLDNEW
« no previous file with comments | « scripts/slave/recipe_modules/chromium/client_v8_fyi.py ('k') | scripts/slave/recipe_modules/chromium/test_api.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698