OLD | NEW |
1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import collections | 5 import collections |
6 import copy | 6 import copy |
7 import logging | 7 import logging |
8 import os | 8 import os |
9 import pickle | 9 import pickle |
10 import re | 10 import re |
(...skipping 32 matching lines...)
43 'org.chromium.test.driver.OnDeviceInstrumentationDriver.TargetClass') | 43 'org.chromium.test.driver.OnDeviceInstrumentationDriver.TargetClass') |
44 _EXTRA_TIMEOUT_SCALE = ( | 44 _EXTRA_TIMEOUT_SCALE = ( |
45 'org.chromium.test.driver.OnDeviceInstrumentationDriver.TimeoutScale') | 45 'org.chromium.test.driver.OnDeviceInstrumentationDriver.TimeoutScale') |
46 | 46 |
47 _PARAMETERIZED_TEST_ANNOTATION = 'ParameterizedTest' | 47 _PARAMETERIZED_TEST_ANNOTATION = 'ParameterizedTest' |
48 _PARAMETERIZED_TEST_SET_ANNOTATION = 'ParameterizedTest$Set' | 48 _PARAMETERIZED_TEST_SET_ANNOTATION = 'ParameterizedTest$Set' |
49 _NATIVE_CRASH_RE = re.compile('native crash', re.IGNORECASE) | 49 _NATIVE_CRASH_RE = re.compile('native crash', re.IGNORECASE) |
50 _PICKLE_FORMAT_VERSION = 10 | 50 _PICKLE_FORMAT_VERSION = 10 |
51 | 51 |
52 | 52 |
| 53 class ProguardPickleException(Exception): |
| 54 pass |
| 55 |
| 56 |
53 # TODO(jbudorick): Make these private class methods of | 57 # TODO(jbudorick): Make these private class methods of |
54 # InstrumentationTestInstance once the instrumentation test_runner is | 58 # InstrumentationTestInstance once the instrumentation test_runner is |
55 # deprecated. | 59 # deprecated. |
56 def ParseAmInstrumentRawOutput(raw_output): | 60 def ParseAmInstrumentRawOutput(raw_output): |
57 """Parses the output of an |am instrument -r| call. | 61 """Parses the output of an |am instrument -r| call. |
58 | 62 |
59 Args: | 63 Args: |
60 raw_output: the output of an |am instrument -r| call as a list of lines | 64 raw_output: the output of an |am instrument -r| call as a list of lines |
61 Returns: | 65 Returns: |
62 A 3-tuple containing: | 66 A 3-tuple containing: |
(...skipping 119 matching lines...)
182 to_remove = [] | 186 to_remove = [] |
183 for a in p.get('arguments', []): | 187 for a in p.get('arguments', []): |
184 if a['name'] == 'add': | 188 if a['name'] == 'add': |
185 to_add = ['--%s' % f for f in a['stringArray']] | 189 to_add = ['--%s' % f for f in a['stringArray']] |
186 elif a['name'] == 'remove': | 190 elif a['name'] == 'remove': |
187 to_remove = ['--%s' % f for f in a['stringArray']] | 191 to_remove = ['--%s' % f for f in a['stringArray']] |
188 result.append(ParamsTuple(to_add, to_remove)) | 192 result.append(ParamsTuple(to_add, to_remove)) |
189 return result if result else None | 193 return result if result else None |
190 | 194 |
191 | 195 |
| 196 def FilterTests(tests, test_filter=None, annotations=None, |
| 197 excluded_annotations=None): |
| 198 """Filter a list of tests |
| 199 |
| 200 Args: |
| 201 tests: a list of tests, e.g. [ |
| 202 {'annotations': {}, 'class': 'com.example.TestA', 'methods': []}, |
| 203 {'annotations': {}, 'class': 'com.example.TestB', 'methods': []}] |
| 204 test_filter: googletest-style filter string. |
| 205 annotations: a dict of wanted annotations for test methods. |
| 206 excluded_annotations: a dict of annotations to exclude. |
| 207 |
| 208 Returns: |
| 209 A list of filtered tests. |
| 210 """ |
| 211 def gtest_filter(c, m): |
| 212 t = ['%s.%s' % (c['class'], m['method']), |
| 213 '%s.%s' % (c['class'].split('.')[-1], m['method'])] |
| 214 return not test_filter or unittest_util.FilterTestNames(t, test_filter) |
| 215 |
| 216 def annotation_filter(all_annotations): |
| 217 if not annotations: |
| 218 return True |
| 219 return any_annotation_matches(annotations, all_annotations) |
| 220 |
| 221 def excluded_annotation_filter(all_annotations): |
| 222 if not excluded_annotations: |
| 223 return True |
| 224 return not any_annotation_matches(excluded_annotations, |
| 225 all_annotations) |
| 226 |
| 227 def any_annotation_matches(annotations, all_annotations): |
| 228 return any( |
| 229 ak in all_annotations and (av is None or av == all_annotations[ak]) |
| 230 for ak, av in annotations.iteritems()) |
| 231 |
| 232 filtered_classes = [] |
| 233 for c in tests: |
| 234 filtered_methods = [] |
| 235 for m in c['methods']: |
| 236 # Gtest filtering |
| 237 if not gtest_filter(c, m): |
| 238 continue |
| 239 |
| 240 all_annotations = dict(c['annotations']) |
| 241 all_annotations.update(m['annotations']) |
| 242 if (not annotation_filter(all_annotations) |
| 243 or not excluded_annotation_filter(all_annotations)): |
| 244 continue |
| 245 |
| 246 filtered_methods.append(m) |
| 247 |
| 248 if filtered_methods: |
| 249 filtered_class = dict(c) |
| 250 filtered_class['methods'] = filtered_methods |
| 251 filtered_classes.append(filtered_class) |
| 252 |
| 253 return filtered_classes |
| 254 |
| 255 |
| 256 def GetAllTests(test_jar): |
| 257 pickle_path = '%s-proguard.pickle' % test_jar |
| 258 try: |
| 259 tests = _GetTestsFromPickle(pickle_path, test_jar) |
| 260 except ProguardPickleException as e: |
| 261 logging.info('Could not get tests from pickle: %s', e) |
| 262 logging.info('Getting tests from JAR via proguard.') |
| 263 tests = _GetTestsFromProguard(test_jar) |
| 264 _SaveTestsToPickle(pickle_path, test_jar, tests) |
| 265 return tests |
| 266 |
| 267 |
| 268 def _GetTestsFromPickle(pickle_path, jar_path): |
| 269 if not os.path.exists(pickle_path): |
| 270 raise ProguardPickleException('%s does not exist.' % pickle_path) |
| 271 if os.path.getmtime(pickle_path) <= os.path.getmtime(jar_path): |
| 272 raise ProguardPickleException( |
| 273 '%s newer than %s.' % (jar_path, pickle_path)) |
| 274 |
| 275 with open(pickle_path, 'r') as pickle_file: |
| 276 pickle_data = pickle.loads(pickle_file.read()) |
| 277 jar_md5 = md5sum.CalculateHostMd5Sums(jar_path)[jar_path] |
| 278 |
| 279 if pickle_data['VERSION'] != _PICKLE_FORMAT_VERSION: |
| 280 raise ProguardPickleException('PICKLE_FORMAT_VERSION has changed.') |
| 281 if pickle_data['JAR_MD5SUM'] != jar_md5: |
| 282 raise ProguardPickleException('JAR file MD5 sum differs.') |
| 283 return pickle_data['TEST_METHODS'] |
| 284 |
| 285 def _GetTestsFromProguard(jar_path): |
| 286 p = proguard.Dump(jar_path) |
| 287 class_lookup = dict((c['class'], c) for c in p['classes']) |
| 288 |
| 289 def is_test_class(c): |
| 290 return c['class'].endswith('Test') |
| 291 |
| 292 def is_test_method(m): |
| 293 return m['method'].startswith('test') |
| 294 |
| 295 def recursive_class_annotations(c): |
| 296 s = c['superclass'] |
| 297 if s in class_lookup: |
| 298 a = recursive_class_annotations(class_lookup[s]) |
| 299 else: |
| 300 a = {} |
| 301 a.update(c['annotations']) |
| 302 return a |
| 303 |
| 304 def stripped_test_class(c): |
| 305 return { |
| 306 'class': c['class'], |
| 307 'annotations': recursive_class_annotations(c), |
| 308 'methods': [m for m in c['methods'] if is_test_method(m)], |
| 309 } |
| 310 |
| 311 return [stripped_test_class(c) for c in p['classes'] |
| 312 if is_test_class(c)] |
| 313 |
| 314 def _SaveTestsToPickle(pickle_path, jar_path, tests): |
| 315 jar_md5 = md5sum.CalculateHostMd5Sums(jar_path)[jar_path] |
| 316 pickle_data = { |
| 317 'VERSION': _PICKLE_FORMAT_VERSION, |
| 318 'JAR_MD5SUM': jar_md5, |
| 319 'TEST_METHODS': tests, |
| 320 } |
| 321 with open(pickle_path, 'w') as pickle_file: |
| 322 pickle.dump(pickle_data, pickle_file) |
| 323 |
192 class InstrumentationTestInstance(test_instance.TestInstance): | 324 class InstrumentationTestInstance(test_instance.TestInstance): |
193 | 325 |
194 def __init__(self, args, isolate_delegate, error_func): | 326 def __init__(self, args, isolate_delegate, error_func): |
195 super(InstrumentationTestInstance, self).__init__() | 327 super(InstrumentationTestInstance, self).__init__() |
196 | 328 |
197 self._additional_apks = [] | 329 self._additional_apks = [] |
198 self._apk_under_test = None | 330 self._apk_under_test = None |
199 self._apk_under_test_incremental_install_script = None | 331 self._apk_under_test_incremental_install_script = None |
200 self._package_info = None | 332 self._package_info = None |
201 self._suite = None | 333 self._suite = None |
(...skipping 73 matching lines...)
275 | 407 |
276 self._test_package = self._test_apk.GetPackageName() | 408 self._test_package = self._test_apk.GetPackageName() |
277 self._test_runner = self._test_apk.GetInstrumentationName() | 409 self._test_runner = self._test_apk.GetInstrumentationName() |
278 | 410 |
279 self._package_info = None | 411 self._package_info = None |
280 if self._apk_under_test: | 412 if self._apk_under_test: |
281 package_under_test = self._apk_under_test.GetPackageName() | 413 package_under_test = self._apk_under_test.GetPackageName() |
282 for package_info in constants.PACKAGE_INFO.itervalues(): | 414 for package_info in constants.PACKAGE_INFO.itervalues(): |
283 if package_under_test == package_info.package: | 415 if package_under_test == package_info.package: |
284 self._package_info = package_info | 416 self._package_info = package_info |
| 417 break |
285 if not self._package_info: | 418 if not self._package_info: |
286 logging.warning('Unable to find package info for %s', self._test_package) | 419 logging.warning('Unable to find package info for %s', self._test_package) |
287 | 420 |
288 for apk in args.additional_apks: | 421 for apk in args.additional_apks: |
289 if not os.path.exists(apk): | 422 if not os.path.exists(apk): |
290 error_func('Unable to find additional APK: %s' % apk) | 423 error_func('Unable to find additional APK: %s' % apk) |
291 self._additional_apks = ( | 424 self._additional_apks = ( |
292 [apk_helper.ToHelper(x) for x in args.additional_apks]) | 425 [apk_helper.ToHelper(x) for x in args.additional_apks]) |
293 | 426 |
294 def _initializeDataDependencyAttributes(self, args, isolate_delegate): | 427 def _initializeDataDependencyAttributes(self, args, isolate_delegate): |
(...skipping 167 matching lines...)
462 device_rel_path, host_rel_path = t.split(':') | 595 device_rel_path, host_rel_path = t.split(':') |
463 host_abs_path = os.path.join(host_paths.DIR_SOURCE_ROOT, host_rel_path) | 596 host_abs_path = os.path.join(host_paths.DIR_SOURCE_ROOT, host_rel_path) |
464 self._data_deps.extend( | 597 self._data_deps.extend( |
465 [(host_abs_path, | 598 [(host_abs_path, |
466 [None, 'chrome', 'test', 'data', device_rel_path])]) | 599 [None, 'chrome', 'test', 'data', device_rel_path])]) |
467 | 600 |
468 def GetDataDependencies(self): | 601 def GetDataDependencies(self): |
469 return self._data_deps | 602 return self._data_deps |
470 | 603 |
471 def GetTests(self): | 604 def GetTests(self): |
472 pickle_path = '%s-proguard.pickle' % self.test_jar | 605 tests = GetAllTests(self.test_jar) |
473 try: | 606 filtered_tests = FilterTests( |
474 tests = self._GetTestsFromPickle(pickle_path, self.test_jar) | 607 tests, self._test_filter, self._annotations, self._excluded_annotations) |
475 except self.ProguardPickleException as e: | 608 return self._ParametrizeTestsWithFlags(self._InflateTests(filtered_tests)) |
476 logging.info('Getting tests from JAR via proguard. (%s)', str(e)) | |
477 tests = self._GetTestsFromProguard(self.test_jar) | |
478 self._SaveTestsToPickle(pickle_path, self.test_jar, tests) | |
479 return self._ParametrizeTestsWithFlags( | |
480 self._InflateTests(self._FilterTests(tests))) | |
481 | |
482 class ProguardPickleException(Exception): | |
483 pass | |
484 | |
485 def _GetTestsFromPickle(self, pickle_path, jar_path): | |
486 if not os.path.exists(pickle_path): | |
487 raise self.ProguardPickleException('%s does not exist.' % pickle_path) | |
488 if os.path.getmtime(pickle_path) <= os.path.getmtime(jar_path): | |
489 raise self.ProguardPickleException( | |
490 '%s newer than %s.' % (jar_path, pickle_path)) | |
491 | |
492 with open(pickle_path, 'r') as pickle_file: | |
493 pickle_data = pickle.loads(pickle_file.read()) | |
494 jar_md5 = md5sum.CalculateHostMd5Sums(jar_path)[jar_path] | |
495 | |
496 try: | |
497 if pickle_data['VERSION'] != _PICKLE_FORMAT_VERSION: | |
498 raise self.ProguardPickleException('PICKLE_FORMAT_VERSION has changed.') | |
499 if pickle_data['JAR_MD5SUM'] != jar_md5: | |
500 raise self.ProguardPickleException('JAR file MD5 sum differs.') | |
501 return pickle_data['TEST_METHODS'] | |
502 except TypeError as e: | |
503 logging.error(pickle_data) | |
504 raise self.ProguardPickleException(str(e)) | |
505 | 609 |
506 # pylint: disable=no-self-use | 610 # pylint: disable=no-self-use |
507 def _GetTestsFromProguard(self, jar_path): | |
508 p = proguard.Dump(jar_path) | |
509 | |
510 def is_test_class(c): | |
511 return c['class'].endswith('Test') | |
512 | |
513 def is_test_method(m): | |
514 return m['method'].startswith('test') | |
515 | |
516 class_lookup = dict((c['class'], c) for c in p['classes']) | |
517 def recursive_get_class_annotations(c): | |
518 s = c['superclass'] | |
519 if s in class_lookup: | |
520 a = recursive_get_class_annotations(class_lookup[s]) | |
521 else: | |
522 a = {} | |
523 a.update(c['annotations']) | |
524 return a | |
525 | |
526 def stripped_test_class(c): | |
527 return { | |
528 'class': c['class'], | |
529 'annotations': recursive_get_class_annotations(c), | |
530 'methods': [m for m in c['methods'] if is_test_method(m)], | |
531 } | |
532 | |
533 return [stripped_test_class(c) for c in p['classes'] | |
534 if is_test_class(c)] | |
535 | |
536 def _SaveTestsToPickle(self, pickle_path, jar_path, tests): | |
537 jar_md5 = md5sum.CalculateHostMd5Sums(jar_path)[jar_path] | |
538 pickle_data = { | |
539 'VERSION': _PICKLE_FORMAT_VERSION, | |
540 'JAR_MD5SUM': jar_md5, | |
541 'TEST_METHODS': tests, | |
542 } | |
543 with open(pickle_path, 'w') as pickle_file: | |
544 pickle.dump(pickle_data, pickle_file) | |
545 | |
546 def _FilterTests(self, tests): | |
547 | |
548 def gtest_filter(c, m): | |
549 if not self._test_filter: | |
550 return True | |
551 # Allow fully-qualified name as well as an omitted package. | |
552 names = ['%s.%s' % (c['class'], m['method']), | |
553 '%s.%s' % (c['class'].split('.')[-1], m['method'])] | |
554 return unittest_util.FilterTestNames(names, self._test_filter) | |
555 | |
556 def annotation_filter(all_annotations): | |
557 if not self._annotations: | |
558 return True | |
559 return any_annotation_matches(self._annotations, all_annotations) | |
560 | |
561 def excluded_annotation_filter(all_annotations): | |
562 if not self._excluded_annotations: | |
563 return True | |
564 return not any_annotation_matches(self._excluded_annotations, | |
565 all_annotations) | |
566 | |
567 def any_annotation_matches(annotations, all_annotations): | |
568 return any( | |
569 ak in all_annotations and (av is None or av == all_annotations[ak]) | |
570 for ak, av in annotations.iteritems()) | |
571 | |
572 filtered_classes = [] | |
573 for c in tests: | |
574 filtered_methods = [] | |
575 for m in c['methods']: | |
576 # Gtest filtering | |
577 if not gtest_filter(c, m): | |
578 continue | |
579 | |
580 all_annotations = dict(c['annotations']) | |
581 all_annotations.update(m['annotations']) | |
582 if (not annotation_filter(all_annotations) | |
583 or not excluded_annotation_filter(all_annotations)): | |
584 continue | |
585 | |
586 filtered_methods.append(m) | |
587 | |
588 if filtered_methods: | |
589 filtered_class = dict(c) | |
590 filtered_class['methods'] = filtered_methods | |
591 filtered_classes.append(filtered_class) | |
592 | |
593 return filtered_classes | |
594 | |
595 def _InflateTests(self, tests): | 611 def _InflateTests(self, tests): |
596 inflated_tests = [] | 612 inflated_tests = [] |
597 for c in tests: | 613 for c in tests: |
598 for m in c['methods']: | 614 for m in c['methods']: |
599 a = dict(c['annotations']) | 615 a = dict(c['annotations']) |
600 a.update(m['annotations']) | 616 a.update(m['annotations']) |
601 inflated_tests.append({ | 617 inflated_tests.append({ |
602 'class': c['class'], | 618 'class': c['class'], |
603 'method': m['method'], | 619 'method': m['method'], |
604 'annotations': a, | 620 'annotations': a, |
(...skipping 36 matching lines...)
641 @staticmethod | 657 @staticmethod |
642 def GenerateTestResults( | 658 def GenerateTestResults( |
643 result_code, result_bundle, statuses, start_ms, duration_ms): | 659 result_code, result_bundle, statuses, start_ms, duration_ms): |
644 return GenerateTestResults(result_code, result_bundle, statuses, | 660 return GenerateTestResults(result_code, result_bundle, statuses, |
645 start_ms, duration_ms) | 661 start_ms, duration_ms) |
646 | 662 |
647 #override | 663 #override |
648 def TearDown(self): | 664 def TearDown(self): |
649 if self._isolate_delegate: | 665 if self._isolate_delegate: |
650 self._isolate_delegate.Clear() | 666 self._isolate_delegate.Clear() |
651 | |
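
A minimal usage sketch of the new module-level helpers, assuming a hypothetical test JAR path, filter string, and annotation names; it only illustrates how GetAllTests and FilterTests compose and is not code from the patch.

    # Hypothetical driver snippet (Python 2, like the module above).
    # 'out/Debug/test_apks/FooTest.jar' is an assumed path, not one
    # referenced by this change.
    tests = GetAllTests('out/Debug/test_apks/FooTest.jar')
    filtered = FilterTests(
        tests,
        test_filter='*FooTest.testBar*',
        annotations={'SmallTest': None},           # keep methods with this annotation
        excluded_annotations={'FlakyTest': None})  # drop methods with this annotation
    for c in filtered:
      print c['class'], [m['method'] for m in c['methods']]

GetAllTests reads the proguard-derived pickle cache when it is fresh and falls back to dumping the JAR otherwise; FilterTests returns the same class/methods dicts with non-matching methods removed.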