OLD | NEW |
---|---|
1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import collections | 5 import collections |
6 import copy | 6 import copy |
7 import logging | 7 import logging |
8 import os | 8 import os |
9 import pickle | 9 import pickle |
10 import re | 10 import re |
(...skipping 31 matching lines...) | |
42 'org.chromium.test.driver.OnDeviceInstrumentationDriver.TargetClass') | 42 'org.chromium.test.driver.OnDeviceInstrumentationDriver.TargetClass') |
43 _EXTRA_TIMEOUT_SCALE = ( | 43 _EXTRA_TIMEOUT_SCALE = ( |
44 'org.chromium.test.driver.OnDeviceInstrumentationDriver.TimeoutScale') | 44 'org.chromium.test.driver.OnDeviceInstrumentationDriver.TimeoutScale') |
45 | 45 |
46 _PARAMETERIZED_TEST_ANNOTATION = 'ParameterizedTest' | 46 _PARAMETERIZED_TEST_ANNOTATION = 'ParameterizedTest' |
47 _PARAMETERIZED_TEST_SET_ANNOTATION = 'ParameterizedTest$Set' | 47 _PARAMETERIZED_TEST_SET_ANNOTATION = 'ParameterizedTest$Set' |
48 _NATIVE_CRASH_RE = re.compile('native crash', re.IGNORECASE) | 48 _NATIVE_CRASH_RE = re.compile('native crash', re.IGNORECASE) |
49 _PICKLE_FORMAT_VERSION = 10 | 49 _PICKLE_FORMAT_VERSION = 10 |
50 | 50 |
51 | 51 |
52 class ProguardPickleException(Exception): | |
53 pass | |
54 | |
55 | |
52 # TODO(jbudorick): Make these private class methods of | 56 # TODO(jbudorick): Make these private class methods of |
53 # InstrumentationTestInstance once the instrumentation test_runner is | 57 # InstrumentationTestInstance once the instrumentation test_runner is |
54 # deprecated. | 58 # deprecated. |
55 def ParseAmInstrumentRawOutput(raw_output): | 59 def ParseAmInstrumentRawOutput(raw_output): |
56 """Parses the output of an |am instrument -r| call. | 60 """Parses the output of an |am instrument -r| call. |
57 | 61 |
58 Args: | 62 Args: |
59 raw_output: the output of an |am instrument -r| call as a list of lines | 63 raw_output: the output of an |am instrument -r| call as a list of lines |
60 Returns: | 64 Returns: |
61 A 3-tuple containing: | 65 A 3-tuple containing: |
(...skipping 119 matching lines...) | |
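For context, a hedged illustration of the input this parser consumes: a short, made-up |am instrument -r| transcript passed in as a list of lines. The class and test names are illustrative and not part of this change.

    raw_output = [
        'INSTRUMENTATION_STATUS: class=org.chromium.example.ExampleTest',
        'INSTRUMENTATION_STATUS: test=testSomething',
        'INSTRUMENTATION_STATUS_CODE: 1',  # test started
        'INSTRUMENTATION_STATUS: class=org.chromium.example.ExampleTest',
        'INSTRUMENTATION_STATUS: test=testSomething',
        'INSTRUMENTATION_STATUS_CODE: 0',  # test finished
        'INSTRUMENTATION_CODE: -1',
    ]
    result = ParseAmInstrumentRawOutput(raw_output)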
181 to_remove = [] | 185 to_remove = [] |
182 for a in p.get('arguments', []): | 186 for a in p.get('arguments', []): |
183 if a['name'] == 'add': | 187 if a['name'] == 'add': |
184 to_add = ['--%s' % f for f in a['stringArray']] | 188 to_add = ['--%s' % f for f in a['stringArray']] |
185 elif a['name'] == 'remove': | 189 elif a['name'] == 'remove': |
186 to_remove = ['--%s' % f for f in a['stringArray']] | 190 to_remove = ['--%s' % f for f in a['stringArray']] |
187 result.append(ParamsTuple(to_add, to_remove)) | 191 result.append(ParamsTuple(to_add, to_remove)) |
188 return result if result else None | 192 return result if result else None |
189 | 193 |
190 | 194 |
195 def FilterTests(tests, test_filter=None, annotations=None, | |
196 excluded_annotations=None): | |
197 """Filter a list of tests | |
198 | |
199 Args: | |
200 tests: a list of tests. e.g. [ | |
201 {'annotations": {}, 'class': 'com.example.TestA', 'methods':[]}, | |
202 {'annotations": {}, 'class': 'com.example.TestB', 'methods':[]}] | |
203 test_filter: googletest-style filter string. | |
204 annotations: a dict of wanted annotations for test methods. | |
205 excluded_annotations: a dict of annotations to exclude. |
206 | |
207 Returns: |
208 A list of filtered tests. |
209 """ | |
210 def gtest_filter(c, m): | |
211 t = ['%s.%s' % (c['class'].split('.')[-1], m['method'])] | |
212 return (not test_filter | |
213 or unittest_util.FilterTestNames(t, test_filter)) | |
214 | |
215 def annotation_filter(all_annotations): | |
216 if not annotations: | |
217 return True | |
218 return any_annotation_matches(annotations, all_annotations) | |
219 | |
220 def excluded_annotation_filter(all_annotations): | |
221 if not excluded_annotations: | |
222 return True | |
223 return not any_annotation_matches(excluded_annotations, | |
224 all_annotations) | |
225 | |
226 def any_annotation_matches(annotations, all_annotations): | |
227 return any( | |
228 ak in all_annotations and (av is None or av == all_annotations[ak]) | |
229 for ak, av in annotations.iteritems()) | |
230 | |
231 filtered_classes = [] | |
232 for c in tests: | |
233 filtered_methods = [] | |
234 for m in c['methods']: | |
235 # Gtest filtering | |
236 if not gtest_filter(c, m): | |
237 continue | |
238 | |
239 all_annotations = dict(c['annotations']) | |
240 all_annotations.update(m['annotations']) | |
241 if (not annotation_filter(all_annotations) | |
242 or not excluded_annotation_filter(all_annotations)): | |
243 continue | |
244 | |
245 filtered_methods.append(m) | |
246 | |
247 if filtered_methods: | |
248 filtered_class = dict(c) | |
249 filtered_class['methods'] = filtered_methods | |
250 filtered_classes.append(filtered_class) | |
251 | |
252 return filtered_classes | |
253 | |
254 | |
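For context, a minimal usage sketch of FilterTests above, assuming it is called with this module's functions in scope; the test dicts and annotation names are hypothetical but follow the shape documented in the docstring.

    tests = [
        {'annotations': {'SmallTest': None},
         'class': 'com.example.TestA',
         'methods': [{'method': 'testFoo', 'annotations': {}}]},
        {'annotations': {'LargeTest': None},
         'class': 'com.example.TestB',
         'methods': [{'method': 'testBar', 'annotations': {}}]},
    ]
    # No gtest-style name filter; keep only tests whose combined class and
    # method annotations include SmallTest. Here that leaves only TestA.
    small_tests = FilterTests(tests, annotations={'SmallTest': None})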
255 def GetAllTests(test_jar): | |
256 pickle_path = '%s-proguard.pickle' % test_jar | |
257 try: | |
258 tests = _GetTestsFromPickle(pickle_path, test_jar) | |
259 except ProguardPickleException as e: | |
260 logging.info('Could not get tests from pickle: %s', e) | |
261 logging.info('Getting tests from JAR via proguard.') | |
262 tests = _GetTestsFromProguard(test_jar) | |
263 _SaveTestsToPickle(pickle_path, test_jar, tests) | |
264 return tests | |
265 | |
266 | |
267 def _GetTestsFromPickle(pickle_path, jar_path): | |
268 if not os.path.exists(pickle_path): | |
269 raise ProguardPickleException('%s does not exist.' % pickle_path) | |
270 if os.path.getmtime(pickle_path) <= os.path.getmtime(jar_path): | |
271 raise ProguardPickleException( | |
272 '%s newer than %s.' % (jar_path, pickle_path)) | |
273 | |
274 with open(pickle_path, 'r') as pickle_file: | |
275 pickle_data = pickle.loads(pickle_file.read()) | |
276 jar_md5 = md5sum.CalculateHostMd5Sums(jar_path)[jar_path] | |
277 | |
278 if pickle_data['VERSION'] != _PICKLE_FORMAT_VERSION: | |
279 raise ProguardPickleException('PICKLE_FORMAT_VERSION has changed.') | |
280 if pickle_data['JAR_MD5SUM'] != jar_md5: | |
281 raise ProguardPickleException('JAR file MD5 sum differs.') | |
282 return pickle_data['TEST_METHODS'] | |
283 | |
284 def _GetTestsFromProguard(jar_path): | |
285 p = proguard.Dump(jar_path) | |
286 class_lookup = dict((c['class'], c) for c in p['classes']) | |
287 | |
288 def is_test_class(c): | |
289 return c['class'].endswith('Test') | |
290 | |
291 def is_test_method(m): | |
292 return m['method'].startswith('test') | |
293 | |
294 def recursive_class_annotations(c): | |
295 s = c['superclass'] | |
296 if s in class_lookup: | |
297 a = recursive_class_annotations(class_lookup[s]) | |
298 else: | |
299 a = {} | |
300 a.update(c['annotations']) | |
301 return a | |
302 | |
303 def stripped_test_class(c): | |
304 return { | |
305 'class': c['class'], | |
306 'annotations': recursive_class_annotations(c), | |
307 'methods': [m for m in c['methods'] if is_test_method(m)], | |
308 } | |
309 | |
310 return [stripped_test_class(c) for c in p['classes'] | |
311 if is_test_class(c)] | |
312 | |
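For context, a hedged sketch of the dict shape that _GetTestsFromProguard reads out of proguard.Dump(); only the keys used above are shown, and the class names are hypothetical.

    dump = {
        'classes': [
            {'class': 'com.example.BaseTest',
             'superclass': 'junit.framework.TestCase',
             'annotations': {'MediumTest': None},
             'methods': []},
            {'class': 'com.example.FooTest',
             'superclass': 'com.example.BaseTest',
             'annotations': {'SmallTest': None},
             'methods': [
                 # Kept by is_test_method: name starts with 'test'.
                 {'method': 'testBar', 'annotations': {}},
                 # Dropped by is_test_method: no 'test' prefix.
                 {'method': 'setUp', 'annotations': {}},
             ]},
        ],
    }
    # recursive_class_annotations merges BaseTest's class annotations into
    # FooTest's, so FooTest ends up with both MediumTest and SmallTest.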
313 def _SaveTestsToPickle(pickle_path, jar_path, tests): | |
314 jar_md5 = md5sum.CalculateHostMd5Sums(jar_path)[jar_path] | |
315 pickle_data = { | |
316 'VERSION': _PICKLE_FORMAT_VERSION, | |
317 'JAR_MD5SUM': jar_md5, | |
318 'TEST_METHODS': tests, | |
319 } | |
320 with open(pickle_path, 'w') as pickle_file: | |
321 pickle.dump(pickle_data, pickle_file) | |
322 | |
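For context, a minimal sketch of how the pickle cache above is used; the jar path is hypothetical and the calls assume this module's functions are in scope.

    jar = '/out/Debug/lib.java/ChromeTest.jar'
    # First call: no pickle yet (or a stale/mismatched one), so the tests are
    # extracted via proguard and cached in '<jar>-proguard.pickle'.
    tests = GetAllTests(jar)
    # Later calls reuse the pickle as long as it is newer than the jar, its
    # recorded MD5 sum still matches, and _PICKLE_FORMAT_VERSION is unchanged.
    tests = GetAllTests(jar)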
191 class InstrumentationTestInstance(test_instance.TestInstance): | 323 class InstrumentationTestInstance(test_instance.TestInstance): |
192 | 324 |
193 def __init__(self, args, isolate_delegate, error_func): | 325 def __init__(self, args, isolate_delegate, error_func): |
194 super(InstrumentationTestInstance, self).__init__() | 326 super(InstrumentationTestInstance, self).__init__() |
195 | 327 |
196 self._additional_apks = [] | 328 self._additional_apks = [] |
197 self._apk_under_test = None | 329 self._apk_under_test = None |
198 self._apk_under_test_incremental_install_script = None | 330 self._apk_under_test_incremental_install_script = None |
199 self._package_info = None | 331 self._package_info = None |
200 self._suite = None | 332 self._suite = None |
(...skipping 72 matching lines...) | |
273 error_func('Unable to find test JAR: %s' % self._test_jar) | 405 error_func('Unable to find test JAR: %s' % self._test_jar) |
274 | 406 |
275 self._test_package = self._test_apk.GetPackageName() | 407 self._test_package = self._test_apk.GetPackageName() |
276 self._test_runner = self._test_apk.GetInstrumentationName() | 408 self._test_runner = self._test_apk.GetInstrumentationName() |
277 | 409 |
278 self._package_info = None | 410 self._package_info = None |
279 if self._apk_under_test: | 411 if self._apk_under_test: |
280 package_under_test = self._apk_under_test.GetPackageName() | 412 package_under_test = self._apk_under_test.GetPackageName() |
281 for package_info in constants.PACKAGE_INFO.itervalues(): | 413 for package_info in constants.PACKAGE_INFO.itervalues(): |
282 if package_under_test == package_info.package: | 414 if package_under_test == package_info.package: |
283 self._package_info = package_info | 415 self._package_info = package_info |
perezju (2016/04/25 12:45:07): should this also break?
Yoland Yan (Google) (2016/04/26 01:10:08): Ya, PACKAGE_INFO values should be unique.
284 if not self._package_info: | 416 if not self._package_info: |
285 logging.warning('Unable to find package info for %s', self._test_package) | 417 logging.warning('Unable to find package info for %s', self._test_package) |
286 | 418 |
287 for apk in args.additional_apks: | 419 for apk in args.additional_apks: |
288 if not os.path.exists(apk): | 420 if not os.path.exists(apk): |
289 error_func('Unable to find additional APK: %s' % apk) | 421 error_func('Unable to find additional APK: %s' % apk) |
290 self._additional_apks = ( | 422 self._additional_apks = ( |
291 [apk_helper.ToHelper(x) for x in args.additional_apks]) | 423 [apk_helper.ToHelper(x) for x in args.additional_apks]) |
292 | 424 |
293 def _initializeDataDependencyAttributes(self, args, isolate_delegate): | 425 def _initializeDataDependencyAttributes(self, args, isolate_delegate): |
(...skipping 166 matching lines...) | |
460 device_rel_path, host_rel_path = t.split(':') | 592 device_rel_path, host_rel_path = t.split(':') |
461 host_abs_path = os.path.join(host_paths.DIR_SOURCE_ROOT, host_rel_path) | 593 host_abs_path = os.path.join(host_paths.DIR_SOURCE_ROOT, host_rel_path) |
462 self._data_deps.extend( | 594 self._data_deps.extend( |
463 [(host_abs_path, | 595 [(host_abs_path, |
464 [None, 'chrome', 'test', 'data', device_rel_path])]) | 596 [None, 'chrome', 'test', 'data', device_rel_path])]) |
465 | 597 |
466 def GetDataDependencies(self): | 598 def GetDataDependencies(self): |
467 return self._data_deps | 599 return self._data_deps |
468 | 600 |
469 def GetTests(self): | 601 def GetTests(self): |
470 pickle_path = '%s-proguard.pickle' % self.test_jar | 602 tests = GetAllTests(self.test_jar) |
471 try: | 603 filtered_tests = FilterTests( |
472 tests = self._GetTestsFromPickle(pickle_path, self.test_jar) | 604 tests, self._test_filter, self._annotations, self._excluded_annotations) |
473 except self.ProguardPickleException as e: | 605 return self._ParametrizeTestsWithFlags(self._InflateTests(filtered_tests)) |
474 logging.info('Getting tests from JAR via proguard. (%s)', str(e)) | |
475 tests = self._GetTestsFromProguard(self.test_jar) | |
476 self._SaveTestsToPickle(pickle_path, self.test_jar, tests) | |
477 return self._ParametrizeTestsWithFlags( | |
478 self._InflateTests(self._FilterTests(tests))) | |
479 | |
480 class ProguardPickleException(Exception): | |
481 pass | |
482 | |
483 def _GetTestsFromPickle(self, pickle_path, jar_path): | |
484 if not os.path.exists(pickle_path): | |
485 raise self.ProguardPickleException('%s does not exist.' % pickle_path) | |
486 if os.path.getmtime(pickle_path) <= os.path.getmtime(jar_path): | |
487 raise self.ProguardPickleException( | |
488 '%s newer than %s.' % (jar_path, pickle_path)) | |
489 | |
490 with open(pickle_path, 'r') as pickle_file: | |
491 pickle_data = pickle.loads(pickle_file.read()) | |
492 jar_md5 = md5sum.CalculateHostMd5Sums(jar_path)[jar_path] | |
493 | |
494 try: | |
495 if pickle_data['VERSION'] != _PICKLE_FORMAT_VERSION: | |
496 raise self.ProguardPickleException('PICKLE_FORMAT_VERSION has changed.') | |
497 if pickle_data['JAR_MD5SUM'] != jar_md5: | |
498 raise self.ProguardPickleException('JAR file MD5 sum differs.') | |
499 return pickle_data['TEST_METHODS'] | |
500 except TypeError as e: | |
501 logging.error(pickle_data) | |
502 raise self.ProguardPickleException(str(e)) | |
503 | 606 |
504 # pylint: disable=no-self-use | 607 # pylint: disable=no-self-use |
505 def _GetTestsFromProguard(self, jar_path): | |
506 p = proguard.Dump(jar_path) | |
507 | |
508 def is_test_class(c): | |
509 return c['class'].endswith('Test') | |
510 | |
511 def is_test_method(m): | |
512 return m['method'].startswith('test') | |
513 | |
514 class_lookup = dict((c['class'], c) for c in p['classes']) | |
515 def recursive_get_class_annotations(c): | |
516 s = c['superclass'] | |
517 if s in class_lookup: | |
518 a = recursive_get_class_annotations(class_lookup[s]) | |
519 else: | |
520 a = {} | |
521 a.update(c['annotations']) | |
522 return a | |
523 | |
524 def stripped_test_class(c): | |
525 return { | |
526 'class': c['class'], | |
527 'annotations': recursive_get_class_annotations(c), | |
528 'methods': [m for m in c['methods'] if is_test_method(m)], | |
529 } | |
530 | |
531 return [stripped_test_class(c) for c in p['classes'] | |
532 if is_test_class(c)] | |
533 | |
534 def _SaveTestsToPickle(self, pickle_path, jar_path, tests): | |
535 jar_md5 = md5sum.CalculateHostMd5Sums(jar_path)[jar_path] | |
536 pickle_data = { | |
537 'VERSION': _PICKLE_FORMAT_VERSION, | |
538 'JAR_MD5SUM': jar_md5, | |
539 'TEST_METHODS': tests, | |
540 } | |
541 with open(pickle_path, 'w') as pickle_file: | |
542 pickle.dump(pickle_data, pickle_file) | |
543 | |
544 def _FilterTests(self, tests): | |
545 | |
546 def gtest_filter(c, m): | |
547 t = ['%s.%s' % (c['class'].split('.')[-1], m['method'])] | |
548 return (not self._test_filter | |
549 or unittest_util.FilterTestNames(t, self._test_filter)) | |
550 | |
551 def annotation_filter(all_annotations): | |
552 if not self._annotations: | |
553 return True | |
554 return any_annotation_matches(self._annotations, all_annotations) | |
555 | |
556 def excluded_annotation_filter(all_annotations): | |
557 if not self._excluded_annotations: | |
558 return True | |
559 return not any_annotation_matches(self._excluded_annotations, | |
560 all_annotations) | |
561 | |
562 def any_annotation_matches(annotations, all_annotations): | |
563 return any( | |
564 ak in all_annotations and (av is None or av == all_annotations[ak]) | |
565 for ak, av in annotations.iteritems()) | |
566 | |
567 filtered_classes = [] | |
568 for c in tests: | |
569 filtered_methods = [] | |
570 for m in c['methods']: | |
571 # Gtest filtering | |
572 if not gtest_filter(c, m): | |
573 continue | |
574 | |
575 all_annotations = dict(c['annotations']) | |
576 all_annotations.update(m['annotations']) | |
577 if (not annotation_filter(all_annotations) | |
578 or not excluded_annotation_filter(all_annotations)): | |
579 continue | |
580 | |
581 filtered_methods.append(m) | |
582 | |
583 if filtered_methods: | |
584 filtered_class = dict(c) | |
585 filtered_class['methods'] = filtered_methods | |
586 filtered_classes.append(filtered_class) | |
587 | |
588 return filtered_classes | |
589 | |
590 def _InflateTests(self, tests): | 608 def _InflateTests(self, tests): |
591 inflated_tests = [] | 609 inflated_tests = [] |
592 for c in tests: | 610 for c in tests: |
593 for m in c['methods']: | 611 for m in c['methods']: |
594 a = dict(c['annotations']) | 612 a = dict(c['annotations']) |
595 a.update(m['annotations']) | 613 a.update(m['annotations']) |
596 inflated_tests.append({ | 614 inflated_tests.append({ |
597 'class': c['class'], | 615 'class': c['class'], |
598 'method': m['method'], | 616 'method': m['method'], |
599 'annotations': a, | 617 'annotations': a, |
(...skipping 36 matching lines...) | |
636 @staticmethod | 654 @staticmethod |
637 def GenerateTestResults( | 655 def GenerateTestResults( |
638 result_code, result_bundle, statuses, start_ms, duration_ms): | 656 result_code, result_bundle, statuses, start_ms, duration_ms): |
639 return GenerateTestResults(result_code, result_bundle, statuses, | 657 return GenerateTestResults(result_code, result_bundle, statuses, |
640 start_ms, duration_ms) | 658 start_ms, duration_ms) |
641 | 659 |
642 #override | 660 #override |
643 def TearDown(self): | 661 def TearDown(self): |
644 if self._isolate_delegate: | 662 if self._isolate_delegate: |
645 self._isolate_delegate.Clear() | 663 self._isolate_delegate.Clear() |
646 | |