# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


import logging
import re

from pylib import ports
from pylib.base import base_test_result
from pylib.base import test_run
from pylib.gtest import gtest_test_instance

from pylib.local import local_test_server_spawner
from pylib.local.device import local_device_environment
from pylib.local.device import local_device_test_run
from pylib.utils import device_temp_file

_COMMAND_LINE_FLAGS_SUPPORTED = True

_EXTRA_COMMAND_LINE_FILE = (
    'org.chromium.native_test.ChromeNativeTestActivity.CommandLineFile')
_EXTRA_COMMAND_LINE_FLAGS = (
    'org.chromium.native_test.ChromeNativeTestActivity.CommandLineFlags')

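# Used to parse the test status lines that the gtest binary writes to the
# instrumentation output, e.g. lines of the form:
#   [ RUN      ] FooTest.Bar
#   [       OK ] FooTest.Bar (5 ms)
#   [  FAILED  ] FooTest.Bar (5 ms)
#   [  PASSED  ] 10 tests.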
_RE_TEST_STATUS = re.compile(

klundberg, 2014/12/09 02:30:47:
Might be worth having a note that these are needed…
jbudorick, 2014/12/09 15:46:51:
Done.

    r'\[ +((?:RUN)|(?:FAILED)|(?:OK)) +\] ?(.*)(?: \((\d+) ms\))?')
_RE_TEST_RUN_STATUS = re.compile(
    r'\[ +(PASSED|RUNNER_FAILED|CRASHED) \] ?(.*)')

# Maybe this should move to the test instance?

klundberg, 2014/12/09 02:30:47:
Is this a question for the reviewers? If this is o…
jbudorick, 2014/12/09 15:46:51:
No, this was a question for me. Whether or not thi…

_SUITE_REQUIRES_TEST_SERVER_SPAWNER = [
    'content_unittests', 'content_browsertests', 'net_unittests', 'unit_tests'
]

class LocalDeviceGtestRun(local_device_test_run.LocalDeviceTestRun):

  def __init__(self, env, test_instance):
    assert isinstance(env, local_device_environment.LocalDeviceEnvironment)
    assert isinstance(test_instance, gtest_test_instance.GtestTestInstance)
    super(LocalDeviceGtestRun, self).__init__(env, test_instance)

    # TODO(jbudorick): These will be different for content_browsertests.
    self._package = 'org.chromium.native_test'
    self._runner = '.ChromiumNativeTestInstrumentationTestRunner'
    self._component = '%s/%s' % (self._package, self._runner)
    self._server_factories = []
    self._servers = {}

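    # Only register the factories here; actual server creation is deferred
    # to SetUp so that each device gets its own spawner (and port).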
    if self.TestPackage() in _SUITE_REQUIRES_TEST_SERVER_SPAWNER:
      def test_server_spawner_factory(dev):
        port = ports.AllocateTestServerPort()
        return local_test_server_spawner.LocalTestServerSpawner(
            port, dev, None)
      self._server_factories.append(test_server_spawner_factory)

klundberg, 2014/12/09 02:30:47:
I think I remember that different tests have diffe…
jbudorick, 2014/12/09 15:46:51:
At the time, I wasn't sure if the gtests would nee…

  #override
  def TestPackage(self):
    return self._test_instance._suite

  #override
  def SetUp(self):

    def individual_device_set_up(dev, hdt):

klundberg, 2014/12/09 02:30:47:
I'm not sure what hdt is supposed to stand for? Wo…
jbudorick, 2014/12/09 15:46:51:
shorthand for host_device_tuples

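      # hdt is shorthand for host_device_tuples: a list of
      # (host_path, device_path) pairs describing the data dependencies.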
      # Install test APK.

klundberg, 2014/12/09 02:30:47:
Install test APK.
jbudorick, 2014/12/09 15:46:51:
Done.

      dev.Install(self._test_instance.apk)

      # Push data dependencies.

klundberg, 2014/12/09 02:30:47:
Push data dependencies.
jbudorick, 2014/12/09 15:46:51:
Done.

      external_storage = dev.GetExternalStoragePath()
      hdt = [(h, d if d is not None else external_storage)
             for h, d in hdt]
      dev.PushChangedFiles(hdt)

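      # Create and start this device's test servers.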
      self._servers[str(dev)] = [s(dev) for s in self._server_factories]
      for s in self._servers[str(dev)]:
        s.SetUp()

    self._env.parallel_devices.pMap(individual_device_set_up,
                                    self._test_instance.GetDataDependencies())

  #override
  def _ShouldShard(self):
    return True

  #override
  def _CreateShards(self, tests):
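    # Assign tests round-robin across devices; each shard is a
    # colon-delimited list of test names, suitable for use as a gtest filter.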
    device_count = len(self._env.devices)
    return [':'.join(tests[i::device_count])
            for i in xrange(0, device_count)]

  #override
  def _GetTests(self):
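    # List tests by running the suite on a single device with
    # --gtest_list_tests, then apply the test instance's filtering.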
    tests = self._env.devices[0].StartInstrumentation(
        self._component,
        extras={_EXTRA_COMMAND_LINE_FLAGS: '_ --gtest_list_tests'},
        raw=False)
    tests = gtest_test_instance.ParseGTestListTests(tests)
    tests = self._test_instance.FilterTests(tests)
    return tests

  #override
  def _RunTest(self, device, test):

    # Run the test.
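    # The test activity reads its command line from a file on the device, so
    # write this shard's gtest filter to a temp file and pass its path via
    # the CommandLineFile extra.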
    with device_temp_file.DeviceTempFile(device.adb) as command_line_file:
      device.WriteFile(
          command_line_file.name,
          '_ --gtest_filter=%s' % test)

      output = device.StartInstrumentation(
          self._component,
          extras={_EXTRA_COMMAND_LINE_FILE: command_line_file.name},
          timeout=900, retries=0)

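    # Reset the test servers and clear the test app's state between runs.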
    for s in self._servers[str(device)]:
      s.Reset()
    device.ClearApplicationState(self._package)

    # Parse the output.
    # TODO(jbudorick): Transition test scripts away from parsing stdout.
    results = []
    for l in output:
      matcher = _RE_TEST_STATUS.match(l)
      if matcher:
        result_type = None
        if matcher.group(1) == 'OK':
          result_type = base_test_result.ResultType.PASS
        elif matcher.group(1) == 'FAILED':
          result_type = base_test_result.ResultType.FAIL

        if result_type:
          test_name = matcher.group(2)
          duration = matcher.group(3) if matcher.group(3) else 0
          results.append(base_test_result.BaseTestResult(
              test_name, result_type, duration))
      logging.info(l)
    return results

  #override
  def TearDown(self):
    def individual_device_tear_down(dev):
      for s in self._servers[str(dev)]:
        s.TearDown()

    self._env.parallel_devices.pMap(individual_device_tear_down)