OLD | NEW |
---|---|
1 # Copyright 2013 The Chromium Authors. All rights reserved. | 1 # Copyright 2013 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Generates test runner factory and tests for performance tests.""" | 5 """Generates test runner factory and tests for performance tests.""" |
6 | 6 |
7 import json | 7 import json |
8 import fnmatch | 8 import fnmatch |
9 import logging | |
9 import os | 10 import os |
10 import shutil | 11 import shutil |
11 | 12 |
13 from pylib import android_commands | |
12 from pylib import constants | 14 from pylib import constants |
13 from pylib import forwarder | 15 from pylib import forwarder |
14 from pylib.perf import test_runner | 16 from pylib.perf import test_runner |
15 from pylib.utils import test_environment | 17 from pylib.utils import test_environment |
16 | 18 |
17 | 19 |
def _GetAllDevices():
  """Returns a sorted list of all devices this bot has ever seen.

  Reads out/.last_devices when present; otherwise falls back to the
  devices currently attached via adb.
  """
  # NOTE(review): .last_devices is expected to be written by the buildbot
  # device-status scripts -- confirm it is generated before this runs.
  last_devices_path = os.path.join(
      constants.DIR_SOURCE_ROOT, 'out', '.last_devices')
  try:
    with open(last_devices_path) as f:
      devices = f.read().splitlines()
  except IOError as e:
    # NOTE(review): missing file is expected on a fresh bot; we degrade to
    # the currently attached devices rather than failing.
    logging.error('Unable to find .last_devices [%s]', e)
    devices = android_commands.GetAttachedDevices()
  return sorted(devices)
31 | |
32 | |
33 def _GetStepsDictFromSingleStep(test_options): | |
34 # Running a single command, build the tests structure. | |
35 steps_dict = { | |
36 'version': 1, | |
37 'steps': { | |
38 'single_step': { | |
39 'device_affinity': 0, | |
40 'cmd': test_options.single_step | |
41 }, | |
42 } | |
43 } | |
44 return steps_dict | |
45 | |
46 # TODO(bulach): remove once it rolls downstream, crbug.com/378862. | |
47 def _GetStepsDictFromV0(steps_v0): | |
48 steps_dict = { | |
49 'version': 1, | |
50 'steps': {}, | |
51 } | |
52 affinity = 0 | |
53 for step in steps_v0: | |
54 steps_dict['steps'][step[0]] = { | |
55 'device_affinity': affinity, | |
56 'cmd': step[1], | |
57 } | |
58 affinity += 1 | |
59 return steps_dict | |
60 | |
61 | |
62 def _GetStepsDict(test_options): | |
63 if test_options.single_step: | |
64 return _GetStepsDictFromSingleStep(test_options) | |
65 if test_options.steps: | |
66 with file(test_options.steps, 'r') as f: | |
67 steps = json.load(f) | |
68 # TODO(bulach): remove once it rolls downstream, crbug.com/378862. | |
69 if isinstance(steps, list): | |
70 return _GetStepsDictFromV0(steps) | |
71 | |
72 # Already using the new format. | |
73 assert steps['version'] == 1 | |
74 return steps | |
75 | |
76 | |
def Setup(test_options):
  """Create and return the test runner factory and tests.

  Args:
    test_options: A PerformanceOptions object.

  Returns:
    A tuple of (TestRunnerFactory, tests, devices).
  """
  # TODO(bulach): remove this once the bot side lands. BUG=318369
  constants.SetBuildType('Release')
  if os.path.exists(constants.PERF_OUTPUT_DIR):
    shutil.rmtree(constants.PERF_OUTPUT_DIR)
  os.makedirs(constants.PERF_OUTPUT_DIR)

  # Before running the tests, kill any leftover server.
  test_environment.CleanupLeftoverProcesses()
  forwarder.Forwarder.UseMultiprocessing()

  # We want to keep device affinity, so return all devices ever seen.
  all_devices = _GetAllDevices()

  steps_dict = _GetStepsDict(test_options)
  # sorted() iterates the dict's keys directly; .keys() is redundant.
  sorted_step_names = sorted(steps_dict['steps'])

  if test_options.test_filter:
    sorted_step_names = fnmatch.filter(sorted_step_names,
                                       test_options.test_filter)

  flaky_steps = []
  if test_options.flaky_steps:
    # open() rather than the Python-2-only file() builtin.
    with open(test_options.flaky_steps, 'r') as f:
      flaky_steps = json.load(f)

  def TestRunnerFactory(device, shard_index):
    # The shard index selects which steps (by device_affinity) this runner
    # executes; len(all_devices) is the total shard count.
    return test_runner.TestRunner(
        test_options, device, shard_index, len(all_devices),
        steps_dict, flaky_steps)

  return (TestRunnerFactory, sorted_step_names, all_devices)
OLD | NEW |