Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(716)

Side by Side Diff: scripts/slave/recipe_modules/ct_swarming/api.py

Issue 1423993007: CT Perf recipe to run benchmarks on the top 1k sites using swarming (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build@master
Patch Set: Use isolate and swarming recipes Created 5 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 # Copyright 2015 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
4
5
6 from recipe_engine import recipe_api
7
8
# Google Storage bucket that holds all CT swarming inputs: CT binaries,
# page sets and webpage (WPR) archives.
CT_GS_BUCKET = 'cluster-telemetry'
11
class CTSwarmingApi(recipe_api.RecipeApi):
  """Provides steps to run CT tasks on swarming bots."""

  def __init__(self, **kwargs):
    super(CTSwarmingApi, self).__init__(**kwargs)
    # Path to the chromium src directory. Will be populated when
    # checkout_dependencies is called.
    self.chromium_src_dir = None
    # Path to where artifacts should be downloaded from Google Storage. Will be
    # populated when checkout_dependencies is called.
    self.downloads_dir = None
    # Path where swarming artifacts (isolate file, json output, etc) will be
    # stored. Will be populated when checkout_dependencies is called.
    self.swarming_temp_dir = None
    # Keep track of all dirs created during recipe execution. Will be cleaned
    # up by the cleanup step.
    self._created_dirs = []
    # Collection of all swarming tasks triggered by this recipe.
    self._swarming_tasks = []
31
  def checkout_dependencies(self):
    """Checks out all repositories required for CT to run on swarming bots.

    Also populates chromium_src_dir, downloads_dir and swarming_temp_dir,
    creates swarming_temp_dir on disk, and verifies the checked-out
    swarming_client is the version the recipe engine expects. Must be called
    before any other method of this module that uses those paths.
    """
    # Checkout chromium and swarming.
    self.m.chromium.set_config('chromium')
    self.m.gclient.set_config('chromium')
    # force=True so a bot with a dirty/previous checkout still syncs cleanly.
    self.m.bot_update.ensure_checkout(force=True)
    self.m.swarming_client.checkout()
    # Set the paths required by this recipe module.
    self.chromium_src_dir = self.m.path['checkout']
    self.downloads_dir = self.chromium_src_dir.join('content', 'test', 'ct')
    self.swarming_temp_dir = self.m.path['tmp_base'].join('swarming_temp_dir')
    self._makedirs(self.swarming_temp_dir)
    # Ensure swarming_client is compatible with what recipes expect.
    self.m.swarming.check_client_version()
46
  def _makedirs(self, d):
    """Creates the specified dir and registers it as a recipe created dir.

    All directories created by this method will be deleted when cleanup() is
    called.

    Args:
      d: path obj. The directory to create.
    """
    self.m.file.makedirs('makedirs %s' % d, d)
    # Recorded so cleanup() knows what to remove at the end of the recipe.
    self._created_dirs.append(d)
55
56 def download_CT_binary(self, ct_binary_name):
57 """Downloads the specified CT binary from GS into the downloads_dir."""
58 self.m.gsutil.download(
59 bucket=CT_GS_BUCKET,
60 source='swarming/binaries/%s' % ct_binary_name,
61 dest=self.downloads_dir.join(ct_binary_name))
62
63 def download_page_artifacts(self, page_type, slave_num, local_slave_dir):
64 """Downloads all the artifacts needed to run benchmarks on a page.
65
66 Args:
67 page_type: str. The CT page type. Eg: 1k, 10k.
68 slave_num: int. The number of the slave used to determine which GS
69 directory to download from. Eg: for the top 1k, slave1 will
70 contain webpages 1-10, slave2 will contain 11-20.
71 local_slave_dir: path obj. The directory artifacts should be downloaded
72 to.
73 """
74 # Download page sets.
75 page_sets_dir = local_slave_dir.join('page_sets')
76 self._makedirs(page_sets_dir)
77 self.m.gsutil.download(
78 bucket=CT_GS_BUCKET,
79 source='swarming/page_sets/%s/slave%s/*' % (page_type, slave_num),
80 dest=page_sets_dir)
81
82 # Download archives.
83 wpr_dir = page_sets_dir.join('data')
84 self._makedirs(wpr_dir)
85 self.m.gsutil.download(
86 bucket=CT_GS_BUCKET,
87 source='swarming/webpage_archives/%s/slave%s/*' % (page_type,
88 slave_num),
89 dest=wpr_dir)
90
91 def create_isolated_gen_json(self, isolate_path, base_dir, os_type,
92 slave_num):
93 """Creates an isolated.gen.json file.
94
95 Args:
96 isolate_path: path obj. Path to the isolate file.
97 base_dir: path obj. Dir that is the base of all paths in the isolate file.
98 os_type: str. The OS type to use when archiving the isolate file.
99 Eg: linux.
100 slave_num: int. The slave we want to create isolated.gen.json file for.
101
102 Returns:
103 Path to the isolated.gen.json file.
104 """
105 isolated_path = self.swarming_temp_dir.join(
106 'ct-task-%s.isolated' % slave_num)
107 isolate_args = [
108 '--isolate', isolate_path,
109 '--isolated', isolated_path,
110 '--config-variable', 'OS', os_type,
111 ]
112 isolated_gen_dict = {
113 'version': 1,
114 'dir': base_dir,
115 'args': isolate_args,
116 }
117 isolated_gen_json = self.swarming_temp_dir.join(
118 'slave%s.isolated.gen.json' % slave_num)
119 with open(str(isolated_gen_json), 'w') as fout:
120 fout.write(self.m.json.dumps(isolated_gen_dict, indent=4))
121 return isolated_gen_json
122
123 def batcharchive(self, num_slaves):
124 """Calls batcharchive on the specified isolated.gen.json files.
125
126 Args:
127 num_slaves: int. The number of slaves we will batcharchive
128 isolated.gen.json files for.
129 """
130 self.m.isolate.isolate_tests(
131 build_dir=self.swarming_temp_dir,
132 targets=['slave%s' % num for num in range(1, num_slaves+1)])
133
134 def trigger_swarming_tasks(self, swarm_hashes, task_name_prefix, dimensions):
135 """Triggers swarming tasks using swarm hashes.
136
137 Args:
138 swarm_hashes: list of str. List of swarm hashes from the isolate server.
139 task_name_prefix: The prefix to use when creating task_name.
140 dimensions: dict of str to str. The dimensions to run the task on.
141 Eg: {'os': 'Ubuntu', 'gpu': '10de'}
142 """
143 task_num = 0
144 for swarm_hash in swarm_hashes:
145 task_num += 1
146 swarming_task = self.m.swarming.task(
147 title='%s-%s' % (task_name_prefix, task_num),
148 isolated_hash=swarm_hash)
149 swarming_task.dimensions = dimensions
150 swarming_task.priority = 90
151 self._swarming_tasks.append(swarming_task)
152 self.m.swarming.trigger(self._swarming_tasks)
153
  def collect_swarming_tasks(self):
    """Collects all swarming tasks triggered by this recipe.

    Blocks (via the swarming module's collect step) until every task that
    trigger_swarming_tasks() recorded in self._swarming_tasks has completed.
    """
    self.m.swarming.collect(self._swarming_tasks)
157
158 def cleanup(self):
159 """Cleans up all directories created by this recipe module."""
160 for d in self._created_dirs:
M-A Ruel 2015/11/12 17:33:41 This shouldn't be needed in practice, if it is, we
rmistry 2015/11/13 14:59:58 Yes I do not think this is needed, I thought it wa
161 self.m.file.rmtree('Removing dir %s' % d, d)
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698