Chromium Code Reviews

Side by Side Diff: scripts/slave/recipe_modules/ct_swarming/api.py

Issue 1423993007: CT Perf recipe to run benchmarks on the top 1k sites using swarming (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build@master
Patch Set: Extract reusable functionality into recipe modules Created 5 years, 1 month ago
1 # Copyright 2015 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
4
5 import urllib
M-A Ruel 2015/11/11 23:53:36 Remove
rmistry 2015/11/12 14:39:17 Done.
6
7 from recipe_engine import recipe_api
8
9
10 CT_GS_BUCKET = 'cluster-telemetry'
11
12
13 class CTSwarmingApi(recipe_api.RecipeApi):
14 """Provides steps to run CT tasks on swarming bots."""
15
16 def __init__(self, **kwargs):
17 super(CTSwarmingApi, self).__init__(**kwargs)
18 # Path to the chromium src directory. Will be populated when
19 # checkout_dependencies is called.
20 self.chromium_src_dir = None
21 # Path to where artifacts should be downloaded from Google Storage. Will be
22 # populated when checkout_dependencies is called.
23 self.downloads_dir = None
24 # Path where swarming artifacts (isolate file, json output, etc) will be
25 # stored. Will be populated when checkout_dependencies is called.
26 self.swarming_temp_dir = None
27 # Keep track of all dirs created during recipe execution. Will be cleaned
28 # up by the cleanup step.
29 self._created_dirs = []
30
31 def checkout_dependencies(self):
32 """Checks out all repositories required for CT to run on swarming bots."""
33 # Checkout chromium and swarming.
34 self.m.chromium.set_config('chromium')
35 self.m.gclient.set_config('chromium')
36 self.m.bot_update.ensure_checkout(force=True)
37 self.m.swarming_client.checkout()
38 # Set the paths required by this recipe module.
39 self.chromium_src_dir = self.m.path['checkout']
40 self.downloads_dir = self.chromium_src_dir.join('content', 'test', 'ct')
41 self.swarming_temp_dir = self.m.path['tmp_base'].join('swarming_temp_dir')
42 self._makedirs(self.swarming_temp_dir)
43
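As a point of reference, a recipe consuming this module (assuming it is exposed to recipes as api.ct_swarming via DEPS) sees the following right after this step; the concrete paths in the comments are illustrative, not exact:

api.ct_swarming.checkout_dependencies()
# After the call (illustrative values; the real paths come from the recipe engine):
#   api.ct_swarming.chromium_src_dir   -> root of the bot_update checkout
#   api.ct_swarming.downloads_dir      -> <checkout>/content/test/ct
#   api.ct_swarming.swarming_temp_dir  -> <tmp_base>/swarming_temp_dir
# swarming_temp_dir is created via _makedirs, so it is deleted again by cleanup().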
44 def _makedirs(self, d):
45 """Creates the specified dir and registers it as a recipe created dir.
46
47 All directories created by this method will be deleted when cleanup() is
48 called.
49 """
50 self.m.file.makedirs('makedirs %s' % d, d)
51 self._created_dirs.append(d)
52
53 def download_CT_binary(self, ct_binary_name):
54 """Downloads the specified CT binary from GS into the downloads_dir."""
55 self.m.gsutil.download(
56 bucket=CT_GS_BUCKET,
57 source='swarming/binaries/%s' % ct_binary_name,
58 dest=self.downloads_dir.join(ct_binary_name))
59
60 def download_page_artifacts(self, page_type, slave_num, local_slave_dir):
61 """Downloads all the artifacts needed to run benchmarks on a page.
62
63 Args:
64 page_type: str. The CT page type. Eg: 1k, 10k.
65 slave_num: int. The number of the slave used to determine which GS
66 directory to download from. Eg: for the top 1k, slave1 will
67 contain webpages 1-10, slave2 will contain 11-20.
68 local_slave_dir: path obj. The directory artifacts should be downloaded
69 to.
70 """
71 # Download page sets.
72 page_sets_dir = local_slave_dir.join('page_sets')
73 self._makedirs(page_sets_dir)
74 self.m.gsutil.download(
75 bucket=CT_GS_BUCKET,
76 source='swarming/page_sets/%s/slave%s/*' % (page_type, slave_num),
77 dest=page_sets_dir)
78
79 # Download archives.
80 wpr_dir = page_sets_dir.join('data')
81 self._makedirs(wpr_dir)
82 self.m.gsutil.download(
83 bucket=CT_GS_BUCKET,
84 source='swarming/webpage_archives/%s/slave%s/*' % (page_type,
85 slave_num),
86 dest=wpr_dir)
87
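As a worked example of the GS layout this reads from, a hypothetical call with page_type='1k' and slave_num=5 boils down to two gsutil downloads:

# download_page_artifacts('1k', 5, local_slave_dir) fetches, roughly:
#   gs://cluster-telemetry/swarming/page_sets/1k/slave5/*
#       -> local_slave_dir/page_sets/
#   gs://cluster-telemetry/swarming/webpage_archives/1k/slave5/*
#       -> local_slave_dir/page_sets/data/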
88 def create_isolated_gen_json(self, isolate_path, base_dir, os_type,
89 slave_num):
90 """Creates an isolated.gen.json file.
91
92 Args:
93 isolate_path: path obj. Path to the isolate file.
94 base_dir: path obj. Dir that is the base of all paths in the isolate file.
95 os_type: str. The OS type to use when archiving the isolate file.
96 Eg: linux.
97 slave_num: int. The slave we want to create isolated.gen.json file for.
98
99 Returns:
100 Path to the isolated.gen.json file.
101 """
102 isolated_path = self.swarming_temp_dir.join(
103 'ct-task-%s.isolated' % slave_num)
104 isolate_args = [
105 '--isolate', isolate_path,
106 '--isolated', isolated_path,
107 '--config-variable', 'OS', os_type,
108 ]
109 isolated_gen_dict = {
110 'version': 1,
111 'dir': base_dir,
112 'args': isolate_args,
113 }
114 isolated_gen_json = self.swarming_temp_dir.join(
115 'slave%s.isolated.gen.json' % slave_num)
116 with open(str(isolated_gen_json), 'w') as fout:
117 fout.write(self.m.json.dumps(isolated_gen_dict, indent=4))
118 return isolated_gen_json
119
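For clarity, this is the dict that gets serialized (via self.m.json.dumps with indent=4) into slave<N>.isolated.gen.json; shown here for slave_num=3 and os_type='linux', with isolate_path and base_dir left symbolic:

{
    'version': 1,
    'dir': base_dir,          # base of all paths referenced by the .isolate file
    'args': [
        '--isolate', isolate_path,
        '--isolated', swarming_temp_dir.join('ct-task-3.isolated'),
        '--config-variable', 'OS', 'linux',
    ],
}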
120 def batcharchive(self, isolated_gen_json_files):
121 """Calls batcharchive on the specified isolated.gen.json files."""
122 batcharchive_args = [
123 'batcharchive',
124 '--isolate-server', self.m.isolate.isolate_server,
125 '--',
126 ]
127 batcharchive_args.extend(str(i) for i in isolated_gen_json_files)
128 self.m.python(
129 'batcharchiving isolated.gen.json for all slaves',
130 self.m.swarming_client.path.join('isolate.py'),
131 batcharchive_args)
132
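The step above amounts to a single isolate.py invocation over all the gen.json files produced by create_isolated_gen_json, roughly as follows (the server value is a placeholder); each gen.json is turned into its ct-task-<N>.isolated and archived to the isolate server:

# Effective command for two slaves (illustrative):
#   isolate.py batcharchive --isolate-server <isolate_server> -- \
#       <swarming_temp_dir>/slave1.isolated.gen.json \
#       <swarming_temp_dir>/slave2.isolated.gen.json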
133 def trigger_swarming_task(self, task_name, slave_num, dimensions):
134 """Triggers swarming task using the slave's isolated and json output file.
135
136 Args:
137 task_name: The name of the swarming task.
138 slave_num: int. The slave we want to trigger swarming tasks for.
139 dimensions: list of str. The dimensions to run the task on.
140 Eg: ['os Ubuntu', 'gpu 10de']
141 """
142 isolated_path = self.swarming_temp_dir.join(
143 'ct-task-%s.isolated' % slave_num)
144 json_output = self.swarming_temp_dir.join('ct-task-%s.json' % slave_num)
M-A Ruel 2015/11/11 23:53:36 I don't think duplicating swarming.py is a good idea …
rmistry 2015/11/12 00:07:34 I would like to switch to the Go implementation so …
145 swarming_trigger_args = [
146 'trigger',
147 '--task-name', task_name,
148 isolated_path,
149 '--swarming', self.m.swarming.swarming_server,
150 '--isolate-server', self.m.isolate.isolate_server,
151 '--dump-json', json_output
M-A Ruel 2015/11/11 23:53:36 Specify: '--priority', '90' This will run a quite …
rmistry 2015/11/12 14:39:17 Done. I brought up a job to see if this worked but …
152 ]
153 for d in dimensions:
154 swarming_trigger_args.append('--dimension')
155 swarming_trigger_args.extend(i for i in d.split())
156 self.m.python(
157 'triggering task for slave%s' % slave_num,
158 self.m.swarming_client.path.join('swarming.py'),
159 swarming_trigger_args)
160
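To make the dimension handling concrete: each 'key value' string from the docstring example is split and appended as a separate --dimension key value pair, which is the form swarming.py trigger expects. A minimal, runnable illustration of the loop above:

swarming_trigger_args = []
for d in ['os Ubuntu', 'gpu 10de']:
  swarming_trigger_args.append('--dimension')
  swarming_trigger_args.extend(i for i in d.split())
# swarming_trigger_args is now:
#   ['--dimension', 'os', 'Ubuntu', '--dimension', 'gpu', '10de']
# i.e. the task is constrained to bots matching os=Ubuntu and gpu=10de.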
161 def collect_swarming_task(self, slave_num):
162 """Collects swarming task for the specified slave."""
163 json_output = self.swarming_temp_dir.join('ct-task-%s.json' % slave_num)
164 swarming_collect_args = [
165 'collect',
166 '--swarming', self.m.swarming.swarming_server,
167 '--json', json_output
168 ]
169 self.m.python(
170 'collecting task for slave%s' % slave_num,
171 self.m.swarming_client.path.join('swarming.py'),
172 swarming_collect_args)
173
174 def cleanup(self):
175 """Cleans up all directories created by this recipe module."""
176 for d in self._created_dirs:
177 self.m.file.rmtree('Removing dir %s' % d, d)
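Taken together, the driving recipe in this CL (scripts/slave/recipes/perf/ct_top1k_rr_perf.py) would exercise this API roughly as in the sketch below. This is a minimal illustration, not the actual recipe: the DEPS list, binary name, isolate location, slave count and dimensions are all assumed values.

DEPS = ['ct_swarming']  # illustrative; the real recipe depends on more modules

def RunSteps(api):
  api.ct_swarming.checkout_dependencies()
  api.ct_swarming.download_CT_binary('run_ct_benchmarks')  # hypothetical binary name

  isolate_path = api.ct_swarming.chromium_src_dir.join(
      'testing', 'ct_top1k.isolate')  # hypothetical isolate location
  gen_json_files = []
  for slave_num in range(1, 101):  # top 1k split as 10 pages per slave, per the docstring
    local_slave_dir = api.ct_swarming.downloads_dir.join('slave%s' % slave_num)
    api.ct_swarming.download_page_artifacts('1k', slave_num, local_slave_dir)
    gen_json_files.append(api.ct_swarming.create_isolated_gen_json(
        isolate_path, api.ct_swarming.chromium_src_dir, 'linux', slave_num))

  api.ct_swarming.batcharchive(gen_json_files)
  for slave_num in range(1, 101):
    api.ct_swarming.trigger_swarming_task(
        'ct-1k-slave%s' % slave_num, slave_num, ['os Ubuntu'])
  for slave_num in range(1, 101):
    api.ct_swarming.collect_swarming_task(slave_num)
  api.ct_swarming.cleanup()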