Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(2403)

Unified Diff: scripts/slave/recipes/perf/ct_top1k_rr_perf.py

Issue 1423993007: CT Perf recipe to run benchmarks on the top 1k sites using swarming (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build@master
Patch Set: Address feedback Created 5 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « masters/master.chromium.perf.fyi/slaves.cfg ('k') | no next file » | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: scripts/slave/recipes/perf/ct_top1k_rr_perf.py
diff --git a/scripts/slave/recipes/perf/ct_top1k_rr_perf.py b/scripts/slave/recipes/perf/ct_top1k_rr_perf.py
new file mode 100644
index 0000000000000000000000000000000000000000..78ab2354cc2a766f4d22197dc5f2d7db23f1420c
--- /dev/null
+++ b/scripts/slave/recipes/perf/ct_top1k_rr_perf.py
@@ -0,0 +1,186 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
# Recipe modules this recipe depends on, kept in alphabetical order
# ('swarming' was previously misplaced after 'time').
DEPS = [
  'archive',
  'bot_update',
  'chromium',
  'file',
  'gclient',
  'gsutil',
  'isolate',
  'path',
  'platform',
  'properties',
  'python',
  'step',
  'swarming',
  'swarming_client',
  'time',
  'zip',
]
+
+
# Google Storage bucket that holds the CT page sets, webpage archives and
# binaries downloaded by this recipe.
CT_BUCKET = 'cluster-telemetry'
# Page-set type to run against; '1k' selects the top 1k sites shards.
CT_PAGE_TYPE = '1k'
# Name of the Cluster Telemetry binary fetched from CT_BUCKET.
CT_BINARY = 'run_chromium_perf_swarming'
# Template expanded into one .isolate file per swarming shard.
CT_ISOLATE_TEMPLATE = 'ct_top1k.isolate.tmpl'

# Number of slaves to shard CT runs to.
# TODO(rmistry): Change the below to 100 when ready to run the full top 1k.
CT_NUM_SLAVES = 2
+
+
def _DownloadAndExtractBinary(api):
  """Fetches and unpacks the prebuilt chromium binary for this run.

  The Google Storage archive location is taken from the
  'parent_build_archive_url' build property supplied by the parent builder.
  """
  archive_url = api.properties['parent_build_archive_url']
  api.archive.download_and_unzip_build(
      step_name='Download and Extract Binary',
      target='Release',
      # Required by the API signature but has no effect when an explicit
      # archive URL is given.
      build_url=None,
      build_archive_url=archive_url)
+
+
# TODO(rmistry): What priority can I give the below tasks??
def RunSteps(api):
  """Isolates, triggers and collects CT perf benchmark tasks on swarming.

  Shards the top 1k site page sets across CT_NUM_SLAVES swarming tasks. For
  each shard this: downloads the shard's page sets and WPR archives from
  Google Storage, expands CT_ISOLATE_TEMPLATE into a per-shard .isolate
  file, archives it on the isolate server, and triggers a swarming task.
  All triggered tasks are collected in a second pass at the end.

  Expected build properties:
    buildername: must contain 'Repaint' or 'RR'; selects the benchmark.
    parent_build_archive_url: GS URL of the prebuilt chromium build.
    mastername, git_revision: substituted into the isolate template.

  Raises:
    Exception: if buildername does not identify a known benchmark.
  """
  # Figure out which benchmark to use.
  buildername = api.properties['buildername']
  if 'Repaint' in buildername:
    benchmark = 'repaint'
  elif 'RR' in buildername:
    benchmark = 'rasterize_and_record_micro'
  else:
    raise Exception('Do not recognise the buildername %s.' % buildername)

  # Checkout chromium and swarming.
  api.chromium.set_config('chromium')
  api.gclient.set_config('chromium')
  api.bot_update.ensure_checkout(force=True)
  api.swarming_client.checkout()

  # Download the prebuilt chromium binary.
  _DownloadAndExtractBinary(api)

  # Path to the chromium src directory.
  chromium_src_dir = api.path['checkout']
  # Path to where artifacts should be downloaded from Google Storage.
  downloads_dir = chromium_src_dir.join('content', 'test', 'ct')
  # Path where swarming artifacts (isolate file, json output) will be stored.
  swarming_temp_dir = api.path['tmp_base'].join('swarming_temp_dir')
  api.file.makedirs('makedirs swarming_temp_dir', swarming_temp_dir)

  # Download Cluster Telemetry binary.
  ct_binary_path = downloads_dir.join(CT_BINARY)
  api.gsutil.download(
      bucket=CT_BUCKET,
      source='swarming/binaries/%s' % CT_BINARY,
      dest=ct_binary_path)

  # Record how long the step took in swarming tasks.
  swarming_start_time = api.time.time()

  # Per-shard loop: download inputs, isolate, and trigger — but do not wait
  # for completion here; collection happens in a separate loop below so the
  # shards run concurrently.
  for slave_num in range(1, CT_NUM_SLAVES + 1):
    slave_dir = downloads_dir.join('slave%s' % slave_num)
    api.file.makedirs('makedirs slave_dir', slave_dir)

    # Download page sets.
    page_sets_dir = slave_dir.join('page_sets')
    api.file.makedirs('makedirs page_sets', page_sets_dir)
    api.gsutil.download(
        bucket=CT_BUCKET,
        source='swarming/page_sets/%s/slave%s/*' % (CT_PAGE_TYPE, slave_num),
        dest=page_sets_dir)

    # Download archives.
    wpr_dir = page_sets_dir.join('data')
    api.file.makedirs('makedirs wpr', wpr_dir)
    api.gsutil.download(
        bucket=CT_BUCKET,
        source='swarming/webpage_archives/%s/slave%s/*' % (CT_PAGE_TYPE,
                                                           slave_num),
        dest=wpr_dir)

    # TODO(rmistry): Remove the entire below section after crrev.com/1410353007
    # is submitted.
    api.file.copy(
        'copy %s' % CT_ISOLATE_TEMPLATE,
        '/repos/chromium/src/chrome/%s' % CT_ISOLATE_TEMPLATE,
        chromium_src_dir.join('chrome', CT_ISOLATE_TEMPLATE))
    for f in ['run_ct_top1k.py', 'path_util.py']:
      api.file.copy(
          'copy %s' % f,
          '/repos/chromium/src/content/test/ct/%s' % f,
          chromium_src_dir.join('content', 'test', 'ct', f))

    # Create this slave's isolate file from the CT_ISOLATE_TEMPLATE.
    # NOTE(review): raw open() bypasses the recipe step framework, so this
    # substitution is invisible to recipe simulation — verify it behaves
    # under the recipe test harness.
    isolate_dir = chromium_src_dir.join('chrome')
    isolate_template_path = isolate_dir.join(CT_ISOLATE_TEMPLATE)
    generated_isolate_path = isolate_dir.join('ct_top1k.isolate')
    with open(str(generated_isolate_path), 'wb') as fout:
      with open(str(isolate_template_path), 'rb') as fin:
        for line in fin:
          # Fill the [[...]] placeholders with this shard's parameters.
          fout.write(line.replace('[[SLAVE_NUM]]', str(slave_num))
                     .replace('[[MASTER]]', api.properties['mastername'])
                     .replace('[[BUILDER]]', api.properties['buildername'])
                     .replace('[[GIT_HASH]]',
                              api.properties['git_revision'])
                     .replace('[[BENCHMARK]]', benchmark))

    # Archive everything on the isolate server.
    # NOTE(review): reviewer suggested 'batcharchive' to archive all shards
    # in one invocation instead of one 'archive' call per slave.
    isolated_path = swarming_temp_dir.join('ct-1k-task-%s.isolated' % slave_num)
    isolate_args = [
        'archive',
        '--isolate', generated_isolate_path,
        '--isolated', isolated_path,
        '--config-variable', 'OS', 'linux',
        '--isolate-server', api.isolate.isolate_server,
        # TODO(rmistry): Why do I need PRODUCT_DIR ? fails without it. It also
        # requires bitmaptools in PRODUCT_DIR.
        '--path-variable', 'PRODUCT_DIR', api.path['tmp_base'],
    ]
    api.python(
        'archiving isolate for slave%s' % slave_num,
        api.swarming_client.path.join('isolate.py'),
        isolate_args)

    # Trigger swarming task.
    task_name = 'ct-1k-task-%s' % slave_num
    json_output = swarming_temp_dir.join('ct-1k-task-%s.json' % slave_num)
    swarming_trigger_args = [
        'trigger',
        '--task-name', task_name,
        isolated_path,
        '--swarming', api.swarming.swarming_server,
        '--dimension', 'os', 'Ubuntu',
        '--dimension', 'gpu', '10de',
        '--isolate-server', api.isolate.isolate_server,
        # --dump-json records the triggered task id for collection below.
        '--dump-json', json_output
    ]
    api.python(
        'triggering task for slave%s' % slave_num,
        api.swarming_client.path.join('swarming.py'),
        swarming_trigger_args)

    # We have triggered this slave's swarming task. Cleanup slave artifacts.
    api.file.rmtree('Remove slave dir', slave_dir)

  # Now collect all tasks.
  for slave_num in range(1, CT_NUM_SLAVES + 1):
    json_output = swarming_temp_dir.join('ct-1k-task-%s.json' % slave_num)
    swarming_collect_args = [
        'collect',
        '--swarming', api.swarming.swarming_server,
        '--json', json_output
    ]
    api.python(
        'collecting task for slave%s' % slave_num,
        api.swarming_client.path.join('swarming.py'),
        swarming_collect_args)

  # Cleanup the temporary swarming dir.
  api.file.rmtree('Remove swarming temp dir', swarming_temp_dir)

  # Python 2 print statement: parses as print((str) % (elapsed)).
  print ('Running isolating, triggering and collecting swarming tasks took a '
         'total of %s seconds') % (api.time.time() - swarming_start_time)
« no previous file with comments | « masters/master.chromium.perf.fyi/slaves.cfg ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698