Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(361)

Side by Side Diff: slave/skia_slave_scripts/upload_bench_results.py

Issue 304393002: Adds uploading picture benchmark JSON data (Closed) Base URL: https://skia.googlesource.com/buildbot.git@master
Patch Set: Fix broken upload code Created 6 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """ Upload benchmark performance data results. """ 6 """ Upload benchmark performance data results. """
7 7
8 from build_step import BuildStep 8 from build_step import BuildStep
9 from utils import sync_bucket_subdir 9 from utils import sync_bucket_subdir
10 10
11 import builder_name_schema
12
11 import json 13 import json
12 import os 14 import os
13 import os.path 15 import os.path
14 import posixpath 16 import posixpath
15 import re 17 import re
16 import sys 18 import sys
17 from datetime import datetime 19 from datetime import datetime
18 20
19 21
# Modified from Skia repo code, roughly line 108 down of
# bench/check_regressions.py
def ReadExpectations(filename):
  """Reads bench expectations data from a CSV-style file.

  Each non-comment, non-blank line must contain exactly 5 comma-separated
  fields:
    <Bench_BmpConfig_TimeType>,<Platform-Alg>,<expected>,<lower>,<upper>

  Args:
    filename: path to the expectations file.

  Returns:
    A dict mapping <Bench_BmpConfig_TimeType> to a tuple of floats
    (lower_bound, upper_bound, expected).

  Raises:
    Exception: if a line does not have 5 fields, or if the same
        (<Bench_BmpConfig_TimeType>,<Platform-Alg>) pair appears twice.
    IOError: if the file cannot be opened.
  """
  expectations = {}
  # Track the full (test,platform) key so duplicates are caught even when
  # multiple lines share everything except the algorithm and/or platform.
  # A set gives O(1) membership checks (the original list was O(n)).
  unstripped_keys = set()
  # 'with' guarantees the file handle is closed; the original leaked it.
  with open(filename) as expectations_file:
    for expectation in expectations_file:
      elements = expectation.strip().split(',')
      # Skip blank lines and comment lines.
      if not elements[0] or elements[0].startswith('#'):
        continue
      if len(elements) != 5:
        raise Exception("Invalid expectation line format: %s" %
                        expectation)
      # [<Bench_BmpConfig_TimeType>,<Platform-Alg>]
      bench_entry = elements[0] + ',' + elements[1]
      if bench_entry in unstripped_keys:
        raise Exception("Dup entries for bench expectation %s" %
                        bench_entry)
      unstripped_keys.add(bench_entry)
      # [<Bench_BmpConfig_TimeType>] -> (LB, UB, EXPECTED)
      expectations[elements[0]] = (float(elements[-2]),
                                   float(elements[-1]),
                                   float(elements[-3]))
  return expectations
53
54
20 class UploadBenchResults(BuildStep): 55 class UploadBenchResults(BuildStep):
21 56
22 def __init__(self, attempts=5, **kwargs): 57 def __init__(self, attempts=5, **kwargs):
23 super(UploadBenchResults, self).__init__(attempts=attempts, **kwargs) 58 super(UploadBenchResults, self).__init__(attempts=attempts, **kwargs)
24 59
25 def _GetPerfDataDir(self): 60 def _GetPerfDataDir(self):
26 return self._perf_data_dir 61 return self._perf_data_dir
27 62
28 def _GetBucketSubdir(self): 63 def _GetBucketSubdir(self):
29 subdirs = ['perfdata', self._builder_name] 64 subdirs = ['perfdata', self._builder_name]
30 if self._is_try: 65 if self._is_try:
31 # Trybots need to isolate their results by build number. 66 # Trybots need to isolate their results by build number.
32 subdirs.append(self._build_number) 67 subdirs.append(self._build_number)
33 return posixpath.join(*subdirs) 68 return posixpath.join(*subdirs)
34 69
35 def _RunInternal(self): 70 def _RunInternal(self):
36 dest_gsbase = (self._args.get('dest_gsbase') or 71 dest_gsbase = (self._args.get('dest_gsbase') or
37 sync_bucket_subdir.DEFAULT_PERFDATA_GS_BASE) 72 sync_bucket_subdir.DEFAULT_PERFDATA_GS_BASE)
38 73
39 # Upload the normal bench logs 74 # Upload the normal bench logs
40 sync_bucket_subdir.SyncBucketSubdir( 75 sync_bucket_subdir.SyncBucketSubdir(
41 directory=self._GetPerfDataDir(), 76 directory=self._GetPerfDataDir(),
42 dest_gsbase=dest_gsbase, 77 dest_gsbase=dest_gsbase,
43 subdir=self._GetBucketSubdir(), 78 subdir=self._GetBucketSubdir(),
44 do_upload=True, 79 do_upload=True,
80 exclude_json=True,
45 do_download=False) 81 do_download=False)
46 82
47 # Find and open JSON file to add in additional fields, then upload.
48 json_file_name = None
49 file_list = os.listdir(self._GetPerfDataDir()) 83 file_list = os.listdir(self._GetPerfDataDir())
84 expectations = {}
85
86 path_to_bench_expectations = os.path.join(
87 'expectations',
88 'bench',
89 'bench_expectations_%s.txt' % builder_name_schema.GetWaterfallBot(
90 self._builder_name))
91 try:
92 expectations = ReadExpectations(path_to_bench_expectations)
93 except IOError:
94 print "Unable to open expectations file"
95
96 re_file_extract = re.compile(r'(^.*)\.skp.*$')
97 json_total = {}
98 pictures_timestamp = ''
99 for file_name in file_list:
100 # Find bench picture files, splice in expectation data
101 if not re.search('^bench_{}_data_skp_(.*)_([0-9]*)\.json$'.format(
102 self._got_revision), file_name):
103 continue
104 json_pictures_data = {}
105 if not pictures_timestamp:
106 pictures_timestamp = file_name.split('_')[-1].split('.json')[0]
107 full_file_name = os.path.join(self._GetPerfDataDir(), file_name)
108 with open(full_file_name) as json_pictures:
109 print 'Loading file {}'.format(file_name)
110 json_pictures_data = json.load(json_pictures)
111
112 if json_total:
113 json_total['benches'].extend(json_pictures_data['benches'])
114 else:
115 json_total = json_pictures_data
116
117 # Now add expectations to all keys
118 for bench in json_total['benches']:
119 for tileSet in bench['tileSets']:
120 search_for_name = re_file_extract.search(bench['name'])
121 if not search_for_name:
122 print 'Invalid bench name: {}'.format(bench['name'])
123 continue
124 key = '_'.join([
125 search_for_name.group(1)+'.skp',
126 tileSet['name'],
127 ''])
128 if key in expectations.keys():
129 (lower, upper, expected) = expectations[key]
130 tileSet['lower'] = lower
131 tileSet['upper'] = upper
132 tileSet['expected'] = expected
133 else:
134 print "Unable to find key: {}".format(key)
135
136 json_total['commitHash'] = self._got_revision
137 json_total['machine'] = self._builder_name
138
139 json_write_name = 'skpbench_{}_{}.json'.format(self._got_revision,
140 pictures_timestamp)
141 full_json_write_name = os.path.join(self._GetPerfDataDir(), json_write_name)
142 with open(full_json_write_name, 'w') as json_picture_write:
143 json.dump(json_total, json_picture_write)
144
145 now = datetime.utcnow()
146 gs_json_path = '/'.join((str(now.year).zfill(4), str(now.month).zfill(2),
147 str(now.day).zfill(2), str(now.hour).zfill(2)))
148 gs_dir = 'pics-json/{}/{}'.format(gs_json_path, self._builder_name)
149 sync_bucket_subdir.SyncBucketSubdir(
150 directory=self._GetPerfDataDir(),
151 dest_gsbase=dest_gsbase,
152 subdir=gs_dir,
153 # TODO(kelvinly): Set up some way to configure this,
154 # rather than hard coding it
155 do_upload=True,
156 do_download=False,
157 exclude_json=False,
158 filenames_filter=
159 'skpbench_({})_[0-9]+\.json'.format(self._got_revision))
160
161 # Find and open the bench JSON file to add in additional fields, then upload .
162 microbench_json_file = None
163
50 for file_name in file_list: 164 for file_name in file_list:
51 if re.search('microbench_({})_[0-9]+\.json'.format(self._got_revision), 165 if re.search('microbench_({})_[0-9]+\.json'.format(self._got_revision),
52 file_name): 166 file_name):
53 json_file_name = os.path.join(self._GetPerfDataDir(), file_name) 167 microbench_json_file = os.path.join(self._GetPerfDataDir(), file_name)
54 break 168 break
55 169
56 if json_file_name: 170 if microbench_json_file:
57 json_data = {} 171 json_data = {}
58 172
59 with open(json_file_name) as json_file: 173 with open(microbench_json_file) as json_file:
60 json_data = json.load(json_file) 174 json_data = json.load(json_file)
61 175
62 json_data['machine'] = self._builder_name 176 json_data['machine'] = self._builder_name
63 json_data['commitHash'] = self._got_revision 177 json_data['commitHash'] = self._got_revision
64 178
65 with open(json_file_name, 'w') as json_file: 179 with open(microbench_json_file, 'w') as json_file:
66 json.dump(json_data, json_file) 180 json.dump(json_data, json_file)
67 181
68 now = datetime.utcnow()
69 gs_json_path = '/'.join((str(now.year).zfill(4), str(now.month).zfill(2), 182 gs_json_path = '/'.join((str(now.year).zfill(4), str(now.month).zfill(2),
70 str(now.day).zfill(2), str(now.hour).zfill(2))) 183 str(now.day).zfill(2), str(now.hour).zfill(2)))
71 gs_dir = 'stats-json/{}/{}'.format(gs_json_path, self._builder_name) 184 gs_dir = 'stats-json/{}/{}'.format(gs_json_path, self._builder_name)
72 sync_bucket_subdir.SyncBucketSubdir( 185 sync_bucket_subdir.SyncBucketSubdir(
73 directory=self._GetPerfDataDir(), 186 directory=self._GetPerfDataDir(),
74 dest_gsbase=dest_gsbase, 187 dest_gsbase=dest_gsbase,
75 subdir=gs_dir, 188 subdir=gs_dir,
76 # TODO(kelvinly): Set up some way to configure this, 189 # TODO(kelvinly): Set up some way to configure this,
77 # rather than hard coding it 190 # rather than hard coding it
78 do_upload=True, 191 do_upload=True,
79 do_download=False, 192 do_download=False,
193 exclude_json=False,
80 filenames_filter= 194 filenames_filter=
81 'microbench_({})_[0-9]+\.json'.format(self._got_revision)) 195 'microbench_({})_[0-9]+\.json'.format(self._got_revision))
82 196
83 def _Run(self): 197 def _Run(self):
84 self._RunInternal() 198 self._RunInternal()
85 199
86 200
# Script entry point: run this build step and exit with its status code.
if __name__ == '__main__':
  sys.exit(BuildStep.RunBuildStep(UploadBenchResults))
OLDNEW
« no previous file with comments | « slave/skia_slave_scripts/run_bench.py ('k') | slave/skia_slave_scripts/utils/sync_bucket_subdir.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698