Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(317)

Side by Side Diff: slave/skia_slave_scripts/upload_bench_results.py

Issue 311433002: Revert of Adds uploading picture benchmark JSON data (Closed) Base URL: https://skia.googlesource.com/buildbot.git@master
Patch Set: Created 6 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """ Upload benchmark performance data results. """ 6 """ Upload benchmark performance data results. """
7 7
8 from build_step import BuildStep 8 from build_step import BuildStep
9 from utils import sync_bucket_subdir 9 from utils import sync_bucket_subdir
10 10
11 import builder_name_schema
12
13 import json 11 import json
14 import os 12 import os
15 import os.path 13 import os.path
16 import posixpath 14 import posixpath
17 import re 15 import re
18 import sys 16 import sys
19 from datetime import datetime 17 from datetime import datetime
20 18
21 19
22 # Modified from Skia repo code, roughly line 108 down of
23 # bench/check_regressions.py
# Modified from Skia repo code, roughly line 108 down of
# bench/check_regressions.py
def ReadExpectations(filename):
  """Reads bench expectations data from a CSV-style file.

  Each non-comment line has the form:
    <Bench_BmpConfig_TimeType>,<Platform-Alg>,<expected>,<lower>,<upper>

  Args:
    filename: path to the bench expectations file.

  Returns:
    A dictionary mapping <Bench_BmpConfig_TimeType> to a tuple of
    (lower_bound, upper_bound, expected) floats.

  Raises:
    Exception: if a line does not have exactly 5 comma-separated fields, or
        if the same <bench>,<platform> pair appears more than once.
    IOError: if the file cannot be opened.
  """
  expectations = {}
  # Track full <bench>,<platform> pairs so duplicates are detected even when
  # multiple lines share everything except the algorithm and/or platform.
  unstripped_keys = set()
  # 'with' guarantees the file handle is closed (the original leaked it).
  with open(filename) as expectations_file:
    for expectation in expectations_file:
      elements = expectation.strip().split(',')
      if not elements[0] or elements[0].startswith('#'):
        continue  # Skip blank lines and comments.
      if len(elements) != 5:
        raise Exception("Invalid expectation line format: %s" %
                        expectation)
      # [<Bench_BmpConfig_TimeType>,<Platform-Alg>]
      bench_entry = elements[0] + ',' + elements[1]
      if bench_entry in unstripped_keys:
        raise Exception("Dup entries for bench expectation %s" %
                        bench_entry)
      unstripped_keys.add(bench_entry)
      # [<Bench_BmpConfig_TimeType>] -> (LB, UB, EXPECTED)
      expectations[elements[0]] = (float(elements[-2]),
                                   float(elements[-1]),
                                   float(elements[-3]))
  return expectations
53
54
class UploadBenchResults(BuildStep):
  """Uploads bench logs and JSON bench results to Google Storage.

  Uploads three things from the perf data directory:
    1. The raw (non-JSON) bench logs, to perfdata/<builder>[/<build #>].
    2. A merged picture (skp) bench JSON file, with expectation bounds
       spliced in, to pics-json/YYYY/MM/DD/HH/<builder>.
    3. The microbench JSON file, annotated with machine and commit hash,
       to stats-json/YYYY/MM/DD/HH/<builder>.
  """

  def __init__(self, attempts=5, **kwargs):
    super(UploadBenchResults, self).__init__(attempts=attempts, **kwargs)

  def _GetPerfDataDir(self):
    # Local directory holding this build's bench output.
    return self._perf_data_dir

  def _GetBucketSubdir(self):
    """Returns the GS subdirectory for the raw bench logs."""
    subdirs = ['perfdata', self._builder_name]
    if self._is_try:
      # Trybots need to isolate their results by build number.
      subdirs.append(self._build_number)
    return posixpath.join(*subdirs)

  @staticmethod
  def _GetHourlyGsPath():
    """Returns the zero-padded 'YYYY/MM/DD/HH' (UTC) upload path component."""
    now = datetime.utcnow()
    return '/'.join((str(now.year).zfill(4), str(now.month).zfill(2),
                     str(now.day).zfill(2), str(now.hour).zfill(2)))

  def _ReadBenchExpectations(self):
    """Reads this builder's bench expectations.

    Returns an empty dict if the expectations file is missing, so uploading
    still proceeds (best-effort, matching the original behavior).
    """
    path_to_bench_expectations = os.path.join(
        'expectations',
        'bench',
        'bench_expectations_%s.txt' % builder_name_schema.GetWaterfallBot(
            self._builder_name))
    try:
      return ReadExpectations(path_to_bench_expectations)
    except IOError:
      print("Unable to open expectations file")
      return {}

  def _UploadPictureBenchResults(self, dest_gsbase, file_list):
    """Merges picture bench JSON files, splices in expectations, uploads.

    Args:
      dest_gsbase: GS base URL to upload to.
      file_list: names of files in the perf data directory.
    """
    expectations = self._ReadBenchExpectations()

    re_file_extract = re.compile(r'(^.*)\.skp.*$')
    # Compile once instead of re-scanning the pattern per file.
    re_picture_json = re.compile(
        '^bench_{}_data_skp_(.*)_([0-9]*)\.json$'.format(self._got_revision))
    json_total = {}
    pictures_timestamp = ''
    for file_name in file_list:
      # Find bench picture files, splice in expectation data.
      if not re_picture_json.search(file_name):
        continue
      if not pictures_timestamp:
        # All picture files share a timestamp; remember the first one seen.
        pictures_timestamp = file_name.split('_')[-1].split('.json')[0]
      full_file_name = os.path.join(self._GetPerfDataDir(), file_name)
      with open(full_file_name) as json_pictures:
        print('Loading file {}'.format(file_name))
        json_pictures_data = json.load(json_pictures)
      if json_total:
        json_total['benches'].extend(json_pictures_data['benches'])
      else:
        json_total = json_pictures_data

    if not json_total:
      # No picture bench files were produced for this revision. The original
      # code fell through and raised KeyError on json_total['benches'].
      print("No picture bench JSON files found; skipping upload")
      return

    # Now add expectations to all keys.
    for bench in json_total['benches']:
      for tileSet in bench['tileSets']:
        search_for_name = re_file_extract.search(bench['name'])
        if not search_for_name:
          print('Invalid bench name: {}'.format(bench['name']))
          continue
        # Key format matches ReadExpectations: <name>.skp_<tileSet>_
        key = '_'.join([
            search_for_name.group(1) + '.skp',
            tileSet['name'],
            ''])
        if key in expectations:
          (lower, upper, expected) = expectations[key]
          tileSet['lower'] = lower
          tileSet['upper'] = upper
          tileSet['expected'] = expected
        else:
          print("Unable to find key: {}".format(key))

    json_total['commitHash'] = self._got_revision
    json_total['machine'] = self._builder_name

    json_write_name = 'skpbench_{}_{}.json'.format(self._got_revision,
                                                   pictures_timestamp)
    full_json_write_name = os.path.join(self._GetPerfDataDir(),
                                        json_write_name)
    with open(full_json_write_name, 'w') as json_picture_write:
      json.dump(json_total, json_picture_write)

    gs_dir = 'pics-json/{}/{}'.format(self._GetHourlyGsPath(),
                                      self._builder_name)
    sync_bucket_subdir.SyncBucketSubdir(
        directory=self._GetPerfDataDir(),
        dest_gsbase=dest_gsbase,
        subdir=gs_dir,
        # TODO(kelvinly): Set up some way to configure this,
        # rather than hard coding it
        do_upload=True,
        do_download=False,
        exclude_json=False,
        filenames_filter=
        'skpbench_({})_[0-9]+\.json'.format(self._got_revision))

  def _UploadMicrobenchResults(self, dest_gsbase, file_list):
    """Annotates the microbench JSON file with machine/commit, uploads it.

    Args:
      dest_gsbase: GS base URL to upload to.
      file_list: names of files in the perf data directory.
    """
    microbench_json_file = None
    for file_name in file_list:
      if re.search('microbench_({})_[0-9]+\.json'.format(self._got_revision),
                   file_name):
        microbench_json_file = os.path.join(self._GetPerfDataDir(), file_name)
        break
    if not microbench_json_file:
      return  # Nothing to upload.

    with open(microbench_json_file) as json_file:
      json_data = json.load(json_file)
    json_data['machine'] = self._builder_name
    json_data['commitHash'] = self._got_revision
    # Rewrite the annotated JSON in place before uploading.
    with open(microbench_json_file, 'w') as json_file:
      json.dump(json_data, json_file)

    gs_dir = 'stats-json/{}/{}'.format(self._GetHourlyGsPath(),
                                       self._builder_name)
    sync_bucket_subdir.SyncBucketSubdir(
        directory=self._GetPerfDataDir(),
        dest_gsbase=dest_gsbase,
        subdir=gs_dir,
        # TODO(kelvinly): Set up some way to configure this,
        # rather than hard coding it
        do_upload=True,
        do_download=False,
        exclude_json=False,
        filenames_filter=
        'microbench_({})_[0-9]+\.json'.format(self._got_revision))

  def _RunInternal(self):
    dest_gsbase = (self._args.get('dest_gsbase') or
                   sync_bucket_subdir.DEFAULT_PERFDATA_GS_BASE)

    # Upload the normal bench logs (JSON is handled separately below).
    sync_bucket_subdir.SyncBucketSubdir(
        directory=self._GetPerfDataDir(),
        dest_gsbase=dest_gsbase,
        subdir=self._GetBucketSubdir(),
        do_upload=True,
        exclude_json=True,
        do_download=False)

    file_list = os.listdir(self._GetPerfDataDir())
    self._UploadPictureBenchResults(dest_gsbase, file_list)
    self._UploadMicrobenchResults(dest_gsbase, file_list)

  def _Run(self):
    self._RunInternal()
200 86
# Allow running this build step directly from the command line.
if __name__ == '__main__':
  sys.exit(BuildStep.RunBuildStep(UploadBenchResults))
OLDNEW
« no previous file with comments | « slave/skia_slave_scripts/run_bench.py ('k') | slave/skia_slave_scripts/utils/sync_bucket_subdir.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698