Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """ Upload benchmark performance data results. """ | 6 """ Upload benchmark performance data results. """ |
| 7 | 7 |
| 8 from build_step import BuildStep | 8 from build_step import BuildStep |
| 9 from utils import sync_bucket_subdir | 9 from utils import sync_bucket_subdir |
| 10 | 10 |
| 11 import builder_name_schema | |
| 12 | |
| 11 import json | 13 import json |
| 12 import os | 14 import os |
| 13 import os.path | 15 import os.path |
| 14 import posixpath | 16 import posixpath |
| 15 import re | 17 import re |
| 16 import sys | 18 import sys |
| 17 from datetime import datetime | 19 from datetime import datetime |
| 18 | 20 |
| 19 | 21 |
| 22 # Modified from Skia repo code | |
|
benchen
2014/05/29 22:20:09
Please add the original file name/location to the
kelvinly
2014/05/30 00:44:51
Done.
kelvinly
2014/05/30 00:44:51
Done.
| |
def ReadExpectations(filename):
  """Reads bench expectations data from a file.

  Each meaningful line of the file is CSV with exactly five fields:
    <bench_key>,<platform_alg>,<expected>,<lower_bound>,<upper_bound>
  Blank lines and lines whose first field starts with '#' are skipped.

  Args:
    filename: path to the expectations file.

  Returns:
    Dict mapping <bench_key> to a (lower_bound, upper_bound, expected)
    tuple of floats.

  Raises:
    Exception: if a line does not have exactly five fields, or if the same
        (<bench_key>,<platform_alg>) pair appears more than once.
    IOError: if the file cannot be opened.
  """
  expectations = {}
  # Track (bench_key, platform_alg) pairs to detect duplicates; multiple
  # lines may share everything except the algorithm and/or platform, so the
  # bench key alone is not enough. A set gives O(1) membership tests.
  unstripped_keys = set()
  # 'with' guarantees the file is closed; iterating the file object avoids
  # materializing all lines up front.
  with open(filename) as expectations_file:
    for expectation in expectations_file:
      elements = expectation.strip().split(',')
      if not elements[0] or elements[0].startswith('#'):
        continue
      if len(elements) != 5:
        raise Exception("Invalid expectation line format: %s" %
                        expectation)
      bench_entry = elements[0] + ',' + elements[1]
      if bench_entry in unstripped_keys:
        raise Exception("Dup entries for bench expectation %s" %
                        bench_entry)
      unstripped_keys.add(bench_entry)
      # <bench_key> -> (LOWER_BOUND, UPPER_BOUND, EXPECTED); note the stored
      # order differs from the file's column order.
      expectations[elements[0]] = (float(elements[-2]),
                                   float(elements[-1]),
                                   float(elements[-3]))
  return expectations
| 48 | |
| 49 | |
| 20 class UploadBenchResults(BuildStep): | 50 class UploadBenchResults(BuildStep): |
| 21 | 51 |
| 22 def __init__(self, attempts=5, **kwargs): | 52 def __init__(self, attempts=5, **kwargs): |
| 23 super(UploadBenchResults, self).__init__(attempts=attempts, **kwargs) | 53 super(UploadBenchResults, self).__init__(attempts=attempts, **kwargs) |
| 24 | 54 |
| 25 def _GetPerfDataDir(self): | 55 def _GetPerfDataDir(self): |
| 26 return self._perf_data_dir | 56 return self._perf_data_dir |
| 27 | 57 |
| 28 def _GetBucketSubdir(self): | 58 def _GetBucketSubdir(self): |
| 29 subdirs = ['perfdata', self._builder_name] | 59 subdirs = ['perfdata', self._builder_name] |
| 30 if self._is_try: | 60 if self._is_try: |
| 31 # Trybots need to isolate their results by build number. | 61 # Trybots need to isolate their results by build number. |
| 32 subdirs.append(self._build_number) | 62 subdirs.append(self._build_number) |
| 33 return posixpath.join(*subdirs) | 63 return posixpath.join(*subdirs) |
| 34 | 64 |
| 35 def _RunInternal(self): | 65 def _RunInternal(self): |
| 36 dest_gsbase = (self._args.get('dest_gsbase') or | 66 dest_gsbase = (self._args.get('dest_gsbase') or |
| 37 sync_bucket_subdir.DEFAULT_PERFDATA_GS_BASE) | 67 sync_bucket_subdir.DEFAULT_PERFDATA_GS_BASE) |
| 38 | 68 |
| 39 # Upload the normal bench logs | 69 # Upload the normal bench logs |
| 40 sync_bucket_subdir.SyncBucketSubdir( | 70 sync_bucket_subdir.SyncBucketSubdir( |
| 41 directory=self._GetPerfDataDir(), | 71 directory=self._GetPerfDataDir(), |
| 42 dest_gsbase=dest_gsbase, | 72 dest_gsbase=dest_gsbase, |
| 43 subdir=self._GetBucketSubdir(), | 73 subdir=self._GetBucketSubdir(), |
| 44 do_upload=True, | 74 do_upload=True, |
| 45 do_download=False) | 75 do_download=False) |
| 46 | 76 |
| 77 file_list = os.listdir(self._GetPerfDataDir()) | |
| 78 expectations = {} | |
| 79 | |
| 80 path_to_bench_expectations = os.path.join( | |
| 81 'expectations', | |
| 82 'bench', | |
| 83 'bench_expectations_%s.txt' % builder_name_schema.GetWaterfallBot( | |
| 84 self._builder_name)) | |
| 85 try: | |
| 86 expectations = ReadExpectations(path_to_bench_expectations) | |
| 87 except IOError: | |
| 88 print "Unable to open expectations file" | |
| 89 | |
| 90 re_file_extract = re.compile(r'(^.*)\.skp.*$') | |
| 91 json_total = {} | |
| 92 pictures_timestamp = '' | |
| 93 for file_name in file_list: | |
| 94 # Find bench picture files, splice in expectation data | |
| 95 if not re.search('^bench_{}_data_skp_(.*)_([0-9]*)\.json$'.format( | |
| 96 self._got_revision), file_name): | |
| 97 continue | |
| 98 json_pictures_data = {} | |
| 99 if not pictures_timestamp: | |
| 100 pictures_timestamp = file_name.split('_')[-1].split('.json')[0] | |
| 101 full_file_name = os.path.join(self._GetPerfDataDir(), file_name) | |
| 102 with open(full_file_name) as json_pictures: | |
| 103 print 'Loading file {}'.format(file_name) | |
| 104 json_pictures_data = json.load(json_pictures) | |
| 105 | |
| 106 if json_total: | |
| 107 json_total['benches'].extend(json_pictures_data['benches']) | |
| 108 else: | |
| 109 json_total = json_pictures_data | |
| 110 | |
| 111 # Now add expectations to all keys | |
| 112 for bench in json_total['benches']: | |
| 113 for tileSet in bench['tileSets']: | |
| 114 search_for_name = re_file_extract.search(bench['name']) | |
| 115 if not search_for_name: | |
| 116 print 'Invalid bench name: {}'.format(bench['name']) | |
| 117 continue | |
| 118 key = '_'.join([ | |
| 119 search_for_name.group(1)+'.skp', | |
| 120 tileSet['name'], | |
| 121 '']) | |
| 122 if key in expectations.keys(): | |
| 123 (lower, upper, expected) = expectations[key] | |
| 124 tileSet['lower'] = lower | |
| 125 tileSet['upper'] = upper | |
| 126 tileSet['expected'] = expected | |
| 127 else: | |
| 128 print "Unable to find key: {}".format(key) | |
| 129 | |
| 130 json_total['commitHash'] = self._got_revision | |
| 131 json_total['machine'] = self._builder_name | |
| 132 | |
| 133 json_write_name = 'pictures_{}_{}.json'.format(self._got_revision, | |
|
benchen
2014/05/29 22:20:09
To be consistent, please use file name skpbench_*.
kelvinly
2014/05/30 00:44:51
Done.
| |
| 134 pictures_timestamp) | |
| 135 full_json_write_name = os.path.join(self._GetPerfDataDir(), json_write_name) | |
| 136 with open(full_json_write_name, 'w') as json_picture_write: | |
| 137 json.dump(json_total, json_picture_write) | |
| 138 | |
| 139 now = datetime.utcnow() | |
| 140 gs_json_path = '/'.join((str(now.year).zfill(4), str(now.month).zfill(2), | |
| 141 str(now.day).zfill(2), str(now.hour).zfill(2))) | |
| 142 gs_dir = 'pics-json/{}/{}'.format(gs_json_path, self._builder_name) | |
| 143 sync_bucket_subdir.SyncBucketSubdir( | |
| 144 directory=self._GetPerfDataDir(), | |
| 145 dest_gsbase=dest_gsbase, | |
| 146 subdir=gs_dir, | |
| 147 # TODO(kelvinly): Set up some way to configure this, | |
| 148 # rather than hard coding it | |
| 149 do_upload=True, | |
| 150 do_download=False, | |
| 151 exclude_json=False, | |
| 152 filenames_filter= | |
| 153 'pictures_({})_[0-9]+\.json'.format(self._got_revision)) | |
| 154 | |
| 47 # Find and open JSON file to add in additional fields, then upload. | 155 # Find and open JSON file to add in additional fields, then upload. |
|
benchen
2014/05/29 22:20:09
This part is now microbench only. Should update th
kelvinly
2014/05/30 00:44:51
Done.
| |
| 48 json_file_name = None | 156 json_file_name = None |
| 49 file_list = os.listdir(self._GetPerfDataDir()) | 157 |
| 50 for file_name in file_list: | 158 for file_name in file_list: |
| 51 if re.search('microbench_({})_[0-9]+\.json'.format(self._got_revision), | 159 if re.search('microbench_({})_[0-9]+\.json'.format(self._got_revision), |
| 52 file_name): | 160 file_name): |
| 53 json_file_name = os.path.join(self._GetPerfDataDir(), file_name) | 161 json_file_name = os.path.join(self._GetPerfDataDir(), file_name) |
| 54 break | 162 break |
| 55 | 163 |
| 56 if json_file_name: | 164 if json_file_name: |
| 57 json_data = {} | 165 json_data = {} |
| 58 | 166 |
| 59 with open(json_file_name) as json_file: | 167 with open(json_file_name) as json_file: |
| 60 json_data = json.load(json_file) | 168 json_data = json.load(json_file) |
| 61 | 169 |
| 62 json_data['machine'] = self._builder_name | 170 json_data['machine'] = self._builder_name |
| 63 json_data['commitHash'] = self._got_revision | 171 json_data['commitHash'] = self._got_revision |
| 64 | 172 |
| 65 with open(json_file_name, 'w') as json_file: | 173 with open(json_file_name, 'w') as json_file: |
| 66 json.dump(json_data, json_file) | 174 json.dump(json_data, json_file) |
| 67 | 175 |
| 68 now = datetime.utcnow() | |
| 69 gs_json_path = '/'.join((str(now.year).zfill(4), str(now.month).zfill(2), | 176 gs_json_path = '/'.join((str(now.year).zfill(4), str(now.month).zfill(2), |
| 70 str(now.day).zfill(2), str(now.hour).zfill(2))) | 177 str(now.day).zfill(2), str(now.hour).zfill(2))) |
| 71 gs_dir = 'stats-json/{}/{}'.format(gs_json_path, self._builder_name) | 178 gs_dir = 'stats-json/{}/{}'.format(gs_json_path, self._builder_name) |
| 72 sync_bucket_subdir.SyncBucketSubdir( | 179 sync_bucket_subdir.SyncBucketSubdir( |
| 73 directory=self._GetPerfDataDir(), | 180 directory=self._GetPerfDataDir(), |
| 74 dest_gsbase=dest_gsbase, | 181 dest_gsbase=dest_gsbase, |
| 75 subdir=gs_dir, | 182 subdir=gs_dir, |
| 76 # TODO(kelvinly): Set up some way to configure this, | 183 # TODO(kelvinly): Set up some way to configure this, |
| 77 # rather than hard coding it | 184 # rather than hard coding it |
| 78 do_upload=True, | 185 do_upload=True, |
| 79 do_download=False, | 186 do_download=False, |
| 187 exclude_json=False, | |
| 80 filenames_filter= | 188 filenames_filter= |
| 81 'microbench_({})_[0-9]+\.json'.format(self._got_revision)) | 189 'microbench_({})_[0-9]+\.json'.format(self._got_revision)) |
| 82 | 190 |
| 83 def _Run(self): | 191 def _Run(self): |
| 84 self._RunInternal() | 192 self._RunInternal() |
| 85 | 193 |
| 86 | 194 |
# Allow this build step to be invoked directly as a script.
if __name__ == '__main__':
  sys.exit(BuildStep.RunBuildStep(UploadBenchResults))
| OLD | NEW |