OLD | NEW |
1 ''' | 1 ''' |
2 Created on May 16, 2011 | 2 Created on May 16, 2011 |
3 | 3 |
4 @author: bungeman | 4 @author: bungeman |
5 ''' | 5 ''' |
6 import bench_util | 6 import bench_util |
7 import getopt | 7 import getopt |
8 import httplib | 8 import httplib |
9 import itertools | 9 import itertools |
10 import json | 10 import json |
11 import os | 11 import os |
12 import re | 12 import re |
13 import sys | 13 import sys |
14 import urllib | 14 import urllib |
15 import urllib2 | 15 import urllib2 |
16 import xml.sax.saxutils | 16 import xml.sax.saxutils |
17 | 17 |
18 # We throw out any measurement outside this range, and log a warning. | 18 # Maximum number of characters we expect in an svn revision. |
19 MIN_REASONABLE_TIME = 0 | 19 MAX_SVN_REV_LENGTH = 5 |
20 MAX_REASONABLE_TIME = 99999 | |
21 | |
22 # Constants for prefixes in output title used in buildbot. | |
23 TITLE_PREAMBLE = 'Bench_Performance_for_' | |
24 TITLE_PREAMBLE_LENGTH = len(TITLE_PREAMBLE) | |
25 | 20 |
26 def usage(): | 21 def usage(): |
27 """Prints simple usage information.""" | 22 """Prints simple usage information.""" |
28 | 23 |
29 print '-a <url> the url to use for adding bench values to app engine app.' | 24 print '-a <representation_alg> bench representation algorithm to use. ' |
30 print ' Example: "https://skiadash.appspot.com/add_point".' | 25 print ' Defaults to "25th". See bench_util.py for details.' |
31 print ' If not set, will skip this step.' | 26 print '-b <builder> name of the builder whose bench data we are checking.' |
32 print '-b <bench> the bench to show.' | 27 print '-d <dir> a directory containing bench_<revision>_<scalar> files.' |
33 print '-c <config> the config to show (GPU, 8888, 565, etc).' | |
34 print '-d <dir> a directory containing bench_r<revision>_<scalar> files.' | |
35 print '-e <file> file containing expected bench builder values/ranges.' | 28 print '-e <file> file containing expected bench builder values/ranges.' |
36 print ' Will raise exception if actual bench values are out of range.' | 29 print ' Will raise exception if actual bench values are out of range.' |
37 print ' See bench_expectations_<builder>.txt for data format / examples.' | 30 print ' See bench_expectations_<builder>.txt for data format / examples.' |
38 print '-f <revision>[:<revision>] the revisions to use for fitting.' | 31 print '-r <revision> the git commit hash or svn revision for checking ' |
39 print ' Negative <revision> is taken as offset from most recent revision.' | 32 print ' bench values.' |
40 print '-i <time> the time to ignore (w, c, g, etc).' | 33 |
41 print ' The flag is ignored when -t is set; otherwise we plot all the' | |
42 print ' times except the one specified here.' | |
43 print '-l <title> title to use for the output graph' | |
44 print '-m <representation> representation of bench value.' | |
45 print ' See _ListAlgorithm class in bench_util.py.' | |
46 print '-o <path> path to which to write output.' | |
47 print '-r <revision>[:<revision>] the revisions to show.' | |
48 print ' Negative <revision> is taken as offset from most recent revision.' | |
49 print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).' | |
50 print '-t <time> the time to show (w, c, g, etc).' | |
51 print '-x <int> the desired width of the svg.' | |
52 print '-y <int> the desired height of the svg.' | |
53 print '--default-setting <setting>[=<value>] setting for those without.' | |
54 | |
55 | 34 |
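For orientation, a hypothetical invocation of the new, reduced flag set (the script name, builder name, and paths below are placeholders, not taken from this change):

#   python check_bench_regressions.py \
#       -a 25th \
#       -b Perf-SomeBuilder-Release \
#       -d perfdata/Perf-SomeBuilder-Release/data \
#       -e bench_expectations_Perf-SomeBuilder-Release.txt \
#       -r 1234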
56 class Label: | 35 class Label: |
57 """The information in a label. | 36 """The information in a label. |
58 | 37 |
59 (str, str, str, str, {str:str})""" | 38 (str, str, str, str, {str:str})""" |
60 def __init__(self, bench, config, time_type, settings): | 39 def __init__(self, bench, config, time_type, settings): |
61 self.bench = bench | 40 self.bench = bench |
62 self.config = config | 41 self.config = config |
63 self.time_type = time_type | 42 self.time_type = time_type |
64 self.settings = settings | 43 self.settings = settings |
65 | 44 |
66 def __repr__(self): | 45 def __repr__(self): |
67 return "Label(%s, %s, %s, %s)" % ( | 46 return "Label(%s, %s, %s, %s)" % ( |
68 str(self.bench), | 47 str(self.bench), |
69 str(self.config), | 48 str(self.config), |
70 str(self.time_type), | 49 str(self.time_type), |
71 str(self.settings), | 50 str(self.settings), |
72 ) | 51 ) |
73 | 52 |
74 def __str__(self): | 53 def __str__(self): |
75 return "%s_%s_%s_%s" % ( | 54 return "%s_%s_%s_%s" % ( |
76 str(self.bench), | 55 str(self.bench), |
77 str(self.config), | 56 str(self.config), |
78 str(self.time_type), | 57 str(self.time_type), |
79 str(self.settings), | 58 str(self.settings), |
80 ) | 59 ) |
81 | 60 |
82 def __eq__(self, other): | 61 def __eq__(self, other): |
83 return (self.bench == other.bench and | 62 return (self.bench == other.bench and |
84 self.config == other.config and | 63 self.config == other.config and |
85 self.time_type == other.time_type and | 64 self.time_type == other.time_type and |
86 self.settings == other.settings) | 65 self.settings == other.settings) |
87 | 66 |
88 def __hash__(self): | 67 def __hash__(self): |
89 return (hash(self.bench) ^ | 68 return (hash(self.bench) ^ |
90 hash(self.config) ^ | 69 hash(self.config) ^ |
91 hash(self.time_type) ^ | 70 hash(self.time_type) ^ |
92 hash(frozenset(self.settings.iteritems()))) | 71 hash(frozenset(self.settings.iteritems()))) |
93 | 72 |
94 def get_latest_revision(directory): | 73 def parse_dir(directory, default_settings, revision, rep): |
95 """Returns the latest revision number found within this directory. | 74 """Parses bench data from bench logs files. |
| 75 revision can be either svn revision or git commit hash. |
96 """ | 76 """ |
97 latest_revision_found = -1 | 77 revision_data_points = [] # list of BenchDataPoint |
98 for bench_file in os.listdir(directory): | |
99 file_name_match = re.match('bench_r(\d+)_(\S+)', bench_file) | |
100 if (file_name_match is None): | |
101 continue | |
102 revision = int(file_name_match.group(1)) | |
103 if revision > latest_revision_found: | |
104 latest_revision_found = revision | |
105 if latest_revision_found < 0: | |
106 return None | |
107 else: | |
108 return latest_revision_found | |
109 | |
110 def parse_dir(directory, default_settings, oldest_revision, newest_revision, | |
111 rep): | |
112 """Parses bench data from files like bench_r<revision>_<scalar>. | |
113 | |
114 (str, {str, str}, Number, Number) -> {int:[BenchDataPoints]}""" | |
115 revision_data_points = {} # {revision : [BenchDataPoints]} | |
116 file_list = os.listdir(directory) | 78 file_list = os.listdir(directory) |
117 file_list.sort() | 79 file_list.sort() |
118 for bench_file in file_list: | 80 for bench_file in file_list: |
119 file_name_match = re.match('bench_r(\d+)_(\S+)', bench_file) | 81 scalar_type = None |
120 if (file_name_match is None): | 82 # Scalar type, if any, is in the bench filename after revision |
121 continue | 83 if (len(revision) > MAX_SVN_REV_LENGTH and |
122 | 84 bench_file.startswith('bench_' + revision + '_')): |
123         revision = int(file_name_match.group(1)) | 85             # The revision is a Git commit hash. |
124 scalar_type = file_name_match.group(2) | 86 scalar_type = bench_file[len(revision) + len('bench_') + 1:] |
125 | 87 elif (bench_file.startswith('bench_r' + revision + '_') and |
126 if (revision < oldest_revision or revision > newest_revision): | 88 revision.isdigit()): |
 | 89             # The revision is an SVN revision number. |
| 90 scalar_type = bench_file[len(revision) + len('bench_r') + 1:] |
| 91 else: |
127 continue | 92 continue |
128 | 93 |
129 file_handle = open(directory + '/' + bench_file, 'r') | 94 file_handle = open(directory + '/' + bench_file, 'r') |
130 | 95 |
131 if (revision not in revision_data_points): | |
132 revision_data_points[revision] = [] | |
133 default_settings['scalar'] = scalar_type | 96 default_settings['scalar'] = scalar_type |
134 revision_data_points[revision].extend( | 97 revision_data_points.extend( |
135 bench_util.parse(default_settings, file_handle, rep)) | 98 bench_util.parse(default_settings, file_handle, rep)) |
136 file_handle.close() | 99 file_handle.close() |
137 return revision_data_points | 100 return revision_data_points |
138 | 101 |
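To make the new filename handling concrete, here is a minimal standalone sketch (Python 2, matching the script; the file names and revisions are made up) of how parse_dir derives the scalar type for the two revision styles. It mirrors the prefix/slice logic above and is not part of the change itself:

MAX_SVN_REV_LENGTH = 5

def scalar_type_for(bench_file, revision):
    # Mirrors the startswith/slice checks in the new parse_dir.
    if (len(revision) > MAX_SVN_REV_LENGTH and
            bench_file.startswith('bench_' + revision + '_')):
        # Git hash: bench_<hash>_<scalar>
        return bench_file[len(revision) + len('bench_') + 1:]
    elif (bench_file.startswith('bench_r' + revision + '_') and
          revision.isdigit()):
        # SVN revision: bench_r<revision>_<scalar>
        return bench_file[len(revision) + len('bench_r') + 1:]
    return None

print scalar_type_for('bench_r7671_data_skp', '7671')            # data_skp
print scalar_type_for('bench_' + 'a' * 40 + '_data', 'a' * 40)   # data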
139 def add_to_revision_data_points(new_point, revision, revision_data_points): | 102 def create_bench_dict(revision_data_points): |
140 """Add new_point to set of revision_data_points we are building up. | 103 """Convert current revision data into a dictionary of line data. |
141 """ | |
142 if (revision not in revision_data_points): | |
143 revision_data_points[revision] = [] | |
144 revision_data_points[revision].append(new_point) | |
145 | 104 |
146 def filter_data_points(unfiltered_revision_data_points): | |
147 """Filter out any data points that are utterly bogus. | |
148 | |
149 Returns (allowed_revision_data_points, ignored_revision_data_points): | |
150 allowed_revision_data_points: points that survived the filter | |
151 ignored_revision_data_points: points that did NOT survive the filter | |
152 """ | |
153 allowed_revision_data_points = {} # {revision : [BenchDataPoints]} | |
154 ignored_revision_data_points = {} # {revision : [BenchDataPoints]} | |
155 revisions = unfiltered_revision_data_points.keys() | |
156 revisions.sort() | |
157 for revision in revisions: | |
158 for point in unfiltered_revision_data_points[revision]: | |
159             if point.time < MIN_REASONABLE_TIME or point.time > MAX_REASONABLE_TIME: |
160                 add_to_revision_data_points(point, revision, ignored_revision_data_points) |
161             else: |
162                 add_to_revision_data_points(point, revision, allowed_revision_data_points) |
163 return (allowed_revision_data_points, ignored_revision_data_points) | |
164 | |
165 def get_abs_path(relative_path): | |
166 """My own implementation of os.path.abspath() that better handles paths | |
167 which approach Window's 260-character limit. | |
168 See https://code.google.com/p/skia/issues/detail?id=674 | |
169 | |
170 This implementation adds path components one at a time, resolving the | |
171 absolute path each time, to take advantage of any chdirs into outer | |
172 directories that will shorten the total path length. | |
173 | |
174 TODO: share a single implementation with upload_to_bucket.py, instead | |
175 of pasting this same code into both files.""" | |
176 if os.path.isabs(relative_path): | |
177 return relative_path | |
178 path_parts = relative_path.split(os.sep) | |
179 abs_path = os.path.abspath('.') | |
180 for path_part in path_parts: | |
181 abs_path = os.path.abspath(os.path.join(abs_path, path_part)) | |
182 return abs_path | |
183 | |
184 def redirect_stdout(output_path): | |
185 """Redirect all following stdout to a file. | |
186 | |
187 You may be asking yourself, why redirect stdout within Python rather than | |
188 redirecting the script's output in the calling shell? | |
189 The answer lies in https://code.google.com/p/skia/issues/detail?id=674 | |
190 ('buildbot: windows GenerateBenchGraphs step fails due to filename length'): | |
191 On Windows, we need to generate the absolute path within Python to avoid | |
192 the operating system's 260-character pathname limit, including chdirs.""" | |
193 abs_path = get_abs_path(output_path) | |
194 sys.stdout = open(abs_path, 'w') | |
195 | |
196 def create_lines(revision_data_points, settings | |
197 , bench_of_interest, config_of_interest, time_of_interest | |
198 , time_to_ignore): | |
199 """Convert revision data into a dictionary of line data. | |
200 | |
201 Args: | 105 Args: |
202 revision_data_points: a dictionary with integer keys (revision #) and a | 106 revision_data_points: a list of bench data points |
203 list of bench data points as values | |
204 settings: a dictionary of setting names to value | |
205 bench_of_interest: optional filter parameters: which bench type is of | |
206 interest. If None, process them all. | |
207 config_of_interest: optional filter parameters: which config type is of | |
208 interest. If None, process them all. | |
209 time_of_interest: optional filter parameters: which timer type is of | |
210 interest. If None, process them all. | |
211 time_to_ignore: optional timer type to ignore | |
212 | 107 |
213 Returns: | 108 Returns: |
214 a dictionary of this form: | 109 a dictionary of this form: |
215 keys = Label objects | 110 keys = Label objects |
216 values = a list of (x, y) tuples sorted such that x values increase | 111 values = the corresponding bench value |
217 monotonically | |
218 """ | 112 """ |
219 revisions = revision_data_points.keys() | 113 bench_dict = {} |
220 revisions.sort() | 114 for point in revision_data_points: |
221 lines = {} # {Label:[(x,y)] | x[n] <= x[n+1]} | 115 point_name = Label(point.bench,point.config,point.time_type, |
222 for revision in revisions: | 116 point.settings) |
223 for point in revision_data_points[revision]: | 117 if point_name not in bench_dict: |
224 if (bench_of_interest is not None and | 118 bench_dict[point_name] = point.time |
225 not bench_of_interest == point.bench): | 119 else: |
226 continue | 120 raise Exception('Duplicate expectation entry: ' + str(point_name)) |
227 | |
228 if (config_of_interest is not None and | |
229 not config_of_interest == point.config): | |
230 continue | |
231 | |
232 if (time_of_interest is not None and | |
233 not time_of_interest == point.time_type): | |
234 continue | |
235 elif (time_to_ignore is not None and | |
236 time_to_ignore == point.time_type): | |
237 continue | |
238 | |
239 skip = False | |
240 for key, value in settings.items(): | |
241 if key in point.settings and point.settings[key] != value: | |
242 skip = True | |
243 break | |
244 if skip: | |
245 continue | |
246 | |
247 line_name = Label(point.bench | |
248 , point.config | |
249 , point.time_type | |
250 , point.settings) | |
251 | |
252 if line_name not in lines: | |
253 lines[line_name] = [] | |
254 | |
255 lines[line_name].append((revision, point.time)) | |
256 | |
257 return lines | |
258 | 121 |
259 def bounds(lines): | 122 return bench_dict |
260 """Finds the bounding rectangle for the lines. | |
261 | |
262 {Label:[(x,y)]} -> ((min_x, min_y),(max_x,max_y))""" | |
263 min_x = bench_util.Max | |
264 min_y = bench_util.Max | |
265 max_x = bench_util.Min | |
266 max_y = bench_util.Min | |
267 | |
268 for line in lines.itervalues(): | |
269 for x, y in line: | |
270 min_x = min(min_x, x) | |
271 min_y = min(min_y, y) | |
272 max_x = max(max_x, x) | |
273 max_y = max(max_y, y) | |
274 | |
275 return ((min_x, min_y), (max_x, max_y)) | |
276 | 123 |
277 def create_regressions(lines, start_x, end_x): | 124 def read_expectations(expectations, filename): |
278 """Creates regression data from line segments. | 125 """Reads expectations data from file and put in expectations dict.""" |
279 | 126 for expectation in open(filename).readlines(): |
280 ({Label:[(x,y)] | [n].x <= [n+1].x}, Number, Number) | 127 elements = expectation.strip().split(',') |
281 -> {Label:LinearRegression}""" | 128 if not elements[0] or elements[0].startswith('#'): |
282 regressions = {} # {Label : LinearRegression} | |
283 | |
284 for label, line in lines.iteritems(): | |
285 regression_line = [p for p in line if start_x <= p[0] <= end_x] | |
286 | |
287 if (len(regression_line) < 2): | |
288 continue | 129 continue |
289 regression = bench_util.LinearRegression(regression_line) | 130 if len(elements) != 5: |
290 regressions[label] = regression | 131 raise Exception("Invalid expectation line format: %s" % |
291 | 132 expectation) |
292 return regressions | 133 bench_entry = elements[0] + ',' + elements[1] |
| 134 if bench_entry in expectations: |
| 135 raise Exception("Dup entries for bench expectation %s" % |
| 136 bench_entry) |
| 137 # [<Bench_BmpConfig_TimeType>,<Platform-Alg>] -> (LB, UB) |
| 138 expectations[bench_entry] = (float(elements[-2]), |
| 139 float(elements[-1])) |
293 | 140 |
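As a sanity check on the file format the new read_expectations consumes, a small sketch (Python 2; the bench key, platform suffix, and numbers are invented for illustration) of how one non-comment line becomes an expectations entry; only the first two and the last two of the five fields are used by this reader:

expectations = {}
line = 'desk_amazon.skp_gpu_,SomePlatform-25th,1100.00,1000.00,1250.00'
elements = line.strip().split(',')
bench_entry = elements[0] + ',' + elements[1]
expectations[bench_entry] = (float(elements[-2]), float(elements[-1]))
print expectations
# {'desk_amazon.skp_gpu_,SomePlatform-25th': (1000.0, 1250.0)}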
294 def bounds_slope(regressions): | 141 def check_expectations(lines, expectations, revision, key_suffix): |
295 """Finds the extreme up and down slopes of a set of linear regressions. | 142 """Check if there are benches in the given revising out of range. |
296 | 143 """ |
297     ({Label:LinearRegression}) -> (max_up_slope, min_down_slope)""" | 144     # The platform for this bot, derived from the key suffix. |
298 max_up_slope = 0 | 145 platform = key_suffix[ : key_suffix.rfind('-')] |
299 min_down_slope = 0 | 146 exceptions = [] |
300 for regression in regressions.itervalues(): | 147 for line in lines: |
301 min_slope = regression.find_min_slope() | 148 line_str = str(line) |
302 max_up_slope = max(max_up_slope, min_slope) | 149 line_str = line_str[ : line_str.find('_{')] |
303 min_down_slope = min(min_down_slope, min_slope) | 150 bench_platform_key = line_str + ',' + key_suffix |
304 | 151 if bench_platform_key not in expectations: |
305 return (max_up_slope, min_down_slope) | 152 continue |
| 153 this_bench_value = lines[line] |
| 154 this_min, this_max = expectations[bench_platform_key] |
| 155 if this_bench_value < this_min or this_bench_value > this_max: |
| 156 exception = 'Bench %s value %s out of range [%s, %s].' % ( |
| 157 bench_platform_key, this_bench_value, this_min, this_max) |
| 158 exceptions.append(exception) |
| 159 if exceptions: |
| 160 raise Exception('Bench values out of range:\n' + |
| 161 '\n'.join(exceptions)) |
306 | 162 |
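The lookup key built in the new check_expectations drops the settings dict from the Label's string form before appending the platform-and-algorithm suffix; a minimal sketch with made-up values:

# str(Label(...)) ends with the settings dict, e.g. "_{'scalar': 'float'}";
# everything from the first '_{' onward is cut, then ',<Platform-Alg>' is added.
line_str = "tabl_worldjournal.skp_gpu__{'scalar': 'float'}"
key_suffix = 'SomePlatform-25th'
bench_platform_key = line_str[: line_str.find('_{')] + ',' + key_suffix
print bench_platform_key   # tabl_worldjournal.skp_gpu_,SomePlatform-25th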
307 def main(): | 163 def main(): |
308 """Parses command line and writes output.""" | 164 """Parses command line and checks bench expectations.""" |
309 | |
310 try: | 165 try: |
311 opts, _ = getopt.getopt(sys.argv[1:] | 166 opts, _ = getopt.getopt(sys.argv[1:], |
312 , "a:b:c:d:e:f:i:l:m:o:r:s:t:x:y:" | 167 "a:b:d:e:r:", |
313 , "default-setting=") | 168 "default-setting=") |
314 except getopt.GetoptError, err: | 169 except getopt.GetoptError, err: |
315 print str(err) | 170 print str(err) |
316 usage() | 171 usage() |
317 sys.exit(2) | 172 sys.exit(2) |
318 | 173 |
319 directory = None | 174 directory = None |
320 config_of_interest = None | |
321 bench_of_interest = None | |
322 time_of_interest = None | |
323 time_to_ignore = None | |
324 output_path = None | |
325 bench_expectations = {} | 175 bench_expectations = {} |
326     appengine_url = None # used for adding data to appengine datastore | 176     rep = '25th'  # bench representation algorithm; defaults to 25th |
327 rep = None # bench representation algorithm | 177 rev = None # git commit hash or svn revision number |
328 revision_range = '0:' | 178 bot = None |
329 regression_range = '0:' | |
330 latest_revision = None | |
331 requested_height = None | |
332 requested_width = None | |
333 title = 'Bench graph' | |
334 settings = {} | |
335 default_settings = {} | |
336 | |
337 def parse_range(range): | |
338 """Takes '<old>[:<new>]' as a string and returns (old, new). | |
339 Any revision numbers that are dependent on the latest revision number | |
340 will be filled in based on latest_revision. | |
341 """ | |
342 old, _, new = range.partition(":") | |
343 old = int(old) | |
344 if old < 0: | |
345 old += latest_revision; | |
346 if not new: | |
347 new = latest_revision; | |
348 new = int(new) | |
349 if new < 0: | |
350 new += latest_revision; | |
351 return (old, new) | |
352 | |
353 def add_setting(settings, setting): | |
354 """Takes <key>[=<value>] adds {key:value} or {key:True} to settings.""" | |
355 name, _, value = setting.partition('=') | |
356 if not value: | |
357 settings[name] = True | |
358 else: | |
359 settings[name] = value | |
360 | |
361 def read_expectations(expectations, filename): | |
362 """Reads expectations data from file and put in expectations dict.""" | |
363 for expectation in open(filename).readlines(): | |
364 elements = expectation.strip().split(',') | |
365 if not elements[0] or elements[0].startswith('#'): | |
366 continue | |
367 if len(elements) != 5: | |
368 raise Exception("Invalid expectation line format: %s" % | |
369 expectation) | |
370 bench_entry = elements[0] + ',' + elements[1] | |
371 if bench_entry in expectations: | |
372 raise Exception("Dup entries for bench expectation %s" % | |
373 bench_entry) | |
374 # [<Bench_BmpConfig_TimeType>,<Platform-Alg>] -> (LB, UB) | |
375 expectations[bench_entry] = (float(elements[-2]), | |
376 float(elements[-1])) | |
377 | |
378 def check_expectations(lines, expectations, newest_revision, key_suffix): | |
379 """Check if there are benches in latest rev outside expected range. | |
380 For exceptions, also outputs URL link for the dashboard plot. | |
381 The link history token format here only works for single-line plots. | |
382 """ | |
383 # The platform for this bot, to pass to the dashboard plot. | |
384 platform = key_suffix[ : key_suffix.rfind('-')] | |
385 # Starting revision for the dashboard plot. | |
386 start_rev = str(newest_revision - 100) # Displays about 100 revisions. | |
387 exceptions = [] | |
388 for line in lines: | |
389 line_str = str(line) | |
390 line_str = line_str[ : line_str.find('_{')] | |
391 bench_platform_key = line_str + ',' + key_suffix | |
392 this_revision, this_bench_value = lines[line][-1] | |
393 if (this_revision != newest_revision or | |
394 bench_platform_key not in expectations): | |
395 # Skip benches without value for latest revision. | |
396 continue | |
397 this_min, this_max = expectations[bench_platform_key] | |
398 if this_bench_value < this_min or this_bench_value > this_max: | |
399 link = '' | |
400 # For skp benches out of range, create dashboard plot link. | |
401 if line_str.find('.skp_') > 0: | |
402 # Extract bench and config for dashboard plot. | |
403 bench, config = line_str.strip('_').split('.skp_') | |
404 link = ' <a href="' | |
405 link += 'http://go/skpdash/SkpDash.html#%s~%s~%s~%s" ' % ( | |
406 start_rev, bench, platform, config) | |
407 link += 'target="_blank">graph</a>' | |
408 exception = 'Bench %s value %s out of range [%s, %s].%s' % ( | |
409 bench_platform_key, this_bench_value, this_min, this_max, | |
410 link) | |
411 exceptions.append(exception) | |
412 if exceptions: | |
413 raise Exception('Bench values out of range:\n' + | |
414 '\n'.join(exceptions)) | |
415 | |
416 def write_to_appengine(line_data_dict, url, newest_revision, bot): | |
417 """Writes latest bench values to appengine datastore. | |
418 line_data_dict: dictionary from create_lines. | |
419 url: the appengine url used to send bench values to write | |
420 newest_revision: the latest revision that this script reads | |
421 bot: the bot platform the bench is run on | |
422 """ | |
423 config_data_dic = {} | |
424 for label in line_data_dict.iterkeys(): | |
425 if not label.bench.endswith('.skp') or label.time_type: | |
426 # filter out non-picture and non-walltime benches | |
427 continue | |
428 config = label.config | |
429 rev, val = line_data_dict[label][-1] | |
430 # This assumes that newest_revision is >= the revision of the last | |
431 # data point we have for each line. | |
432 if rev != newest_revision: | |
433 continue | |
434 if config not in config_data_dic: | |
435 config_data_dic[config] = [] | |
436 config_data_dic[config].append(label.bench.replace('.skp', '') + | |
437 ':%.2f' % val) | |
438 for config in config_data_dic: | |
439 if config_data_dic[config]: | |
440 data = {'master': 'Skia', 'bot': bot, 'test': config, | |
441 'revision': newest_revision, | |
442 'benches': ','.join(config_data_dic[config])} | |
443 req = urllib2.Request(appengine_url, | |
444 urllib.urlencode({'data': json.dumps(data)})) | |
445 try: | |
446 urllib2.urlopen(req) | |
447 except urllib2.HTTPError, e: | |
448 sys.stderr.write("HTTPError for JSON data %s: %s\n" % ( | |
449 data, e)) | |
450 except urllib2.URLError, e: | |
451 sys.stderr.write("URLError for JSON data %s: %s\n" % ( | |
452 data, e)) | |
453 except httplib.HTTPException, e: | |
454 sys.stderr.write("HTTPException for JSON data %s: %s\n" % ( | |
455 data, e)) | |
456 | 179 |
457 try: | 180 try: |
458 for option, value in opts: | 181 for option, value in opts: |
459 if option == "-a": | 182 if option == "-a": |
460 appengine_url = value | 183 rep = value |
461 elif option == "-b": | 184 elif option == "-b": |
462 bench_of_interest = value | 185 bot = value |
463 elif option == "-c": | |
464 config_of_interest = value | |
465 elif option == "-d": | 186 elif option == "-d": |
466 directory = value | 187 directory = value |
467 elif option == "-e": | 188 elif option == "-e": |
468 read_expectations(bench_expectations, value) | 189 read_expectations(bench_expectations, value) |
469 elif option == "-f": | |
470 regression_range = value | |
471 elif option == "-i": | |
472 time_to_ignore = value | |
473 elif option == "-l": | |
474 title = value | |
475 elif option == "-m": | |
476 rep = value | |
477 elif option == "-o": | |
478 output_path = value | |
479 redirect_stdout(output_path) | |
480 elif option == "-r": | 190 elif option == "-r": |
481 revision_range = value | 191 rev = value |
482 elif option == "-s": | |
483 add_setting(settings, value) | |
484 elif option == "-t": | |
485 time_of_interest = value | |
486 elif option == "-x": | |
487 requested_width = int(value) | |
488 elif option == "-y": | |
489 requested_height = int(value) | |
490 elif option == "--default-setting": | |
491 add_setting(default_settings, value) | |
492 else: | 192 else: |
493 usage() | 193 usage() |
494 assert False, "unhandled option" | 194 assert False, "unhandled option" |
495 except ValueError: | 195 except ValueError: |
496 usage() | 196 usage() |
497 sys.exit(2) | 197 sys.exit(2) |
498 | 198 |
499 if directory is None: | 199 if directory is None or bot is None or rev is None: |
500 usage() | 200 usage() |
501 sys.exit(2) | 201 sys.exit(2) |
502 | 202 |
503 if time_of_interest: | 203 platform_and_alg = bot + '-' + rep |
504 time_to_ignore = None | |
505 | 204 |
506 # The title flag (-l) provided in buildbot slave is in the format | 205 data_points = parse_dir(directory, |
507 # Bench_Performance_for_<platform>, and we want to extract <platform> | 206 {}, # Sets default settings to empty. |
508 # for use in platform_and_alg to track matching benches later. If title flag | 207 rev, |
509 # is not in this format, there may be no matching benches in the file | 208 rep) |
510 # provided by the expectation_file flag (-e). | |
511 bot = title # To store the platform as bot name | |
512 platform_and_alg = title | |
513 if platform_and_alg.startswith(TITLE_PREAMBLE): | |
514 bot = platform_and_alg[TITLE_PREAMBLE_LENGTH:] | |
515 platform_and_alg = bot + '-' + rep | |
516 title += ' [representation: %s]' % rep | |
517 | 209 |
518 latest_revision = get_latest_revision(directory) | 210 bench_dict = create_bench_dict(data_points) |
519 oldest_revision, newest_revision = parse_range(revision_range) | |
520 oldest_regression, newest_regression = parse_range(regression_range) | |
521 | |
522 unfiltered_revision_data_points = parse_dir(directory | |
523 , default_settings | |
524 , oldest_revision | |
525 , newest_revision | |
526 , rep) | |
527 | |
528 # Filter out any data points that are utterly bogus... make sure to report | |
529 # that we did so later! | |
530     (allowed_revision_data_points, ignored_revision_data_points) = filter_data_points( |
531 unfiltered_revision_data_points) | |
532 | |
533 # Update oldest_revision and newest_revision based on the data we could find | |
534 all_revision_numbers = allowed_revision_data_points.keys() | |
535 oldest_revision = min(all_revision_numbers) | |
536 newest_revision = max(all_revision_numbers) | |
537 | |
538 lines = create_lines(allowed_revision_data_points | |
539 , settings | |
540 , bench_of_interest | |
541 , config_of_interest | |
542 , time_of_interest | |
543 , time_to_ignore) | |
544 | |
545 regressions = create_regressions(lines | |
546 , oldest_regression | |
547 , newest_regression) | |
548 | |
549 if output_path: | |
550 output_xhtml(lines, oldest_revision, newest_revision, | |
551 ignored_revision_data_points, regressions, requested_width, | |
552 requested_height, title) | |
553 | |
554 if appengine_url: | |
555 write_to_appengine(lines, appengine_url, newest_revision, bot) | |
556 | 211 |
557 if bench_expectations: | 212 if bench_expectations: |
558 check_expectations(lines, bench_expectations, newest_revision, | 213 check_expectations(bench_dict, bench_expectations, rev, |
559 platform_and_alg) | 214 platform_and_alg) |
560 | 215 |
561 def qa(out): | |
562 """Stringify input and quote as an xml attribute.""" | |
563 return xml.sax.saxutils.quoteattr(str(out)) | |
564 def qe(out): | |
565 """Stringify input and escape as xml data.""" | |
566 return xml.sax.saxutils.escape(str(out)) | |
567 | |
568 def create_select(qualifier, lines, select_id=None): | |
569 """Output select with options showing lines which qualifier maps to it. | |
570 | |
571 ((Label) -> str, {Label:_}, str?) -> _""" | |
572 options = {} #{ option : [Label]} | |
573 for label in lines.keys(): | |
574 option = qualifier(label) | |
575 if (option not in options): | |
576 options[option] = [] | |
577 options[option].append(label) | |
578 option_list = list(options.keys()) | |
579 option_list.sort() | |
580 print '<select class="lines"', | |
581 if select_id is not None: | |
582 print 'id=%s' % qa(select_id) | |
583 print 'multiple="true" size="10" onchange="updateSvg();">' | |
584 for option in option_list: | |
585 print '<option value=' + qa('[' + | |
586 reduce(lambda x,y:x+json.dumps(str(y))+',',options[option],"")[0:-1] | |
587 + ']') + '>'+qe(option)+'</option>' | |
588 print '</select>' | |
589 | |
590 def output_ignored_data_points_warning(ignored_revision_data_points): | |
591 """Write description of ignored_revision_data_points to stdout as xhtml. | |
592 """ | |
593 num_ignored_points = 0 | |
594 description = '' | |
595 revisions = ignored_revision_data_points.keys() | |
596 if revisions: | |
597 revisions.sort() | |
598 revisions.reverse() | |
599 for revision in revisions: | |
600 num_ignored_points += len(ignored_revision_data_points[revision]) | |
601 points_at_this_revision = [] | |
602 for point in ignored_revision_data_points[revision]: | |
603 points_at_this_revision.append(point.bench) | |
604 points_at_this_revision.sort() | |
605 description += 'r%d: %s\n' % (revision, points_at_this_revision) | |
606 if num_ignored_points == 0: | |
607         print 'Did not discard any data points; all were within the range [%d-%d]' % ( |
608 MIN_REASONABLE_TIME, MAX_REASONABLE_TIME) | |
609 else: | |
610 print '<table width="100%" bgcolor="ff0000"><tr><td align="center">' | |
611 print 'Discarded %d data points outside of range [%d-%d]' % ( | |
612 num_ignored_points, MIN_REASONABLE_TIME, MAX_REASONABLE_TIME) | |
613 print '</td></tr><tr><td width="100%" align="center">' | |
614         print ('<textarea rows="4" style="width:97%" readonly="true" wrap="off">' |
615 + qe(description) + '</textarea>') | |
616 print '</td></tr></table>' | |
617 | |
618 def output_xhtml(lines, oldest_revision, newest_revision, ignored_revision_data_points, |
619 regressions, requested_width, requested_height, title): | |
620 """Outputs an svg/xhtml view of the data.""" | |
621 print '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"', | |
622 print '"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">' | |
623 print '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">' | |
624 print '<head>' | |
625 print '<title>%s</title>' % qe(title) | |
626 print '</head>' | |
627 print '<body>' | |
628 | |
629 output_svg(lines, regressions, requested_width, requested_height) | |
630 | |
631 #output the manipulation controls | |
632 print """ | |
633 <script type="text/javascript">//<![CDATA[ | |
634 function getElementsByClass(node, searchClass, tag) { | |
635 var classElements = new Array(); | |
636 var elements = node.getElementsByTagName(tag); | |
637 var pattern = new RegExp("^|\\s"+searchClass+"\\s|$"); | |
638 for (var i = 0, elementsFound = 0; i < elements.length; ++i) { | |
639 if (pattern.test(elements[i].className)) { | |
640 classElements[elementsFound] = elements[i]; | |
641 ++elementsFound; | |
642 } | |
643 } | |
644 return classElements; | |
645 } | |
646 function getAllLines() { | |
647 var selectElem = document.getElementById('benchSelect'); | |
648 var linesObj = {}; | |
649 for (var i = 0; i < selectElem.options.length; ++i) { | |
650 var lines = JSON.parse(selectElem.options[i].value); | |
651 for (var j = 0; j < lines.length; ++j) { | |
652 linesObj[lines[j]] = true; | |
653 } | |
654 } | |
655 return linesObj; | |
656 } | |
657 function getOptions(selectElem) { | |
658 var linesSelectedObj = {}; | |
659 for (var i = 0; i < selectElem.options.length; ++i) { | |
660 if (!selectElem.options[i].selected) continue; | |
661 | |
662 var linesSelected = JSON.parse(selectElem.options[i].value); | |
663 for (var j = 0; j < linesSelected.length; ++j) { | |
664 linesSelectedObj[linesSelected[j]] = true; | |
665 } | |
666 } | |
667 return linesSelectedObj; | |
668 } | |
669 function objectEmpty(obj) { | |
670 for (var p in obj) { | |
671 return false; | |
672 } | |
673 return true; | |
674 } | |
675 function markSelectedLines(selectElem, allLines) { | |
676 var linesSelected = getOptions(selectElem); | |
677 if (!objectEmpty(linesSelected)) { | |
678 for (var line in allLines) { | |
679 allLines[line] &= (linesSelected[line] == true); | |
680 } | |
681 } | |
682 } | |
683 function updateSvg() { | |
684 var allLines = getAllLines(); | |
685 | |
686 var selects = getElementsByClass(document, 'lines', 'select'); | |
687 for (var i = 0; i < selects.length; ++i) { | |
688 markSelectedLines(selects[i], allLines); | |
689 } | |
690 | |
691 for (var line in allLines) { | |
692 var svgLine = document.getElementById(line); | |
693 var display = (allLines[line] ? 'inline' : 'none'); | |
694 svgLine.setAttributeNS(null,'display', display); | |
695 } | |
696 } | |
697 | |
698 function mark(markerId) { | |
699 for (var line in getAllLines()) { | |
700 var svgLineGroup = document.getElementById(line); | |
701 var display = svgLineGroup.getAttributeNS(null,'display'); | |
702 if (display == null || display == "" || display != "none") { | |
703 var svgLine = document.getElementById(line+'_line'); | |
704 if (markerId == null) { | |
705 svgLine.removeAttributeNS(null,'marker-mid'); | |
706 } else { | |
707 svgLine.setAttributeNS(null,'marker-mid', markerId); | |
708 } | |
709 } | |
710 } | |
711 } | |
712 //]]></script>""" | |
713 | |
714 all_settings = {} | |
715 variant_settings = set() | |
716 for label in lines.keys(): | |
717 for key, value in label.settings.items(): | |
718 if key not in all_settings: | |
719 all_settings[key] = value | |
720 elif all_settings[key] != value: | |
721 variant_settings.add(key) | |
722 | |
723 print '<table border="0" width="%s">' % requested_width | |
724 #output column headers | |
725 print """ | |
726 <tr valign="top"><td width="50%"> | |
727 <table border="0" width="100%"> | |
728 <tr><td align="center"><table border="0"> | |
729 <form> | |
730 <tr valign="bottom" align="center"> | |
731 <td width="1">Bench Type</td> | |
732 <td width="1">Bitmap Config</td> | |
733 <td width="1">Timer Type (Cpu/Gpu/wall)</td> | |
734 """ | |
735 | |
736 for k in variant_settings: | |
737 print '<td width="1">%s</td>' % qe(k) | |
738 | |
739 print '<td width="1"><!--buttons--></td></tr>' | |
740 | |
741 #output column contents | |
742 print '<tr valign="top" align="center">' | |
743 print '<td width="1">' | |
744 create_select(lambda l: l.bench, lines, 'benchSelect') | |
745 print '</td><td width="1">' | |
746 create_select(lambda l: l.config, lines) | |
747 print '</td><td width="1">' | |
748 create_select(lambda l: l.time_type, lines) | |
749 | |
750 for k in variant_settings: | |
751 print '</td><td width="1">' | |
752 create_select(lambda l: l.settings.get(k, " "), lines) | |
753 | |
754 print '</td><td width="1"><button type="button"', | |
755 print 'onclick=%s' % qa("mark('url(#circleMark)'); return false;"), | |
756 print '>Mark Points</button>' | |
757 print '<button type="button" onclick="mark(null);">Clear Points</button>' | |
758 print '</td>' | |
759 print """ | |
760 </tr> | |
761 </form> | |
762 </table></td></tr> | |
763 <tr><td align="center"> | |
764 <hr /> | |
765 """ | |
766 | |
767 output_ignored_data_points_warning(ignored_revision_data_points) | |
768 print '</td></tr></table>' | |
769 print '</td><td width="2%"><!--gutter--></td>' | |
770 | |
771 print '<td><table border="0">' | |
772 print '<tr><td align="center">%s<br></br>revisions r%s - r%s</td></tr>' % ( | |
773 qe(title), | |
774 bench_util.CreateRevisionLink(oldest_revision), | |
775 bench_util.CreateRevisionLink(newest_revision)) | |
776 print """ | |
777 <tr><td align="left"> | |
778 <p>Brighter red indicates tests that have gotten worse; brighter green | |
779 indicates tests that have gotten better.</p> | |
780 <p>To highlight individual tests, hold down CONTROL and mouse over | |
781 graph lines.</p> | |
782 <p>To highlight revision numbers, hold down SHIFT and mouse over | |
783 the graph area.</p> | |
784 <p>To only show certain tests on the graph, select any combination of | |
785 tests in the selectors at left. (To show all, select all.)</p> | |
786 <p>Use buttons at left to mark/clear points on the lines for selected | |
787 benchmarks.</p> | |
788 </td></tr> | |
789 </table> | |
790 | |
791 </td> | |
792 </tr> | |
793 </table> | |
794 </body> | |
795 </html>""" | |
796 | |
797 def compute_size(requested_width, requested_height, rev_width, time_height): | |
798 """Converts potentially empty requested size into a concrete size. | |
799 | |
800 (Number?, Number?) -> (Number, Number)""" | |
801 pic_width = 0 | |
802 pic_height = 0 | |
803 if (requested_width is not None and requested_height is not None): | |
804 pic_height = requested_height | |
805 pic_width = requested_width | |
806 | |
807 elif (requested_width is not None): | |
808 pic_width = requested_width | |
809 pic_height = pic_width * (float(time_height) / rev_width) | |
810 | |
811 elif (requested_height is not None): | |
812 pic_height = requested_height | |
813 pic_width = pic_height * (float(rev_width) / time_height) | |
814 | |
815 else: | |
816 pic_height = 800 | |
817 pic_width = max(rev_width*3 | |
818 , pic_height * (float(rev_width) / time_height)) | |
819 | |
820 return (pic_width, pic_height) | |
821 | |
822 def output_svg(lines, regressions, requested_width, requested_height): | |
823 """Outputs an svg view of the data.""" | |
824 | |
825 (global_min_x, _), (global_max_x, global_max_y) = bounds(lines) | |
826 max_up_slope, min_down_slope = bounds_slope(regressions) | |
827 | |
828 #output | |
829 global_min_y = 0 | |
830 x = global_min_x | |
831 y = global_min_y | |
832 w = global_max_x - global_min_x | |
833 h = global_max_y - global_min_y | |
834 font_size = 16 | |
835 line_width = 2 | |
836 | |
837 # If there is nothing to see, don't try to draw anything. | |
838 if w == 0 or h == 0: | |
839 return | |
840 | |
841 pic_width, pic_height = compute_size(requested_width, requested_height | |
842 , w, h) | |
843 | |
844 def cw(w1): | |
845 """Converts a revision difference to display width.""" | |
846 return (pic_width / float(w)) * w1 | |
847 def cx(x): | |
848 """Converts a revision to a horizontal display position.""" | |
849 return cw(x - global_min_x) | |
850 | |
851 def ch(h1): | |
852 """Converts a time difference to a display height.""" | |
853 return -(pic_height / float(h)) * h1 | |
854 def cy(y): | |
855 """Converts a time to a vertical display position.""" | |
856 return pic_height + ch(y - global_min_y) | |
857 | |
858 print '<!--Picture height %.2f corresponds to bench value %.2f.-->' % ( | |
859 pic_height, h) | |
860 print '<svg', | |
861 print 'width=%s' % qa(str(pic_width)+'px') | |
862 print 'height=%s' % qa(str(pic_height)+'px') | |
863 print 'viewBox="0 0 %s %s"' % (str(pic_width), str(pic_height)) | |
864 print 'onclick=%s' % qa( | |
865 "var event = arguments[0] || window.event;" | |
866 " if (event.shiftKey) { highlightRevision(null); }" | |
867 " if (event.ctrlKey) { highlight(null); }" | |
868 " return false;") | |
869 print 'xmlns="http://www.w3.org/2000/svg"' | |
870 print 'xmlns:xlink="http://www.w3.org/1999/xlink">' | |
871 | |
872 print """ | |
873 <defs> | |
874 <marker id="circleMark" | |
875 viewBox="0 0 2 2" refX="1" refY="1" | |
876 markerUnits="strokeWidth" | |
877 markerWidth="2" markerHeight="2" | |
878 orient="0"> | |
879 <circle cx="1" cy="1" r="1"/> | |
880 </marker> | |
881 </defs>""" | |
882 | |
883 #output the revisions | |
884 print """ | |
885 <script type="text/javascript">//<![CDATA[ | |
886 var previousRevision; | |
887 var previousRevisionFill; | |
888 var previousRevisionStroke | |
889 function highlightRevision(id) { | |
890 if (previousRevision == id) return; | |
891 | |
892 document.getElementById('revision').firstChild.nodeValue = 'r' + id; | |
893 document.getElementById('rev_link').setAttribute('xlink:href', | |
894 'http://code.google.com/p/skia/source/detail?r=' + id); | |
895 | |
896 var preRevision = document.getElementById(previousRevision); | |
897 if (preRevision) { | |
898 preRevision.setAttributeNS(null,'fill', previousRevisionFill); | |
899 preRevision.setAttributeNS(null,'stroke', previousRevisionStroke); | |
900 } | |
901 | |
902 var revision = document.getElementById(id); | |
903 previousRevision = id; | |
904 if (revision) { | |
905 previousRevisionFill = revision.getAttributeNS(null,'fill'); | |
906 revision.setAttributeNS(null,'fill','rgb(100%, 95%, 95%)'); | |
907 | |
908 previousRevisionStroke = revision.getAttributeNS(null,'stroke'); | |
909 revision.setAttributeNS(null,'stroke','rgb(100%, 90%, 90%)'); | |
910 } | |
911 } | |
912 //]]></script>""" | |
913 | |
914 def print_rect(x, y, w, h, revision): | |
915 """Outputs a revision rectangle in display space, | |
916 taking arguments in revision space.""" | |
917 disp_y = cy(y) | |
918 disp_h = ch(h) | |
919 if disp_h < 0: | |
920 disp_y += disp_h | |
921 disp_h = -disp_h | |
922 | |
923 print '<rect id=%s x=%s y=%s' % (qa(revision), qa(cx(x)), qa(disp_y),), | |
924 print 'width=%s height=%s' % (qa(cw(w)), qa(disp_h),), | |
925 print 'fill="white"', | |
926 print 'stroke="rgb(98%%,98%%,88%%)" stroke-width=%s' % qa(line_width), | |
927 print 'onmouseover=%s' % qa( | |
928 "var event = arguments[0] || window.event;" | |
929 " if (event.shiftKey) {" | |
930 " highlightRevision('"+str(revision)+"');" | |
931 " return false;" | |
932 " }"), | |
933 print ' />' | |
934 | |
935 xes = set() | |
936 for line in lines.itervalues(): | |
937 for point in line: | |
938 xes.add(point[0]) | |
939 revisions = list(xes) | |
940 revisions.sort() | |
941 | |
942 left = x | |
943 current_revision = revisions[0] | |
944 for next_revision in revisions[1:]: | |
945 width = (((next_revision - current_revision) / 2.0) | |
946 + (current_revision - left)) | |
947 print_rect(left, y, width, h, current_revision) | |
948 left += width | |
949 current_revision = next_revision | |
950 print_rect(left, y, x+w - left, h, current_revision) | |
951 | |
952 #output the lines | |
953 print """ | |
954 <script type="text/javascript">//<![CDATA[ | |
955 var previous; | |
956 var previousColor; | |
957 var previousOpacity; | |
958 function highlight(id) { | |
959 if (previous == id) return; | |
960 | |
961 document.getElementById('label').firstChild.nodeValue = id; | |
962 | |
963 var preGroup = document.getElementById(previous); | |
964 if (preGroup) { | |
965 var preLine = document.getElementById(previous+'_line'); | |
966 preLine.setAttributeNS(null,'stroke', previousColor); | |
967 preLine.setAttributeNS(null,'opacity', previousOpacity); | |
968 | |
969 var preSlope = document.getElementById(previous+'_linear'); | |
970 if (preSlope) { | |
971 preSlope.setAttributeNS(null,'visibility', 'hidden'); | |
972 } | |
973 } | |
974 | |
975 var group = document.getElementById(id); | |
976 previous = id; | |
977 if (group) { | |
978 group.parentNode.appendChild(group); | |
979 | |
980 var line = document.getElementById(id+'_line'); | |
981 previousColor = line.getAttributeNS(null,'stroke'); | |
982 previousOpacity = line.getAttributeNS(null,'opacity'); | |
983 line.setAttributeNS(null,'stroke', 'blue'); | |
984 line.setAttributeNS(null,'opacity', '1'); | |
985 | |
986 var slope = document.getElementById(id+'_linear'); | |
987 if (slope) { | |
988 slope.setAttributeNS(null,'visibility', 'visible'); | |
989 } | |
990 } | |
991 } | |
992 //]]></script>""" | |
993 | |
994 # Add a new element to each item in the 'lines' list: the label in string | |
995 # form. Then use that element to sort the list. | |
996 sorted_lines = [] | |
997 for label, line in lines.items(): | |
998 sorted_lines.append([str(label), label, line]) | |
999 sorted_lines.sort() | |
1000 | |
1001 for label_as_string, label, line in sorted_lines: | |
1002 print '<g id=%s>' % qa(label_as_string) | |
1003 r = 128 | |
1004 g = 128 | |
1005 b = 128 | |
1006 a = .10 | |
1007 if label in regressions: | |
1008 regression = regressions[label] | |
1009 min_slope = regression.find_min_slope() | |
1010 if min_slope < 0: | |
1011 d = max(0, (min_slope / min_down_slope)) | |
1012 g += int(d*128) | |
1013 a += d*0.9 | |
1014 elif min_slope > 0: | |
1015 d = max(0, (min_slope / max_up_slope)) | |
1016 r += int(d*128) | |
1017 a += d*0.9 | |
1018 | |
1019 slope = regression.slope | |
1020 intercept = regression.intercept | |
1021 min_x = regression.min_x | |
1022 max_x = regression.max_x | |
1023 print '<polyline id=%s' % qa(str(label)+'_linear'), | |
1024 print 'fill="none" stroke="yellow"', | |
1025 print 'stroke-width=%s' % qa(abs(ch(regression.serror*2))), | |
1026 print 'opacity="0.5" pointer-events="none" visibility="hidden"', | |
1027 print 'points="', | |
1028 print '%s,%s' % (str(cx(min_x)), str(cy(slope*min_x + intercept))), | |
1029 print '%s,%s' % (str(cx(max_x)), str(cy(slope*max_x + intercept))), | |
1030 print '"/>' | |
1031 | |
1032 print '<polyline id=%s' % qa(str(label)+'_line'), | |
1033 print 'onmouseover=%s' % qa( | |
1034 "var event = arguments[0] || window.event;" | |
1035 " if (event.ctrlKey) {" | |
1036 " highlight('"+str(label).replace("'", "\\'")+"');" | |
1037 " return false;" | |
1038 " }"), | |
1039 print 'fill="none" stroke="rgb(%s,%s,%s)"' % (str(r), str(g), str(b)), | |
1040 print 'stroke-width=%s' % qa(line_width), | |
1041 print 'opacity=%s' % qa(a), | |
1042 print 'points="', | |
1043 for point in line: | |
1044 print '%s,%s' % (str(cx(point[0])), str(cy(point[1]))), | |
1045 print '"/>' | |
1046 | |
1047 print '</g>' | |
1048 | |
1049 #output the labels | |
1050 print '<text id="label" x="0" y=%s' % qa(font_size), | |
1051 print 'font-size=%s> </text>' % qa(font_size) | |
1052 | |
1053 print '<a id="rev_link" xlink:href="" target="_top">' | |
1054 print '<text id="revision" x="0" y=%s style="' % qa(font_size*2) | |
1055 print 'font-size: %s; ' % qe(font_size) | |
1056 print 'stroke: #0000dd; text-decoration: underline; ' | |
1057 print '"> </text></a>' | |
1058 | |
1059 print '</svg>' | |
1060 | 216 |
1061 if __name__ == "__main__": | 217 if __name__ == "__main__": |
1062 main() | 218 main() |