Chromium Code Reviews

Side by Side Diff: tools/perf/core/perf_json_generator.py

Issue 2754883002: Generating benchmark.csv file (Closed)
Patch Set: Created 3 years, 9 months ago

#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Script to generate chromium.perf.json and chromium.perf.fyi.json in
the src/testing/buildbot directory. Maintaining these files by hand is
too unwieldy.
"""
import argparse
import json
import os
import sys

from chrome_telemetry_build import chromium_config

sys.path.append(chromium_config.GetTelemetryDir())
from telemetry import benchmark as benchmark_module
from telemetry.core import discover
from telemetry.util import bot_utils


SCRIPT_TESTS = [
  {
    'args': [
      'gpu_perftests',
      '--adb-path',
      'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb',
    ],
    'name': 'gpu_perftests',
    'script': 'gtest_perf_test.py',
    'testers': {
      'chromium.perf': [
        {
          'name': 'Android Nexus5 Perf',
          'shards': [2]
        },
        {
          'name': 'Android Nexus7v2 Perf',
          'shards': [2]
        }
        # crbug.com/663762
        #{
        #  'name': 'Android Nexus9 Perf',
        #  'shards': [2]
        #}
      ],
    }
  },
  {
    'args': [
      'cc_perftests',
      '--adb-path',
      'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb',
    ],
    'name': 'cc_perftests',
    'script': 'gtest_perf_test.py',
    'testers': {
      'chromium.perf': [
        # crbug.com/698831
        # {
        #   'name': 'Android Nexus5 Perf',
        #   'shards': [2]
        # },
        # {
        #   'name': 'Android Nexus6 Perf',
        #   'shards': [2]
        # },
        # {
        #   'name': 'Android Nexus7v2 Perf',
        #   'shards': [2]
        # },
        {
          'name': 'Android Nexus9 Perf',
          'shards': [2]
        },
      ],
    }
  },
  {
    'args': [
      'tracing_perftests',
      '--adb-path',
      'src/third_party/catapult/devil/bin/deps/linux2/x86_64/bin/adb',
    ],
    'name': 'tracing_perftests',
    'script': 'gtest_perf_test.py',
    'testers': {
      'chromium.perf': [
        {
          'name': 'Android Nexus5 Perf',
          'shards': [2]
        },
        {
          'name': 'Android Nexus6 Perf',
          'shards': [2]
        },
        {
          'name': 'Android Nexus7v2 Perf',
          'shards': [2]
        },
        {
          'name': 'Android Nexus9 Perf',
          'shards': [2]
        },
      ]
    }
  },
]


def add_builder(waterfall, name, additional_compile_targets=None):
  waterfall['builders'][name] = added = {}
  if additional_compile_targets:
    added['additional_compile_targets'] = additional_compile_targets

  return waterfall

def add_tester(waterfall, name, perf_id, platform, target_bits=64,
               num_host_shards=1, num_device_shards=1, swarming=None,
               use_whitelist=False):
  del perf_id  # Currently unused, but it will be needed later.
  waterfall['testers'][name] = {
    'platform': platform,
    'num_device_shards': num_device_shards,
    'num_host_shards': num_host_shards,
    'target_bits': target_bits,
    'use_whitelist': use_whitelist
  }

  if swarming:
    waterfall['testers'][name]['swarming_dimensions'] = swarming
    waterfall['testers'][name]['swarming'] = True

  return waterfall
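# Illustrative shape of the entry add_tester creates (values hypothetical):
#   waterfall['testers']['Some Tester'] = {
#     'platform': 'win', 'num_device_shards': 1, 'num_host_shards': 1,
#     'target_bits': 64, 'use_whitelist': False,
#     # Plus, when swarming dimensions are passed in:
#     'swarming': True, 'swarming_dimensions': [...],
#   }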


def get_fyi_waterfall_config():
  waterfall = {'builders': {}, 'testers': {}}
  waterfall = add_tester(
      waterfall, 'Win 10 Low-End Perf Tests',
      'win-10-low-end', 'win',
      swarming=[
        {
          'gpu': '1002:9874',
          'os': 'Windows-10-10586',
          'device_ids': [
            'build171-b4', 'build186-b4', 'build202-b4', 'build203-b4',
            'build204-b4', 'build205-b4', 'build206-b4', 'build207-b4',
            'build208-b4', 'build209-b4', 'build210-b4', 'build211-b4',
            'build212-b4', 'build213-b4', 'build214-b4', 'build215-b4',
            'build216-b4', 'build217-b4', 'build218-b4', 'build219-b4',
            'build220-b4', 'build221-b4']
        }
      ])
  waterfall = add_tester(
      waterfall, 'Win 10 4 Core Low-End Perf Tests',
      'win-10-4-core-low-end', 'win',
      swarming=[
        {
          'gpu': '8086:22b1',
          'os': 'Windows-10-10586',
          'device_ids': [
            'build136-b1', 'build137-b1', 'build138-b1', 'build139-b1',
            'build140-b1', 'build141-b1', 'build142-b1', 'build143-b1',
            'build144-b1', 'build145-b1', 'build146-b1', 'build147-b1',
            'build148-b1', 'build149-b1', 'build150-b1', 'build151-b1',
            'build152-b1', 'build153-b1', 'build154-b1', 'build155-b1',
            'build47-b4', 'build48-b4'],
          'perf_tests': [
            ('cc_perftests', 0),
            ('gpu_perftests', 0),
            ('load_library_perf_tests', 0),
            ('angle_perftests', 1),
            ('performance_browser_tests', 1),
            ('tracing_perftests', 1)]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Android Swarming N5X Tester',
      'fyi-android-swarming-n5x', 'android',
      swarming=[
        {
          'os': 'Android',
          'android_devices': '1',
          'device_ids': [
            'build245-m4--device1', 'build245-m4--device2',
            'build245-m4--device3', 'build245-m4--device4',
            'build245-m4--device5', 'build245-m4--device6',
            'build245-m4--device7', 'build248-m4--device1',
            'build248-m4--device2', 'build248-m4--device3',
            'build248-m4--device4', 'build248-m4--device5',
            'build248-m4--device6', 'build248-m4--device7',
            'build249-m4--device1', 'build249-m4--device2',
            'build249-m4--device3', 'build249-m4--device4',
            'build249-m4--device5', 'build249-m4--device6',
            'build249-m4--device7'
          ]
        }
      ])
  return waterfall


def get_waterfall_config():
  waterfall = {'builders': {}, 'testers': {}}

  waterfall = add_builder(
      waterfall, 'Android Compile', additional_compile_targets=[
          'microdump_stackwalk'
      ])
  waterfall = add_builder(
      waterfall, 'Android arm64 Compile', additional_compile_targets=[
          'microdump_stackwalk'
      ])

  # These configurations are taken from chromium_perf.py in
  # build/scripts/slave/recipe_modules/chromium_tests and must be kept in
  # sync to generate the correct json for each tester.
  waterfall = add_tester(
      waterfall, 'Android Nexus5 Perf', 'android-nexus5',
      'android', target_bits=32, num_device_shards=7, num_host_shards=3)
  waterfall = add_tester(
      waterfall, 'Android Nexus5X Perf', 'android-nexus5X',
      'android', target_bits=32, num_device_shards=7, num_host_shards=3)
  waterfall = add_tester(
      waterfall, 'Android Nexus6 Perf', 'android-nexus6',
      'android', target_bits=32, num_device_shards=7, num_host_shards=3)
  waterfall = add_tester(
      waterfall, 'Android Nexus7v2 Perf', 'android-nexus7v2',
      'android', target_bits=32, num_device_shards=7, num_host_shards=3)
  waterfall = add_tester(
      waterfall, 'Android One Perf', 'android-one',
      'android', target_bits=32, num_device_shards=7, num_host_shards=3)

  waterfall = add_tester(
      waterfall, 'Win Zenbook Perf', 'win-zenbook', 'win',
      swarming=[
        {
          'gpu': '8086:161e',
          'os': 'Windows-10-10240',
          'device_ids': [
            'build30-b1', 'build31-b1',
            'build32-b1', 'build33-b1', 'build34-b1'
          ]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Win 10 High-DPI Perf', 'win-high-dpi', 'win',
      swarming=[
        {
          'gpu': '8086:1616',
          'os': 'Windows-10-10240',
          'device_ids': [
            'build117-b1', 'build118-b1',
            'build119-b1', 'build120-b1', 'build121-b1'
          ]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Win 10 Perf', 'chromium-rel-win10', 'win',
      swarming=[
        {
          'gpu': '102b:0534',
          'os': 'Windows-10-10240',
          'device_ids': [
            'build132-m1', 'build133-m1',
            'build134-m1', 'build135-m1', 'build136-m1'
          ],
          'perf_tests': [
            ('media_perftests', 2)]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Win 8 Perf', 'chromium-rel-win8-dual', 'win',
      swarming=[
        {
          'gpu': '102b:0532',
          'os': 'Windows-2012ServerR2-SP0',
          'device_ids': [
            'build143-m1', 'build144-m1',
            'build145-m1', 'build146-m1', 'build147-m1'
          ],
          'perf_tests': [
            ('load_library_perf_tests', 2),
            ('performance_browser_tests', 2),
            ('media_perftests', 3)]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Win 7 Perf', 'chromium-rel-win7-dual',
      'win', target_bits=32,
      swarming=[
        {
          'gpu': '102b:0532',
          'os': 'Windows-2008ServerR2-SP1',
          'device_ids': [
            'build185-m1', 'build186-m1',
            'build187-m1', 'build188-m1', 'build189-m1'
          ],
          'perf_tests': [
            ('load_library_perf_tests', 2),
            ('performance_browser_tests', 2),
            ('media_perftests', 3)]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Win 7 x64 Perf',
      'chromium-rel-win7-x64-dual', 'win',
      swarming=[
        {
          'gpu': '102b:0532',
          'os': 'Windows-2008ServerR2-SP1',
          'device_ids': [
            'build138-m1', 'build139-m1',
            'build140-m1', 'build141-m1', 'build142-m1'
          ],
          'perf_tests': [
            ('load_library_perf_tests', 2),
            ('performance_browser_tests', 2)]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Win 7 ATI GPU Perf',
      'chromium-rel-win7-gpu-ati', 'win',
      swarming=[
        {
          'gpu': '1002:6613',
          'os': 'Windows-2008ServerR2-SP1',
          'device_ids': [
            'build101-m1', 'build102-m1',
            'build103-m1', 'build104-m1', 'build105-m1'
          ],
          'perf_tests': [
            ('angle_perftests', 2),
            ('load_library_perf_tests', 2),
            ('performance_browser_tests', 2),
            ('media_perftests', 3)]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Win 7 Intel GPU Perf',
      'chromium-rel-win7-gpu-intel', 'win',
      swarming=[
        {
          'gpu': '8086:041a',
          'os': 'Windows-2008ServerR2-SP1',
          'device_ids': [
            'build164-m1', 'build165-m1',
            'build166-m1', 'build167-m1', 'build168-m1'
          ],
          'perf_tests': [
            ('angle_perftests', 2),
            ('load_library_perf_tests', 2),
            ('performance_browser_tests', 2)]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Win 7 Nvidia GPU Perf',
      'chromium-rel-win7-gpu-nvidia', 'win',
      swarming=[
        {
          'gpu': '10de:104a',
          'os': 'Windows-2008ServerR2-SP1',
          'device_ids': [
            'build92-m1', 'build93-m1',
            'build94-m1', 'build95-m1', 'build96-m1'
          ],
          'perf_tests': [
            ('angle_perftests', 2),
            ('load_library_perf_tests', 2),
            ('performance_browser_tests', 2),
            ('media_perftests', 3)]
        }
      ])

  waterfall = add_tester(
      waterfall, 'Mac 10.11 Perf', 'chromium-rel-mac11',
      'mac',
      swarming=[
        {
          'gpu': '8086:0166',
          'os': 'Mac-10.11',
          'device_ids': [
            'build102-b1', 'build103-b1',
            'build104-b1', 'build105-b1', 'build106-b1'
          ],
          'perf_tests': [
            ('media_perftests', 3)]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Mac 10.12 Perf', 'chromium-rel-mac12',
      'mac',
      swarming=[
        {
          'os': 'Mac-10.12',
          'gpu': '8086:0a2e',
          'device_ids': [
            'build158-m1', 'build159-m1', 'build160-m1',
            'build161-m1', 'build162-m1']
        }
      ])
  waterfall = add_tester(
      waterfall, 'Mac Retina Perf',
      'chromium-rel-mac-retina', 'mac',
      swarming=[
        {
          'gpu': '8086:0d26',
          'os': 'Mac-10.11',
          'device_ids': [
            'build4-b1', 'build5-b1', 'build6-b1', 'build7-b1', 'build8-b1'
          ]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Mac Pro 10.11 Perf',
      'chromium-rel-mac11-pro', 'mac',
      swarming=[
        {
          'gpu': '1002:6821',
          'os': 'Mac-10.11',
          'device_ids': [
            'build128-b1', 'build129-b1',
            'build130-b1', 'build131-b1', 'build132-b1'
          ]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Mac Air 10.11 Perf',
      'chromium-rel-mac11-air', 'mac',
      swarming=[
        {
          'gpu': '8086:1626',
          'os': 'Mac-10.11',
          'device_ids': [
            'build123-b1', 'build124-b1',
            'build125-b1', 'build126-b1', 'build127-b1'
          ]
        }
      ])
  waterfall = add_tester(
      waterfall, 'Mac Mini 8GB 10.12 Perf',
      'chromium-rel-mac12-mini-8gb', 'mac',
      swarming=[
        {
          'gpu': '8086:0a26',
          'os': 'Mac-10.12',
          'device_ids': [
            'build24-b1', 'build25-b1',
            'build26-b1', 'build27-b1', 'build28-b1'
          ]
        }
      ])

  waterfall = add_tester(
      waterfall, 'Linux Perf', 'linux-release', 'linux',
      swarming=[
        {
          'gpu': '102b:0534',
          'os': 'Ubuntu-14.04',
          'device_ids': [
            'build148-m1', 'build149-m1',
            'build150-m1', 'build151-m1', 'build152-m1'
          ],
          'perf_tests': [
            # crbug.com/698831
            # ('cc_perftests', 2),
            ('load_library_perf_tests', 2),
            ('tracing_perftests', 2),
            ('media_perftests', 3)]
        }
      ])

  return waterfall


def generate_isolate_script_entry(swarming_dimensions, test_args,
    isolate_name, step_name, override_compile_targets=None,
    swarming_timeout=None):
  result = {
    'args': test_args,
    'isolate_name': isolate_name,
    'name': step_name,
  }
  if override_compile_targets:
    result['override_compile_targets'] = override_compile_targets
  if swarming_dimensions:
    result['swarming'] = {
      # Always say this is true regardless of whether the tester
      # supports swarming. It doesn't hurt.
      'can_use_on_swarming_builders': True,
      'expiration': 21600,
      'hard_timeout': swarming_timeout if swarming_timeout else 7200,
      'io_timeout': 3600,
      'dimension_sets': swarming_dimensions,
    }
  return result
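# Sketch of a dict generate_isolate_script_entry returns (values
# hypothetical):
#   {
#     'args': [...],
#     'isolate_name': 'telemetry_perf_tests',
#     'name': 'sunspider',
#     'swarming': {'can_use_on_swarming_builders': True,
#                  'dimension_sets': [...], ...},
#   }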


def generate_telemetry_test(swarming_dimensions, benchmark_name, browser):
  # The step name must end in 'test' or 'tests' in order for the
  # results to automatically show up on the flakiness dashboard.
  # (At least, this was true some time ago.) Continue to use this
  # naming convention for the time being to minimize changes.

  test_args = [
    benchmark_name,
    '-v',
    '--upload-results',
    '--output-format=chartjson',
    '--browser=%s' % browser
  ]
  # When this is enabled on more than just Windows machines, we will need
  # --device=android.

  step_name = benchmark_name
  if browser == 'reference':
    test_args.append('--output-trace-tag=_ref')
    step_name += '.reference'

  return generate_isolate_script_entry(
      swarming_dimensions, test_args, 'telemetry_perf_tests',
      step_name, ['telemetry_perf_tests'],
      swarming_timeout=BENCHMARK_SWARMING_TIMEOUTS.get(benchmark_name))


def script_test_enabled_on_tester(master, test, tester_name, shard):
  for enabled_tester in test['testers'].get(master, []):
    if enabled_tester['name'] == tester_name:
      if shard in enabled_tester['shards']:
        return True
  return False


def generate_script_tests(master, tester_name, shard):
  script_tests = []
  for test in SCRIPT_TESTS:
    if script_test_enabled_on_tester(master, test, tester_name, shard):
      script = {
        'args': test['args'],
        'name': test['name'],
        'script': test['script']
      }
      script_tests.append(script)
  return script_tests


def get_swarming_dimension(dimension, device_affinity):
  complete_dimension = {
    'id': dimension['device_ids'][device_affinity],
    'os': dimension['os'],
    'pool': 'Chrome-perf',
  }
  if 'gpu' in dimension:
    complete_dimension['gpu'] = dimension['gpu']
  if 'android_devices' in dimension:
    complete_dimension['android_devices'] = dimension['android_devices']
  return complete_dimension
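# Sketch of the lookup get_swarming_dimension performs, with hypothetical
# inputs: dimension={'os': 'Mac-10.12', 'gpu': '8086:0a2e',
# 'device_ids': ['a', 'b', 'c']} and device_affinity=1 yield
# {'id': 'b', 'os': 'Mac-10.12', 'pool': 'Chrome-perf', 'gpu': '8086:0a2e'}.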


def generate_cplusplus_isolate_script_test(dimension):
  return [
    generate_isolate_script_entry(
        [get_swarming_dimension(dimension, shard)], [], name, name)
    for name, shard in dimension['perf_tests']
  ]


def generate_telemetry_tests(
    tester_config, benchmarks, benchmark_sharding_map, use_whitelist):
  isolated_scripts = []
  # First determine the browser that you need based on the tester.
  browser_name = ''
  if tester_config['platform'] == 'android':
    browser_name = 'android-chromium'
  elif (tester_config['platform'] == 'win'
        and tester_config['target_bits'] == 64):
    browser_name = 'release_x64'
  else:
    browser_name = 'release'

  num_shards = len(tester_config['swarming_dimensions'][0]['device_ids'])
  current_shard = 0
  for benchmark in benchmarks:
    # First figure out which swarming dimensions this test needs to be
    # triggered on. For each set of dimensions it is only triggered on one
    # of the devices.
    swarming_dimensions = []
    for dimension in tester_config['swarming_dimensions']:
      device_affinity = None
      if benchmark_sharding_map:
        sharding_map = benchmark_sharding_map.get(str(num_shards), None)
        if not sharding_map and not use_whitelist:
          raise Exception('Invalid number of shards, generate new sharding map')
        if use_whitelist:
          device_affinity = current_shard
        else:
          device_affinity = sharding_map.get(benchmark.Name(), None)
      else:
        # No sharding map was provided; default to the legacy device
        # affinity algorithm.
        device_affinity = bot_utils.GetDeviceAffinity(
            num_shards, benchmark.Name())
      if device_affinity is None:
        raise Exception('Device affinity for benchmark %s not found'
                        % benchmark.Name())
      swarming_dimensions.append(
          get_swarming_dimension(dimension, device_affinity))

    test = generate_telemetry_test(
        swarming_dimensions, benchmark.Name(), browser_name)
    isolated_scripts.append(test)
    # Now create another executable for this benchmark on the reference
    # browser.
    reference_test = generate_telemetry_test(
        swarming_dimensions, benchmark.Name(), 'reference')
    isolated_scripts.append(reference_test)
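    # current_shard below implements a simple round-robin over the device
    # list; it only determines device affinity in the use_whitelist case
    # handled above.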
    if current_shard == (num_shards - 1):
      current_shard = 0
    else:
      current_shard += 1

  return isolated_scripts

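# Benchmarks listed here are the only ones run on testers configured with
# use_whitelist=True (see current_benchmarks below).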
BENCHMARK_NAME_WHITELIST = set([
  u'smoothness.top_25_smooth',
  u'sunspider',
  u'system_health.webview_startup',
  u'page_cycler_v2.intl_hi_ru',
  u'dromaeo.cssqueryjquery',
])

# List of benchmarks that are never to be run on a waterfall.
BENCHMARK_NAME_BLACKLIST = [
  'multipage_skpicture_printer',
  'multipage_skpicture_printer_ct',
  'rasterize_and_record_micro_ct',
  'repaint_ct',
  'skpicture_printer',
  'skpicture_printer_ct',
]

# Overrides the default 2 hour timeout for swarming tasks.
BENCHMARK_SWARMING_TIMEOUTS = {
  'loading.mobile': 14400,
}

# Certain swarming bots are not sharding correctly with the new device
# affinity algorithm. Revert to the legacy algorithm to try and get them to
# complete. See crbug.com/670284.
LEGACY_DEVICE_AFFINITY_ALGORITHM = [
  'Win Zenbook Perf',
  'Win 10 High-DPI Perf',
]

def current_benchmarks(use_whitelist):
  benchmarks_dir = os.path.join(src_dir(), 'tools', 'perf', 'benchmarks')
  top_level_dir = os.path.dirname(benchmarks_dir)

  all_benchmarks = discover.DiscoverClasses(
      benchmarks_dir, top_level_dir, benchmark_module.Benchmark,
      index_by_class_name=True).values()
  # Remove all blacklisted benchmarks.
  all_benchmarks = [b for b in all_benchmarks
                    if b.Name() not in BENCHMARK_NAME_BLACKLIST]

  if use_whitelist:
    all_benchmarks = (
        bench for bench in all_benchmarks
        if bench.Name() in BENCHMARK_NAME_WHITELIST)
  return sorted(all_benchmarks, key=lambda b: b.Name())


# Returns a sorted list of (benchmark, avg_runtime) pairs for every
# benchmark in the all_benchmarks list, where avg_runtime is in seconds.
# Also returns a list of benchmarks whose run times have not been seen
# before.
def get_sorted_benchmark_list_by_time(all_benchmarks):
  runtime_list = []
  benchmark_avgs = {}
  new_benchmarks = []
  timing_file_path = os.path.join(src_dir(), 'tools', 'perf', 'core',
                                  'desktop_benchmark_avg_times.json')
  # Load in the avg times as calculated on Nov 1st, 2016.
  with open(timing_file_path) as f:
    benchmark_avgs = json.load(f)

  for benchmark in all_benchmarks:
    benchmark_avg_time = benchmark_avgs.get(benchmark.Name(), None)
    if benchmark_avg_time is None:
      # Assume this is a new benchmark added after 11/1/16, when the timing
      # data was generated. Add it to the new_benchmarks list; it will be
      # assigned with the legacy affinity algorithm after the timed
      # benchmarks have been distributed.
      new_benchmarks.append(benchmark)
    else:
      # Multiply the seconds by 2 since we generate two tests for each
      # benchmark and the reference build runs on the same shard.
      runtime_list.append((benchmark, benchmark_avg_time * 2.0))

  # Return the list reverse-sorted by runtime.
  runtime_list.sort(key=lambda tup: tup[1], reverse=True)
  return runtime_list, new_benchmarks
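# Assumed format of desktop_benchmark_avg_times.json (names and numbers here
# are illustrative): a flat JSON object mapping benchmark name to average
# runtime in seconds, e.g.
#   {"sunspider": 30.5, "smoothness.top_25_smooth": 240.0}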


# Returns a map of benchmark name to the shard it is on.
def shard_benchmarks(num_shards, all_benchmarks):
  benchmark_to_shard_dict = {}
  shard_execution_times = [0] * num_shards
  sorted_benchmark_list, new_benchmarks = get_sorted_benchmark_list_by_time(
      all_benchmarks)
  # Iterate over the benchmarks in descending order of runtime, assigning
  # each one to the shard with the smallest total runtime so far (greedy
  # bin packing).
  for benchmark in sorted_benchmark_list:
    # Find the current smallest bucket.
    min_index = shard_execution_times.index(min(shard_execution_times))
    benchmark_to_shard_dict[benchmark[0].Name()] = min_index
    shard_execution_times[min_index] += benchmark[1]
  # For all the benchmarks that didn't have avg run times, use the legacy
  # device affinity algorithm.
  for benchmark in new_benchmarks:
    device_affinity = bot_utils.GetDeviceAffinity(num_shards, benchmark.Name())
    benchmark_to_shard_dict[benchmark.Name()] = device_affinity
  return benchmark_to_shard_dict
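# Worked example of the greedy assignment above, with hypothetical runtimes:
# for num_shards=2 and descending runtimes [10, 8, 5, 4], the assignments
# are 10 -> shard 0 (totals [10, 0]), 8 -> shard 1 ([10, 8]),
# 5 -> shard 1 ([10, 13]), then 4 -> shard 0 ([14, 13]).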


def generate_all_tests(waterfall):
  tests = {}

  all_benchmarks = current_benchmarks(False)
  whitelist_benchmarks = current_benchmarks(True)
  # Get benchmark sharding according to common sharding configurations.
  # Currently we have bots sharded 1, 5, 21, and 22 ways.
  benchmark_sharding_map = {}
  benchmark_sharding_map['22'] = shard_benchmarks(22, all_benchmarks)
  benchmark_sharding_map['5'] = shard_benchmarks(5, all_benchmarks)
  benchmark_sharding_map['1'] = shard_benchmarks(1, all_benchmarks)
  benchmark_sharding_map['21'] = shard_benchmarks(21, all_benchmarks)

  for name, config in waterfall['testers'].iteritems():
    use_whitelist = config['use_whitelist']
    benchmark_list = all_benchmarks
    if use_whitelist:
      benchmark_list = whitelist_benchmarks
    if config.get('swarming', False):
      # Our current configuration only ever has one set of swarming
      # dimensions; make sure this still holds true.
      if len(config['swarming_dimensions']) > 1:
        raise Exception('Invalid assumption on number of swarming dimensions')
      # Generate benchmarks.
      sharding_map = benchmark_sharding_map
      if name in LEGACY_DEVICE_AFFINITY_ALGORITHM:
        sharding_map = None
      isolated_scripts = generate_telemetry_tests(
          config, benchmark_list, sharding_map, use_whitelist)
      # Generate swarmed non-telemetry tests if present.
      if config['swarming_dimensions'][0].get('perf_tests', False):
        isolated_scripts += generate_cplusplus_isolate_script_test(
            config['swarming_dimensions'][0])
      tests[name] = {
        'isolated_scripts': sorted(isolated_scripts, key=lambda x: x['name'])
      }
    else:
      # Scripts are currently only run in addition to the main waterfall.
      # They are currently the only thing generated in the perf json file.
      # TODO(eyaich): Handle the sharding differently when we have swarmed
      # bots on the main waterfall.
      for shard in range(0, config['num_host_shards']):
        tester_name = '%s (%d)' % (name, shard + 1)
        scripts = generate_script_tests(waterfall['name'], name, shard + 1)
        if scripts:
          tests[tester_name] = {
            'scripts': sorted(scripts, key=lambda x: x['name'])
          }

  for name, config in waterfall['builders'].iteritems():
    tests[name] = config

  tests['AAAAA1 AUTOGENERATED FILE DO NOT EDIT'] = {}
  tests['AAAAA2 See //tools/perf/generate_perf_json.py to make changes'] = {}
  return tests


def get_json_config_file_for_waterfall(waterfall):
  filename = '%s.json' % waterfall['name']
  buildbot_dir = os.path.join(src_dir(), 'testing', 'buildbot')
  return os.path.join(buildbot_dir, filename)


def tests_are_up_to_date(waterfall):
  tests = generate_all_tests(waterfall)
  tests_data = json.dumps(tests, indent=2, separators=(',', ': '),
                          sort_keys=True)
  config_file = get_json_config_file_for_waterfall(waterfall)
  with open(config_file, 'r') as fp:
    config_data = fp.read().strip()
  return tests_data == config_data


def update_all_tests(waterfall):
  tests = generate_all_tests(waterfall)
  config_file = get_json_config_file_for_waterfall(waterfall)
  with open(config_file, 'w') as fp:
    json.dump(tests, fp, indent=2, separators=(',', ': '), sort_keys=True)
    fp.write('\n')


def src_dir():
  file_path = os.path.abspath(__file__)
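  # __file__ is src/tools/perf/core/perf_json_generator.py, so stepping up
  # four directory levels yields the src/ checkout root.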
  return os.path.dirname(os.path.dirname(
      os.path.dirname(os.path.dirname(file_path))))


def main(args):
  parser = argparse.ArgumentParser(
      description=('Generate the perf tests\' JSON configs. This needs to be '
                   'done any time you add or remove benchmarks in '
                   'tools/perf/benchmarks.'))
  parser.add_argument(
      '--validate-only', action='store_true', default=False,
      help=('Validate that the generated perf JSON matches the existing '
            'configs. This does not change the content of the existing '
            'configs.'))
  options = parser.parse_args(args)

  waterfall = get_waterfall_config()
  waterfall['name'] = 'chromium.perf'
  fyi_waterfall = get_fyi_waterfall_config()
  fyi_waterfall['name'] = 'chromium.perf.fyi'

  if options.validate_only:
    if tests_are_up_to_date(fyi_waterfall) and tests_are_up_to_date(waterfall):
      print 'All the perf JSON config files are up-to-date. \\o/'
      return 0
    else:
      print ('The perf JSON config files are not up-to-date. Please run %s '
             'without the --validate-only flag to update the perf JSON '
             'configs.') % sys.argv[0]
      return 1
  else:
    update_all_tests(fyi_waterfall)
    update_all_tests(waterfall)
    return 0