Chromium Code Reviews

Unified diff: telemetry/telemetry/internal/story_runner.py ('+' marks lines added in this patch set)

Issue 2749633004: Track telemetry benchmark cycle time (Closed)
Patch Set: Add tests (created 3 years, 9 months ago)
 # Copyright 2014 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 import logging
 import optparse
 import os
 import subprocess
 import sys
 import time

 import py_utils
 from py_utils import cloud_storage  # pylint: disable=import-error

 from telemetry.core import exceptions
 from telemetry import decorators
 from telemetry.internal.actions import page_action
 from telemetry.internal.browser import browser_finder
 from telemetry.internal.results import results_options
 from telemetry.internal.util import exception_formatter
 from telemetry import page
 from telemetry.page import legacy_page_test
 from telemetry import story as story_module
 from telemetry.util import wpr_modes
 from telemetry.value import failure
 from telemetry.value import skip
+from telemetry.value import scalar
 from telemetry.web_perf import story_test


 class ArchiveError(Exception):
   pass


 def AddCommandLineArgs(parser):
   story_module.StoryFilter.AddCommandLineArgs(parser)
   results_options.AddResultsOptions(parser)
(...skipping 257 matching lines...)
             story.display_name)


 def RunBenchmark(benchmark, finder_options):
   """Run this test with the given options.

   Returns:
     The number of failure values (up to 254) or 255 if there is an uncaught
     exception.
   """
+  start = time.time()
   benchmark.CustomizeBrowserOptions(finder_options.browser_options)

   benchmark_metadata = benchmark.GetMetadata()
   possible_browser = browser_finder.FindBrowser(finder_options)
   if not possible_browser:
     print ('Cannot find browser of type %s. To list out all '
            'available browsers, rerun your command with '
            '--browser=list' % finder_options.browser_options.browser_type)
     return 1
   if (possible_browser and
       not decorators.IsBenchmarkEnabled(benchmark, possible_browser)):
     print '%s is disabled on the selected browser' % benchmark.Name()
     if finder_options.run_disabled_tests:
       print 'Running benchmark anyway due to: --also-run-disabled-tests'
     else:
       print 'Try --also-run-disabled-tests to force the benchmark to run.'
       # If chartjson is specified, this will print a dict indicating the
       # benchmark name and disabled state.
       with results_options.CreateResults(
           benchmark_metadata, finder_options,
           benchmark.ValueCanBeAddedPredicate, benchmark_enabled=False
           ) as results:
         results.PrintSummary()
       # When a disabled benchmark is run we now want to return success since
       # we are no longer filtering these out in the buildbot recipes.
       return 0

   pt = benchmark.CreatePageTest(finder_options)
   pt.__name__ = benchmark.__class__.__name__

   disabled_attr_name = decorators.DisabledAttributeName(benchmark)
   # pylint: disable=protected-access
   pt._disabled_strings = getattr(benchmark, disabled_attr_name, set())
   if hasattr(benchmark, '_enabled_strings'):
     # pylint: disable=protected-access
     pt._enabled_strings = benchmark._enabled_strings
(...skipping 19 matching lines...)
       benchmark_metadata, finder_options,
       benchmark.ValueCanBeAddedPredicate, benchmark_enabled=True) as results:
     try:
       Run(pt, stories, finder_options, results, benchmark.max_failures,
           should_tear_down_state_after_each_story_run,
           benchmark.ShouldTearDownStateAfterEachStorySetRun())
       return_code = min(254, len(results.failures))
     except Exception:
       exception_formatter.PrintFormattedException()
       return_code = 255
+    finally:
+      duration = time.time() - start
nednguyen 2017/03/17 20:49:32 I mean duration should be after the finally block
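A minimal sketch of one reading of that suggestion: compute the elapsed time on the line after the try/except block rather than inside a finally clause, since that line is reached on both the success path and the handled-failure path. This is a hypothetical rearrangement for illustration, not the code from a later patch set:

    try:
      Run(pt, stories, finder_options, results, benchmark.max_failures,
          should_tear_down_state_after_each_story_run,
          benchmark.ShouldTearDownStateAfterEachStorySetRun())
      return_code = min(254, len(results.failures))
    except Exception:
      exception_formatter.PrintFormattedException()
      return_code = 255
    # 'start' is recorded at the top of RunBenchmark, as in the patch above.
    # This line runs on both the success and handled-failure paths, so no
    # finally clause is needed just to record the duration.
    duration = time.time() - start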

     try:
       if finder_options.upload_results:
         bucket = finder_options.upload_bucket
         if bucket in cloud_storage.BUCKET_ALIASES:
           bucket = cloud_storage.BUCKET_ALIASES[bucket]
         results.UploadTraceFilesToCloud(bucket)
         results.UploadProfilingFilesToCloud(bucket)
     finally:
+      results.AddSummaryValue(scalar.ScalarValue(
+          None, 'BenchmarkDuration', 'minutes', duration / 60))
nednguyen 2017/03/17 20:55:08 nits: divide by 60.0
martiniss 2017/03/23 23:22:21 Done
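For reference, a sketch of the reviewed lines with the nit applied; dividing by 60.0 forces float division, which keeps the result exact even if duration were ever an integer under Python 2 division rules (duration is already a float here, so this is purely defensive):

      results.AddSummaryValue(scalar.ScalarValue(
          None, 'BenchmarkDuration', 'minutes', duration / 60.0))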
       results.PrintSummary()
   return return_code


 def _UpdateAndCheckArchives(archive_data_file, wpr_archive_info,
                             filtered_stories):
   """Verifies that all stories are local or have WPR archives.

   Logs warnings and returns False if any are missing.
   """
(...skipping 70 matching lines...)
     logging.warning('Device is thermally throttled before running '
                     'performance tests, results will vary.')


 def _CheckThermalThrottling(platform):
   if not platform.CanMonitorThermalThrottling():
     return
   if platform.HasBeenThermallyThrottled():
     logging.warning('Device has been thermally throttled during '
                     'performance tests, results will vary.')
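Taken together, the lines added by this patch implement a simple wall-clock timing pattern: record a start timestamp when RunBenchmark begins, compute the elapsed time once story running finishes (whether or not it raised), and report it as a 'BenchmarkDuration' summary value in minutes. Below is a condensed, self-contained sketch of that pattern; run_stories and report_summary_value are hypothetical stand-ins for Run() and results.AddSummaryValue() in the real code:

    import time


    def run_with_cycle_time(run_stories, report_summary_value):
      # Record the wall-clock start of the benchmark cycle.
      start = time.time()
      try:
        return_code = run_stories()
      except Exception:
        # Mirror RunBenchmark: an uncaught exception maps to return code 255.
        return_code = 255
      finally:
        # Elapsed time is captured even if run_stories() raised.
        duration = time.time() - start
      # Report the cycle time in minutes, like the 'BenchmarkDuration' value
      # added by this patch (60.0 keeps the division in floating point).
      report_summary_value('BenchmarkDuration', 'minutes', duration / 60.0)
      return return_code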