Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(281)

Side by Side Diff: tools/perf/benchmarks/dom_perf.py

Issue 1142453002: [Telemetry] Remove page_test.DidRunTest hook. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | tools/telemetry/telemetry/internal/story_runner_unittest.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2013 The Chromium Authors. All rights reserved. 1 # Copyright 2013 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 import json 5 import json
6 import math 6 import math
7 import os 7 import os
8 8
9 from telemetry import benchmark 9 from telemetry import benchmark
10 from telemetry.core import util 10 from telemetry.core import util
(...skipping 21 matching lines...) Expand all
32 mean = math.pow(math.e, (log_sum / len(new_values))) 32 mean = math.pow(math.e, (log_sum / len(new_values)))
33 # Return the rounded mean. 33 # Return the rounded mean.
34 return int(round(mean)) 34 return int(round(mean))
35 35
36 36
37 SCORE_UNIT = 'score (bigger is better)' 37 SCORE_UNIT = 'score (bigger is better)'
38 SCORE_TRACE_NAME = 'score' 38 SCORE_TRACE_NAME = 'score'
39 39
40 40
41 class _DomPerfMeasurement(page_test.PageTest): 41 class _DomPerfMeasurement(page_test.PageTest):
42 def __init__(self): 42 def __init__(self, expected_num_runs):
43 super(_DomPerfMeasurement, self).__init__() 43 super(_DomPerfMeasurement, self).__init__()
44 self._expected_num_runs = expected_num_runs
45 self._runs_count = 0
44 46
45 def ValidateAndMeasurePage(self, page, tab, results): 47 def ValidateAndMeasurePage(self, page, tab, results):
46 try: 48 try:
47 def _IsDone(): 49 def _IsDone():
48 return tab.GetCookieByName('__domperf_finished') == '1' 50 return tab.GetCookieByName('__domperf_finished') == '1'
49 util.WaitFor(_IsDone, 600) 51 util.WaitFor(_IsDone, 600)
50 52
51 data = json.loads(tab.EvaluateJavaScript('__domperf_result')) 53 data = json.loads(tab.EvaluateJavaScript('__domperf_result'))
52 for suite in data['BenchmarkSuites']: 54 for suite in data['BenchmarkSuites']:
53 # Skip benchmarks that we didn't actually run this time around. 55 # Skip benchmarks that we didn't actually run this time around.
54 if len(suite['Benchmarks']) or suite['score']: 56 if len(suite['Benchmarks']) or suite['score']:
55 results.AddValue(scalar.ScalarValue( 57 results.AddValue(scalar.ScalarValue(
56 results.current_page, '%s.%s' % (suite['name'], SCORE_TRACE_NAME), 58 results.current_page, '%s.%s' % (suite['name'], SCORE_TRACE_NAME),
57 SCORE_UNIT, suite['score'], important=False)) 59 SCORE_UNIT, suite['score'], important=False))
60
58 finally: 61 finally:
59 tab.EvaluateJavaScript('document.cookie = "__domperf_finished=0"') 62 tab.EvaluateJavaScript('document.cookie = "__domperf_finished=0"')
63 self._runs_count += 1
64 # Only compute the total metric once we reach the expected number of runs.
65 if self._runs_count == self._expected_num_runs:
66 self._ComputeTotalMetric(results)
60 67
61 def DidRunTest(self, browser, results): 68 def _ComputeTotalMetric(self, results):
62 # Now give the geometric mean as the total for the combined runs. 69 # Now give the geometric mean as the total for the combined runs.
63 combined = merge_values.MergeLikeValuesFromDifferentPages( 70 combined = merge_values.MergeLikeValuesFromDifferentPages(
64 results.all_page_specific_values, 71 results.all_page_specific_values,
65 group_by_name_suffix=True) 72 group_by_name_suffix=True)
66 combined_score = [x for x in combined if x.name == SCORE_TRACE_NAME][0] 73 combined_score = [x for x in combined if x.name == SCORE_TRACE_NAME][0]
67 total = _GeometricMean(combined_score.values) 74 total = _GeometricMean(combined_score.values)
68 results.AddSummaryValue( 75 results.AddSummaryValue(
69 scalar.ScalarValue(None, 'Total.' + SCORE_TRACE_NAME, SCORE_UNIT, 76 scalar.ScalarValue(None, 'Total.' + SCORE_TRACE_NAME, SCORE_UNIT,
70 total)) 77 total))
71 78
72 79
73 @benchmark.Disabled('android', 'linux') # http://crbug.com/458540 80 @benchmark.Disabled('android', 'linux') # http://crbug.com/458540
74 class DomPerf(benchmark.Benchmark): 81 class DomPerf(benchmark.Benchmark):
75 """A suite of JavaScript benchmarks for exercising the browser's DOM. 82 """A suite of JavaScript benchmarks for exercising the browser's DOM.
76 83
77 The final score is computed as the geometric mean of the individual results. 84 The final score is computed as the geometric mean of the individual results.
78 Scores are not comparable across benchmark suite versions and higher scores 85 Scores are not comparable across benchmark suite versions and higher scores
79 means better performance: Bigger is better!""" 86 means better performance: Bigger is better!"""
80 test = _DomPerfMeasurement 87
88 RUN_PARAMS = [
89 'Accessors',
90 'CloneNodes',
91 'CreateNodes',
92 'DOMDivWalk',
93 'DOMTable',
94 'DOMWalk',
95 'Events',
96 'Get+Elements',
97 'GridSort',
98 'Template'
99 ]
81 100
82 @classmethod 101 @classmethod
83 def Name(cls): 102 def Name(cls):
84 return 'dom_perf' 103 return 'dom_perf'
85 104
105 def CreatePageTest(self, options):
106 del options
107 return _DomPerfMeasurement(len(self.RUN_PARAMS))
108
86 def CreatePageSet(self, options): 109 def CreatePageSet(self, options):
87 dom_perf_dir = os.path.join(util.GetChromiumSrcDir(), 'data', 'dom_perf') 110 dom_perf_dir = os.path.join(util.GetChromiumSrcDir(), 'data', 'dom_perf')
88 run_params = [
89 'Accessors',
90 'CloneNodes',
91 'CreateNodes',
92 'DOMDivWalk',
93 'DOMTable',
94 'DOMWalk',
95 'Events',
96 'Get+Elements',
97 'GridSort',
98 'Template'
99 ]
100 ps = page_set.PageSet(file_path=dom_perf_dir) 111 ps = page_set.PageSet(file_path=dom_perf_dir)
101 for param in run_params: 112 for param in self.RUN_PARAMS:
102 ps.AddUserStory(page_module.Page( 113 ps.AddUserStory(page_module.Page(
103 'file://run.html?reportInJS=1&run=%s' % param, ps, ps.base_dir)) 114 'file://run.html?reportInJS=1&run=%s' % param, ps, ps.base_dir))
104 return ps 115 return ps
OLDNEW
« no previous file with comments | « no previous file | tools/telemetry/telemetry/internal/story_runner_unittest.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698