Chromium Code Reviews
Unified Diff: tools/perf/benchmarks/dom_perf.py

Issue 27486002: Cleanup of page_measurement_results object (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: "keep on trying" (created 7 years ago)
 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 import json
 import math
 import os

 from telemetry import test
 from telemetry.core import util
 from telemetry.page import page_measurement
 from telemetry.page import page_set
+from telemetry.value import merge_values


 def _GeometricMean(values):
   """Compute a rounded geometric mean from an array of values."""
   if not values:
     return None
   # To avoid infinite value errors, make sure no value is less than 0.001.
   new_values = []
   for value in values:
     if value > 0.001:
(...skipping 27 matching lines...)
       for suite in data['BenchmarkSuites']:
         # Skip benchmarks that we didn't actually run this time around.
         if len(suite['Benchmarks']) or suite['score']:
           results.Add(SCORE_TRACE_NAME, SCORE_UNIT,
                       suite['score'], suite['name'], 'unimportant')
     finally:
       tab.EvaluateJavaScript('document.cookie = "__domperf_finished=0"')

   def DidRunTest(self, browser, results):
     # Now give the geometric mean as the total for the combined runs.
-    scores = []
-    for result in results.page_results:
-      scores.append(result[SCORE_TRACE_NAME].output_value)
-    total = _GeometricMean(scores)
+    combined = merge_values.MergeLikeValuesFromDifferentPages(
+        results.all_page_specific_values,
+        group_by_name_suffix=True)
+    combined_score = [x for x in combined if x.name == SCORE_TRACE_NAME][0]
+    total = _GeometricMean(combined_score.values)
     results.AddSummary(SCORE_TRACE_NAME, SCORE_UNIT, total, 'Total')


 class DomPerf(test.Test):
   """A suite of JavaScript benchmarks for exercising the browser's DOM.

   The final score is computed as the geometric mean of the individual results.
   Scores are not comparable across benchmark suite versions and higher scores
   means better performance: Bigger is better!"""
   test = _DomPerfMeasurement

   def CreatePageSet(self, options):
     dom_perf_dir = os.path.join(util.GetChromiumSrcDir(), 'data', 'dom_perf')
     base_page = 'file://run.html?reportInJS=1&run='
     return page_set.PageSet.FromDict({
         'pages': [
           { 'url': base_page + 'Accessors' },
           { 'url': base_page + 'CloneNodes' },
           { 'url': base_page + 'CreateNodes' },
           { 'url': base_page + 'DOMDivWalk' },
           { 'url': base_page + 'DOMTable' },
           { 'url': base_page + 'DOMWalk' },
           { 'url': base_page + 'Events' },
           { 'url': base_page + 'Get+Elements' },
           { 'url': base_page + 'GridSort' },
           { 'url': base_page + 'Template' }
         ]
       }, dom_perf_dir)
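The substance of the patch is in DidRunTest: instead of hand-rolling a loop over results.page_results and pulling each page's score out by trace name, it asks merge_values.MergeLikeValuesFromDifferentPages to group the page-specific values by name and then feeds the combined score list to _GeometricMean. Below is a minimal standalone sketch of that aggregation idea, using plain (name, value) tuples rather than real telemetry Value objects (the 'score' trace name and both helpers are illustrative assumptions, not the telemetry API):

import math

SCORE_TRACE_NAME = 'score'  # assumed trace name, for the sketch only

def merge_like_values(page_values):
  # Group per-page (name, value) pairs by name, loosely mimicking what
  # MergeLikeValuesFromDifferentPages does for telemetry Value objects.
  merged = {}
  for name, value in page_values:
    merged.setdefault(name, []).append(value)
  return merged

def geometric_mean(values):
  # Plain geometric mean; the real _GeometricMean additionally guards
  # against values below 0.001 and rounds the result.
  return math.exp(sum(math.log(v) for v in values) / len(values))

# One score per dom_perf page, as MeasurePage would produce.
page_values = [(SCORE_TRACE_NAME, 120.0),
               (SCORE_TRACE_NAME, 95.0),
               (SCORE_TRACE_NAME, 140.0)]
combined = merge_like_values(page_values)
total = geometric_mean(combined[SCORE_TRACE_NAME])
print('Total: %.1f' % total)  # analogous to the 'Total' summary value

Grouping by name before averaging is what lets DidRunTest emit a single 'Total' summary even though every page reports its score as a separate value.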
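The body of _GeometricMean is mostly elided above ("skipping 27 matching lines"); the visible lines only establish that an empty input returns None and that values are compared against a 0.001 floor so a zero score cannot produce an infinite logarithm. A hedged sketch of that kind of floored, rounded geometric mean, offered as an illustration rather than a reconstruction of the skipped lines:

import math

def floored_geometric_mean(values, floor=0.001):
  # Clamp tiny values to `floor` before taking logs, then round the result.
  # Illustrative only; the real _GeometricMean body is elided in the diff.
  if not values:
    return None
  clamped = [max(value, floor) for value in values]
  log_sum = sum(math.log(value) for value in clamped)
  return int(round(math.exp(log_sum / len(clamped))))

print(floored_geometric_mean([120.0, 0.0, 95.0]))  # the 0.0 is treated as 0.001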
