Chromium Code Reviews

Unified Diff: tools/perf/metrics/webrtc_stats.py

Issue 2561603003: Add encoding time and fps to webrtc.stress case. (Closed)
Patch Set: Don't log more than 5 conns (created 4 years ago)
 # Copyright 2014 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 import json
 import logging
 import re
 
 from telemetry.internal.util import camel_case
 from telemetry.value import list_of_scalar_values
(...skipping 52 matching lines...)
     'description': 'How much receive bandwidth we estimate we have.'
   },
   'googTargetEncBitrate': {
     'units': 'bit/s',
     'description': ('The target encoding bitrate we estimate is good to '
                     'aim for given our bandwidth estimates.')
   },
 }
 
 
+def SelectMetrics(particular_metrics):
+  if not particular_metrics:
+    return INTERESTING_METRICS
+
+  # You can only select among the predefined interesting metrics.
+  assert set(particular_metrics).issubset(INTERESTING_METRICS.keys())
+  return {key: value for key, value in INTERESTING_METRICS.iteritems()
+          if key in particular_metrics}
+
+
 def GetReportKind(report):
   if 'audioInputLevel' in report or 'audioOutputLevel' in report:
     return 'audio'
   if 'googFrameRateSent' in report or 'googFrameRateReceived' in report:
     return 'video'
   if 'googAvailableSendBandwidth' in report:
     return 'bwe'
 
   logging.debug('Did not recognize report batch: %s.', report.keys())
 
   # There are other kinds of reports, such as transport types, which we don't
   # care about here. For these cases just return 'unknown' which will ignore the
   # report.
   return 'unknown'
 
 
 def DistinguishAudioVideoOrBwe(report, stat_name):
   return GetReportKind(report) + '_' + stat_name
 
 
 def StripAudioVideoBweDistinction(stat_name):
   return re.sub('^(audio|video|bwe)_', '', stat_name)
 
 
-def SortStatsIntoTimeSeries(report_batches):
+def SortStatsIntoTimeSeries(report_batches, selected_metrics):
   time_series = {}
   for report_batch in report_batches:
     for report in report_batch:
       for stat_name, value in report.iteritems():
-        if stat_name not in INTERESTING_METRICS:
+        if stat_name not in selected_metrics:
           continue
         if GetReportKind(report) == 'unknown':
           continue
         full_stat_name = DistinguishAudioVideoOrBwe(report, stat_name)
         time_series.setdefault(full_stat_name, []).append(float(value))
 
   return time_series
 
 
+def PrintSpecialMarkerValue(results):
+  results.AddValue(list_of_scalar_values.ListOfScalarValues(
+      results.current_page, 'peer_connection_5_not_logging_more_conns',
+      '', [17], description=('This marker signifies we never log more '
+                             'than 5 peer connections'),
+      important=False))
+
+
 class WebRtcStatisticsMetric(Metric):
   """Makes it possible to measure stats from peer connections."""
 
-  def __init__(self):
+  def __init__(self, particular_metrics=None):
     super(WebRtcStatisticsMetric, self).__init__()
     self._all_reports = None
+    self._selected_metrics = SelectMetrics(particular_metrics)
 
   def Start(self, page, tab):
     pass
 
   def Stop(self, page, tab):
     """Digs out stats from data populated by the javascript in webrtc_cases."""
     self._all_reports = tab.EvaluateJavaScript(
         'JSON.stringify(window.peerConnectionReports)')
 
   def AddResults(self, tab, results):
     if not self._all_reports:
       return
 
     reports = json.loads(self._all_reports)
     for i, report in enumerate(reports):
-      time_series = SortStatsIntoTimeSeries(report)
+      time_series = SortStatsIntoTimeSeries(report, self._selected_metrics)
+
+      # Only ever show stats for 5 peer connections, or it's going to look
+      # insane in the results.
+      if i > 5:
+        PrintSpecialMarkerValue(results)
+        return
 
       for stat_name, values in time_series.iteritems():
         stat_name_underscored = camel_case.ToUnderscore(stat_name)
         trace_name = 'peer_connection_%d_%s' % (i, stat_name_underscored)
         general_name = StripAudioVideoBweDistinction(stat_name)
         results.AddValue(list_of_scalar_values.ListOfScalarValues(
             results.current_page, trace_name,
             INTERESTING_METRICS[general_name]['units'], values,
             description=INTERESTING_METRICS[general_name]['description'],
             important=False))
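
For reference, a minimal sketch of how the new selected_metrics filtering behaves. This is illustrative only: it assumes 'googFrameRateSent' is one of the predefined INTERESTING_METRICS keys (most of that dict is elided from this diff) and that the module is importable as metrics.webrtc_stats.

    from metrics import webrtc_stats

    # A fake report batch shaped like the ones the webrtc_cases javascript collects.
    fake_batches = [[{'googFrameRateSent': '30', 'googFrameRateReceived': '28'}]]

    # SelectMetrics narrows INTERESTING_METRICS down to the requested keys
    # (asserting they are all known); SortStatsIntoTimeSeries then skips every
    # stat outside that selection.
    selected = webrtc_stats.SelectMetrics(['googFrameRateSent'])
    series = webrtc_stats.SortStatsIntoTimeSeries(fake_batches, selected)
    # series == {'video_googFrameRateSent': [30.0]}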
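Similarly, a hedged sketch of how a measurement could opt in to a narrowed stat set through the new constructor argument. The stat names below are illustrative, and per the assert in SelectMetrics they must already be keys of INTERESTING_METRICS.

    from metrics import webrtc_stats

    # Hypothetical: a stress-style measurement passes only the stats it cares
    # about, e.g. encode time and sent frame rate, instead of logging everything.
    metric = webrtc_stats.WebRtcStatisticsMetric(
        particular_metrics=['googAvgEncodeMs', 'googFrameRateSent'])

    # Omitting the argument keeps the old behaviour: all of INTERESTING_METRICS.
    default_metric = webrtc_stats.WebRtcStatisticsMetric()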
