# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import math
import sys

from telemetry.timeline import model as model_module
from telemetry.timeline import async_slice as async_slice_module
from telemetry.value import scalar
from telemetry.value import list_of_scalar_values
from telemetry.web_perf.metrics import timeline_based_metric

TOPLEVEL_GL_CATEGORY = 'gpu_toplevel'
TOPLEVEL_SERVICE_CATEGORY = 'disabled-by-default-gpu.service'
TOPLEVEL_DEVICE_CATEGORY = 'disabled-by-default-gpu.device'

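# A frame boundary is detected when an event's (category, name) pair matches
# one of the markers below (see _CalculateGPUTimelineData).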
SERVICE_FRAME_END_MARKER = (TOPLEVEL_SERVICE_CATEGORY, 'SwapBuffer')
DEVICE_FRAME_END_MARKER = (TOPLEVEL_DEVICE_CATEGORY, 'SwapBuffer')

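# Maps toplevel trace event base names to the name the metric reports them
# under; note that both 'BrowserCompositor' and 'Compositor' are reported as
# 'browser_compositor'.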
TRACKED_NAMES = { 'RenderCompositor': 'render_compositor',
                  'BrowserCompositor': 'browser_compositor',
                  'Compositor': 'browser_compositor' }

GPU_SERVICE_DEVICE_VARIANCE = 5


def CalculateFrameTimes(events_per_frame):
  """Given a list of events per frame, returns a list of frame times."""
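  # Illustrative example: two frames whose events carry thread_durations of
  # [2.0, 3.0] and [4.0] would yield [5.0, 4.0].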
  times_per_frame = []
  for event_list in events_per_frame:
    # Prefer to use thread_duration but use duration as fallback.
    event_times = [(event.thread_duration or event.duration)
                   for event in event_list]
    times_per_frame.append(sum(event_times))
  return times_per_frame


def TimelineName(name, source_type, value_type):
  """Constructs the standard name given in the timeline.

  Args:
    name: The name of the timeline, for example "total" or "render_compositor".
    source_type: One of "cpu", "gpu" or None. None is only used for total times.
    value_type: The type of value, for example "mean" or "stddev".
  """
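  # For example, TimelineName('render_compositor', 'cpu', 'mean') returns
  # 'render_compositor_mean_cpu_time', and TimelineName('total', None, 'max')
  # returns 'total_max_time'.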
  if source_type:
    return '%s_%s_%s_time' % (name, value_type, source_type)
  else:
    return '%s_%s_time' % (name, value_type)


class GPUTimelineMetric(timeline_based_metric.TimelineBasedMetric):
  """Computes GPU based metrics."""

  def __init__(self):
    super(GPUTimelineMetric, self).__init__()

  def AddResults(self, model, _, interaction_records, results):
nednguyen (2015/01/24 00:32:22): Does this implementation support overlapped intera…
David Yen (2015/01/24 00:41:10): Done.
    service_times = self._CalculateGPUTimelineData(model)
    for value_item, durations in service_times.iteritems():
      count = len(durations)
      avg = 0.0
      stddev = 0.0
      maximum = 0.0
      if count:
        avg = sum(durations) / count
        stddev = math.sqrt(sum((d - avg) ** 2 for d in durations) / count)
        maximum = max(durations)
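        # Population statistics over the per-frame durations; e.g. durations
        # of [16.0, 18.0, 17.0] give avg 17.0, stddev ~0.82 and maximum 18.0.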

      name, src = value_item

      if src:
        frame_times_name = '%s_%s_frame_times' % (name, src)
      else:
        frame_times_name = '%s_frame_times' % (name)

      if durations:
        results.AddValue(list_of_scalar_values.ListOfScalarValues(
            results.current_page, frame_times_name, 'ms', durations))

      results.AddValue(scalar.ScalarValue(results.current_page,
                                          TimelineName(name, src, 'max'),
                                          'ms', maximum))
      results.AddValue(scalar.ScalarValue(results.current_page,
                                          TimelineName(name, src, 'mean'),
                                          'ms', avg))
      results.AddValue(scalar.ScalarValue(results.current_page,
                                          TimelineName(name, src, 'stddev'),
                                          'ms', stddev))

  def _CalculateGPUTimelineData(self, model):
    """Uses the model to calculate per-frame times for various values.

    The return value is a dictionary keyed by (name, source_type) tuples,
    each mapping to a list of per-frame times:
      {
        (NAME1, SOURCE_TYPE1): [FRAME0_TIME, FRAME1_TIME, ...],
        (NAME2, SOURCE_TYPE2): [FRAME0_TIME, FRAME1_TIME, ...],
      }

    Keys:
      ('total', None): Total wall time of each frame, measured between the
          ends of successive frames on the service (CPU) side.
      ('total', 'cpu'): Time the GPU service (CPU side) took per frame.
      ('total', 'gpu'): Time the GPU device took per frame. Only present when
          device traces are available.
      (TRACKED_NAME, 'cpu'): Service (CPU) time per frame for each tracked
          name in the TRACKED_NAMES dictionary.
      (TRACKED_NAME, 'gpu'): Device (GPU) time per frame for each tracked
          name in the TRACKED_NAMES dictionary. Only present when device
          traces are available.
    """
    all_service_events = []
    current_service_frame_end = sys.maxint
    current_service_events = []

    all_device_events = []
    current_device_frame_end = sys.maxint
    current_device_events = []

    tracked_events = {}
    tracked_events.update(dict([((value, 'cpu'), [])
                                for value in TRACKED_NAMES.itervalues()]))
    tracked_events.update(dict([((value, 'gpu'), [])
                                for value in TRACKED_NAMES.itervalues()]))

    current_tracked_service_events = collections.defaultdict(list)
    current_tracked_device_events = collections.defaultdict(list)

    event_iter = model.IterAllEvents(
        event_type_predicate=model_module.IsSliceOrAsyncSlice)
    for event in event_iter:
      # Look for frame end markers
      if (event.category, event.name) == SERVICE_FRAME_END_MARKER:
        current_service_frame_end = event.end
      elif (event.category, event.name) == DEVICE_FRAME_END_MARKER:
        current_device_frame_end = event.end

      # Track all other toplevel gl category markers
      elif event.args.get('gl_category', None) == TOPLEVEL_GL_CATEGORY:
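        # Event names may include a trailing '-<suffix>'; strip it so the
        # base name can be matched against TRACKED_NAMES.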
        base_name = event.name
        dash_index = base_name.rfind('-')
        if dash_index != -1:
          base_name = base_name[:dash_index]
        tracked_name = TRACKED_NAMES.get(base_name, None)

        if event.category == TOPLEVEL_SERVICE_CATEGORY:
          # Check if frame has ended.
          if event.start >= current_service_frame_end:
            if current_service_events:
              all_service_events.append(current_service_events)
              for value in TRACKED_NAMES.itervalues():
                tracked_events[(value, 'cpu')].append(
                    current_tracked_service_events[value])
            current_service_events = []
            current_service_frame_end = sys.maxint
            current_tracked_service_events.clear()

          current_service_events.append(event)
          if tracked_name:
            current_tracked_service_events[tracked_name].append(event)

        elif event.category == TOPLEVEL_DEVICE_CATEGORY:
          # Check if frame has ended.
          if event.start >= current_device_frame_end:
            if current_device_events:
              all_device_events.append(current_device_events)
              for value in TRACKED_NAMES.itervalues():
                tracked_events[(value, 'gpu')].append(
                    current_tracked_device_events[value])
            current_device_events = []
            current_device_frame_end = sys.maxint
            current_tracked_device_events.clear()

          current_device_events.append(event)
          if tracked_name:
            current_tracked_device_events[tracked_name].append(event)

    # Append Data for Last Frame.
    if current_service_events:
      all_service_events.append(current_service_events)
      for value in TRACKED_NAMES.itervalues():
        tracked_events[(value, 'cpu')].append(
            current_tracked_service_events[value])
    if current_device_events:
      all_device_events.append(current_device_events)
      for value in TRACKED_NAMES.itervalues():
        tracked_events[(value, 'gpu')].append(
            current_tracked_device_events[value])

    # Calculate frame times on the CPU (service) side.
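    # Frame time is measured from the end of the previous frame's last service
    # event (or, for the first frame, the start of the very first service
    # event) to the end of the current frame's last service event.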
    frame_times = []
    if all_service_events:
      prev_frame_end = all_service_events[0][0].start
      for event_list in all_service_events:
        last_service_event_in_frame = event_list[-1]
        frame_times.append(last_service_event_in_frame.end - prev_frame_end)
        prev_frame_end = last_service_event_in_frame.end

    # Create the timeline data dictionary for service side traces.
    total_frame_value = ('total', None)
    cpu_frame_value = ('total', 'cpu')
    gpu_frame_value = ('total', 'gpu')
    timeline_data = {}
    timeline_data[total_frame_value] = frame_times
    timeline_data[cpu_frame_value] = CalculateFrameTimes(all_service_events)
    for value in TRACKED_NAMES.itervalues():
      cpu_value = (value, 'cpu')
      timeline_data[cpu_value] = CalculateFrameTimes(tracked_events[cpu_value])

    # Add in GPU side traces if supported (i.e. device traces exist).
    if all_device_events:
      timeline_data[gpu_frame_value] = CalculateFrameTimes(all_device_events)
      for value in TRACKED_NAMES.itervalues():
        gpu_value = (value, 'gpu')
        tracked_gpu_event = tracked_events[gpu_value]
        timeline_data[gpu_value] = CalculateFrameTimes(tracked_gpu_event)

    return timeline_data
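

# Illustrative sketch, not part of the metric above: a simplified version of
# the frame-grouping idea in _CalculateGPUTimelineData. Service events
# accumulate into the current frame until an event starts at or after the most
# recent 'SwapBuffer' marker's end, which closes the frame; per-frame times
# then come from CalculateFrameTimes above. FakeEvent and _GroupServiceFrames
# are illustrative stand-ins for Telemetry's timeline events, not Telemetry
# APIs.
FakeEvent = collections.namedtuple(
    'FakeEvent', ['name', 'start', 'end', 'thread_duration', 'duration'])


def _GroupServiceFrames(events):
  """Groups events into frames delimited by 'SwapBuffer' markers (sketch)."""
  frames = []
  current_frame = []
  current_frame_end = sys.maxint
  for event in events:
    if event.name == 'SwapBuffer':
      # The marker only records where the current frame ends.
      current_frame_end = event.end
    else:
      if event.start >= current_frame_end:
        # The event starts after the last SwapBuffer: close the frame.
        if current_frame:
          frames.append(current_frame)
        current_frame = []
        current_frame_end = sys.maxint
      current_frame.append(event)
  if current_frame:
    frames.append(current_frame)
  return frames


if __name__ == '__main__':
  # Two frames separated by SwapBuffer markers; expected frame times: [5.0, 4.0]
  events = [FakeEvent('RenderCompositor-1', 0.0, 5.0, 5.0, 5.0),
            FakeEvent('SwapBuffer', 5.0, 6.0, 1.0, 1.0),
            FakeEvent('RenderCompositor-1', 7.0, 11.0, 4.0, 4.0),
            FakeEvent('SwapBuffer', 11.0, 12.0, 1.0, 1.0)]
  print(CalculateFrameTimes(_GroupServiceFrames(events)))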