# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
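
"""GPU timeline metric.

Reports per-frame CPU and GPU times derived from the toplevel
'disabled-by-default-gpu.service' and 'disabled-by-default-gpu.device'
trace markers."""
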
import collections
import math
import sys

from telemetry.timeline import model as model_module
from telemetry.value import scalar
from telemetry.value import list_of_scalar_values
from telemetry.web_perf.metrics import timeline_based_metric

TOPLEVEL_GL_CATEGORY = 'gpu_toplevel'
TOPLEVEL_SERVICE_CATEGORY = 'disabled-by-default-gpu.service'
TOPLEVEL_DEVICE_CATEGORY = 'disabled-by-default-gpu.device'

SERVICE_FRAME_END_MARKER = (TOPLEVEL_SERVICE_CATEGORY, 'SwapBuffer')
DEVICE_FRAME_END_MARKER = (TOPLEVEL_DEVICE_CATEGORY, 'SwapBuffer')

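# Maps the base GL context name found in toplevel trace events (anything after
# the last '-' in the event name is stripped first) to the name prefix used
# for the reported values.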
TRACKED_GL_CONTEXT_NAME = { 'RenderCompositor': 'render_compositor',
                            'BrowserCompositor': 'browser_compositor',
                            'Compositor': 'browser_compositor' }


def _CalculateFrameTimes(events_per_frame, event_data_func):
  """Given a list of events per frame and a function to extract event time
  data, returns a list of frame times."""
  times_per_frame = []
  for event_list in events_per_frame:
    event_times = [event_data_func(event) for event in event_list]
    times_per_frame.append(sum(event_times))
  return times_per_frame


def _CPUFrameTimes(events_per_frame):
  """Given a list of events per frame, returns a list of CPU frame times."""
  # CPU event frames are calculated using the event thread duration.
  # Some platforms do not support thread_duration; convert those to 0.
  return _CalculateFrameTimes(events_per_frame,
                              lambda event: event.thread_duration or 0)


def _GPUFrameTimes(events_per_frame):
  """Given a list of events per frame, returns a list of GPU frame times."""
  # GPU event frames are asynchronous slices which use the event duration.
  return _CalculateFrameTimes(events_per_frame,
                              lambda event: event.duration)


def TimelineName(name, source_type, value_type):
  """Constructs the standard name reported for a timeline value.

  Args:
    name: The name of the timeline, for example "total" or "render_compositor".
    source_type: One of "cpu", "gpu" or None. None is only used for total times.
    value_type: The type of value, for example "mean" or "stddev".
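
  Example:
    TimelineName('render_compositor', 'cpu', 'mean') returns
    'render_compositor_mean_cpu_time'.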
  """
  if source_type:
    return '%s_%s_%s_time' % (name, value_type, source_type)
  else:
    return '%s_%s_time' % (name, value_type)


class GPUTimelineMetric(timeline_based_metric.TimelineBasedMetric):
  """Computes GPU-based metrics."""

  def __init__(self):
    super(GPUTimelineMetric, self).__init__()

  def AddResults(self, model, _, interaction_records, results):
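    """Adds raw frame time lists and max/mean/stddev scalars to |results|."""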
    self.VerifyNonOverlappedRecords(interaction_records)
    service_times = self._CalculateGPUTimelineData(model)
    for value_item, durations in service_times.iteritems():
      count = len(durations)
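      # Summary statistics (mean, population stddev, max) over the per-frame
      # durations; they stay 0.0 when no frames were recorded.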
      avg = 0.0
      stddev = 0.0
      maximum = 0.0
      if count:
        avg = sum(durations) / count
        stddev = math.sqrt(sum((d - avg) ** 2 for d in durations) / count)
        maximum = max(durations)

      name, src = value_item

      if src:
        frame_times_name = '%s_%s_frame_times' % (name, src)
      else:
        frame_times_name = '%s_frame_times' % (name)

      if durations:
        results.AddValue(list_of_scalar_values.ListOfScalarValues(
            results.current_page, frame_times_name, 'ms', durations))

      results.AddValue(scalar.ScalarValue(results.current_page,
                                          TimelineName(name, src, 'max'),
                                          'ms', maximum))
      results.AddValue(scalar.ScalarValue(results.current_page,
                                          TimelineName(name, src, 'mean'),
                                          'ms', avg))
      results.AddValue(scalar.ScalarValue(results.current_page,
                                          TimelineName(name, src, 'stddev'),
                                          'ms', stddev))

  def _CalculateGPUTimelineData(self, model):
    """Uses the model to calculate the per-frame times for various values.
    The return value will be a dictionary of the following format:
      {
        (EVENT_NAME1, SRC1_TYPE): [FRAME0_TIME, FRAME1_TIME...etc.],
        (EVENT_NAME2, SRC2_TYPE): [FRAME0_TIME, FRAME1_TIME...etc.],
      }

    Events:
      swap - The time in milliseconds between each swap marker.
      total - The total amount of time per frame spent on the gpu thread.
      TRACKED_NAMES - For each value in the TRACKED_GL_CONTEXT_NAME dict, the
                      per-frame times of the traces attributed to that name.
    Source Types:
      None - This will only be valid for the "swap" event.
      cpu - For an event, the "cpu" source type signifies time spent on the
            gpu thread using the CPU. This uses the "gpu.service" markers.
      gpu - For an event, the "gpu" source type signifies time spent on the
            gpu thread using the GPU. This uses the "gpu.device" markers.
    """
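    # Service (CPU) and device (GPU) events are grouped into frames delimited
    # by their respective SwapBuffer markers.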
    all_service_events = []
    current_service_frame_end = sys.maxint
    current_service_events = []

    all_device_events = []
    current_device_frame_end = sys.maxint
    current_device_events = []

    tracked_events = {}
    tracked_events.update(
        dict([((value, 'cpu'), [])
              for value in TRACKED_GL_CONTEXT_NAME.itervalues()]))
    tracked_events.update(
        dict([((value, 'gpu'), [])
              for value in TRACKED_GL_CONTEXT_NAME.itervalues()]))

    # These will track traces within the current frame.
    current_tracked_service_events = collections.defaultdict(list)
    current_tracked_device_events = collections.defaultdict(list)

    event_iter = model.IterAllEvents(
        event_type_predicate=model_module.IsSliceOrAsyncSlice)
    for event in event_iter:
      # Look for frame end markers.
      if (event.category, event.name) == SERVICE_FRAME_END_MARKER:
        current_service_frame_end = event.end
      elif (event.category, event.name) == DEVICE_FRAME_END_MARKER:
        current_device_frame_end = event.end

      # Track all other toplevel gl category markers.
      elif event.args.get('gl_category', None) == TOPLEVEL_GL_CATEGORY:
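        # Strip anything after the last '-' in the event name (e.g. a context
        # id suffix) before looking up the tracked context name.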
        base_name = event.name
        dash_index = base_name.rfind('-')
        if dash_index != -1:
          base_name = base_name[:dash_index]
        tracked_name = TRACKED_GL_CONTEXT_NAME.get(base_name, None)

        if event.category == TOPLEVEL_SERVICE_CATEGORY:
          # Check if frame has ended.
          if event.start >= current_service_frame_end:
            if current_service_events:
              all_service_events.append(current_service_events)
              for value in TRACKED_GL_CONTEXT_NAME.itervalues():
                tracked_events[(value, 'cpu')].append(
                    current_tracked_service_events[value])
            current_service_events = []
            current_service_frame_end = sys.maxint
            current_tracked_service_events.clear()

          current_service_events.append(event)
          if tracked_name:
            current_tracked_service_events[tracked_name].append(event)

        elif event.category == TOPLEVEL_DEVICE_CATEGORY:
          # Check if frame has ended.
          if event.start >= current_device_frame_end:
            if current_device_events:
              all_device_events.append(current_device_events)
              for value in TRACKED_GL_CONTEXT_NAME.itervalues():
                tracked_events[(value, 'gpu')].append(
                    current_tracked_device_events[value])
            current_device_events = []
            current_device_frame_end = sys.maxint
            current_tracked_device_events.clear()

          current_device_events.append(event)
          if tracked_name:
            current_tracked_device_events[tracked_name].append(event)

    # Append data for the last frame.
    if current_service_events:
      all_service_events.append(current_service_events)
      for value in TRACKED_GL_CONTEXT_NAME.itervalues():
        tracked_events[(value, 'cpu')].append(
            current_tracked_service_events[value])
    if current_device_events:
      all_device_events.append(current_device_events)
      for value in TRACKED_GL_CONTEXT_NAME.itervalues():
        tracked_events[(value, 'gpu')].append(
            current_tracked_device_events[value])

    # Calculate the swap frame times on the service (CPU) side: the time from
    # the end of one frame's last service event to the end of the next
    # frame's last service event.
    frame_times = []
    if all_service_events:
      prev_frame_end = all_service_events[0][0].start
      for event_list in all_service_events:
        last_service_event_in_frame = event_list[-1]
        frame_times.append(last_service_event_in_frame.end - prev_frame_end)
        prev_frame_end = last_service_event_in_frame.end

    # Create the timeline data dictionary for service-side traces.
    total_frame_value = ('swap', None)
    cpu_frame_value = ('total', 'cpu')
    gpu_frame_value = ('total', 'gpu')
    timeline_data = {}
    timeline_data[total_frame_value] = frame_times
    timeline_data[cpu_frame_value] = _CPUFrameTimes(all_service_events)
    for value in TRACKED_GL_CONTEXT_NAME.itervalues():
      cpu_value = (value, 'cpu')
      timeline_data[cpu_value] = _CPUFrameTimes(tracked_events[cpu_value])

    # Add in GPU-side traces if supported (i.e. device traces exist).
    if all_device_events:
      timeline_data[gpu_frame_value] = _GPUFrameTimes(all_device_events)
      for value in TRACKED_GL_CONTEXT_NAME.itervalues():
        gpu_value = (value, 'gpu')
        tracked_gpu_event = tracked_events[gpu_value]
        timeline_data[gpu_value] = _GPUFrameTimes(tracked_gpu_event)

    return timeline_data