# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import math
import sys

from telemetry.timeline import model as model_module
from telemetry.timeline import async_slice as async_slice_module
from telemetry.value import scalar
from telemetry.web_perf.metrics import timeline_based_metric

TOPLEVEL_GL_CATEGORY = 'gpu_toplevel'
TOPLEVEL_SERVICE_CATEGORY = 'disabled-by-default-gpu.service'
TOPLEVEL_DEVICE_CATEGORY = 'disabled-by-default-gpu.device'

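# A 'SwapBuffer' event on the service (CPU) or device (GPU) timeline marks
# the end of a frame on that timeline.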
SERVICE_FRAME_END_MARKER = (TOPLEVEL_SERVICE_CATEGORY, 'SwapBuffer')
DEVICE_FRAME_END_MARKER = (TOPLEVEL_DEVICE_CATEGORY, 'SwapBuffer')

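# Maps the base name of a toplevel trace event (its name with any trailing
# '-<id>' suffix stripped) to the prefix used for the reported metric names.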
TRACKED_NAMES = { 'RenderCompositor': 'render_compositor',
                  'Compositor': 'compositor' }

GPU_SERVICE_DEVICE_VARIANCE = 5


def GetTrackedServiceName(name):
  return name + '_service-cpu'


def GetTrackedDeviceName(name):
  return name + '_device-gpu'


def CalculateFrameTimes(events_per_frame):
  """Given a list of events per frame, returns a list of frame times."""
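  # Each frame's time is the sum of its events' durations, e.g. events with
  # thread durations [2.0, 3.0] yield a frame time of 5.0.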
  times_per_frame = []
  for event_list in events_per_frame:
    # Prefer to use thread_duration but use duration as fallback.
    event_times = [(event.thread_duration or event.duration)
                   for event in event_list]
    times_per_frame.append(sum(event_times))
  return times_per_frame


class GPUTimelineMetric(timeline_based_metric.TimelineBasedMetric):
  """Computes GPU based metrics."""

  def __init__(self):
    super(GPUTimelineMetric, self).__init__()

  def AddResults(self, model, _, interaction_records, results):
    service_times = self._CalculateGPUTimelineData(model)
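    # Report the max, average and standard deviation (in ms) for each
    # per-frame series.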
    for name, durations in service_times.iteritems():
      count = len(durations)
      avg = 0.0
      stddev = 0.0
      maximum = 0.0
      if count:
        avg = sum(durations) / count
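        # Population standard deviation of the per-frame durations.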
        stddev = math.sqrt(sum((d - avg) ** 2 for d in durations) / count)
        maximum = max(durations)

      results.AddValue(scalar.ScalarValue(results.current_page,
                                          name + '_max', 'ms', maximum))
      results.AddValue(scalar.ScalarValue(results.current_page,
                                          name + '_avg', 'ms', avg))
      results.AddValue(scalar.ScalarValue(results.current_page,
                                          name + '_stddev', 'ms', stddev))

  def _CalculateGPUTimelineData(self, model):
    """Uses the model and calculates the times for various values for each
    frame. The return value will be a dictionary of the following format:
      {
        EVENT_NAME1: [FRAME0_TIME, FRAME1_TIME...etc.],
        EVENT_NAME2: [FRAME0_TIME, FRAME1_TIME...etc.],
      }

    Event Names:
      mean_frame: Mean time each frame is calculated to be.
      mean_gpu_service-cpu: Mean time the GPU service took per frame.
      mean_gpu_device-gpu: Mean time the GPU device took per frame.
      TRACKED_NAMES_service-cpu: Using the TRACKED_NAMES dictionary, we
                                 include service traces per frame for the
                                 tracked name.
      TRACKED_NAMES_device-gpu: Using the TRACKED_NAMES dictionary, we
                                include device traces per frame for the
                                tracked name.
    """
    all_service_events = []
    current_service_frame_end = sys.maxint
    current_service_events = []

    all_device_events = []
    current_device_frame_end = sys.maxint
    current_device_events = []

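    # Per-frame event lists keyed by reported metric name, e.g.
    # 'render_compositor_service-cpu'.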
    tracked_events = {}
    tracked_events.update(dict([(GetTrackedServiceName(value), [])
                                for value in TRACKED_NAMES.itervalues()]))
    tracked_events.update(dict([(GetTrackedDeviceName(value), [])
                                for value in TRACKED_NAMES.itervalues()]))

    current_tracked_service_events = collections.defaultdict(list)
    current_tracked_device_events = collections.defaultdict(list)

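    # Walk all slices in the model. Toplevel GL events accumulate into the
    # current frame until an event starts past the recorded SwapBuffer end
    # time, at which point the finished frame is flushed.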
    event_iter = model.IterAllEvents(
        event_type_predicate=model_module.IsSliceOrAsyncSlice)
    for event in event_iter:
      # Look for frame end markers
      if (event.category, event.name) == SERVICE_FRAME_END_MARKER:
        current_service_frame_end = event.end
      elif (event.category, event.name) == DEVICE_FRAME_END_MARKER:
        current_device_frame_end = event.end

      # Track all other toplevel gl category markers
      elif event.args.get('gl_category', None) == TOPLEVEL_GL_CATEGORY:
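        # Toplevel event names may carry a trailing '-<id>' suffix; strip it
        # so the base name can be looked up in TRACKED_NAMES.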
        base_name = event.name
        dash_index = base_name.rfind('-')
        if dash_index != -1:
          base_name = base_name[:dash_index]
        tracked_name = TRACKED_NAMES.get(base_name, None)

        if event.category == TOPLEVEL_SERVICE_CATEGORY:
          # Check if frame has ended.
          if event.start >= current_service_frame_end:
            if current_service_events:
              all_service_events.append(current_service_events)
              for value in TRACKED_NAMES.itervalues():
                tracked_events[GetTrackedServiceName(value)].append(
                    current_tracked_service_events[value])
            current_service_events = []
            current_service_frame_end = sys.maxint
            current_tracked_service_events.clear()

          current_service_events.append(event)
          if tracked_name:
            current_tracked_service_events[tracked_name].append(event)

        elif event.category == TOPLEVEL_DEVICE_CATEGORY:
          # Check if frame has ended.
          if event.start >= current_device_frame_end:
            if current_device_events:
              all_device_events.append(current_device_events)
              for value in TRACKED_NAMES.itervalues():
                tracked_events[GetTrackedDeviceName(value)].append(
                    current_tracked_device_events[value])
            current_device_events = []
            current_device_frame_end = sys.maxint
            current_tracked_device_events.clear()

          current_device_events.append(event)
          if tracked_name:
            current_tracked_device_events[tracked_name].append(event)

    # Append Data for Last Frame.
    if current_service_events:
      all_service_events.append(current_service_events)
      for value in TRACKED_NAMES.itervalues():
        tracked_events[GetTrackedServiceName(value)].append(
            current_tracked_service_events[value])
    if current_device_events:
      all_device_events.append(current_device_events)
      for value in TRACKED_NAMES.itervalues():
        tracked_events[GetTrackedDeviceName(value)].append(
            current_tracked_device_events[value])

    # Calculate Mean Frame Time for the CPU side.
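    # A frame's time runs from the end of the previous frame's last service
    # event to the end of this frame's last service event.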
    frame_times = []
    if all_service_events:
      prev_frame_end = all_service_events[0][0].start
      for event_list in all_service_events:
        last_service_event_in_frame = event_list[-1]
        frame_times.append(last_service_event_in_frame.end - prev_frame_end)
        prev_frame_end = last_service_event_in_frame.end

    # Create the timeline data dictionary for service side traces.
    gpu_service_name = GetTrackedServiceName('mean_gpu')

    timeline_data = {}
    timeline_data['mean_frame'] = frame_times
    timeline_data[gpu_service_name] = CalculateFrameTimes(all_service_events)
    for value in TRACKED_NAMES.itervalues():
      name = GetTrackedServiceName(value)
      timeline_data[name] = CalculateFrameTimes(tracked_events[name])

    # Add in GPU side traces if supported (i.e. device traces exist).
    if all_device_events:
      gpu_device_name = GetTrackedDeviceName('mean_gpu')

      timeline_data[gpu_device_name] = CalculateFrameTimes(all_device_events)
      for value in TRACKED_NAMES.itervalues():
        name = GetTrackedDeviceName(value)
        timeline_data[name] = CalculateFrameTimes(tracked_events[name])

    return timeline_data