Index: tools/perf/metrics/gpu_timeline.py |
diff --git a/tools/perf/metrics/gpu_timeline.py b/tools/perf/metrics/gpu_timeline.py |
new file mode 100644 |
index 0000000000000000000000000000000000000000..78e2dee9a7b4b56b9298a19762329406b87d4661 |
--- /dev/null |
+++ b/tools/perf/metrics/gpu_timeline.py |
@@ -0,0 +1,217 @@ |
+# Copyright 2015 The Chromium Authors. All rights reserved. |
+# Use of this source code is governed by a BSD-style license that can be |
+# found in the LICENSE file. |
+import collections |
+import math |
+import sys |
+ |
+from telemetry.timeline import model as model_module |
+from telemetry.timeline import async_slice as async_slice_module |
+from telemetry.value import scalar |
+from telemetry.value import list_of_scalar_values |
+from telemetry.web_perf.metrics import timeline_based_metric |
+ |
+TOPLEVEL_GL_CATEGORY = 'gpu_toplevel' |
+TOPLEVEL_SERVICE_CATEGORY = 'disabled-by-default-gpu.service' |
+TOPLEVEL_DEVICE_CATEGORY = 'disabled-by-default-gpu.device' |
+ |
+SERVICE_FRAME_END_MARKER = (TOPLEVEL_SERVICE_CATEGORY, 'SwapBuffer') |
+DEVICE_FRAME_END_MARKER = (TOPLEVEL_DEVICE_CATEGORY, 'SwapBuffer') |
+ |
+TRACKED_NAMES = { 'RenderCompositor': 'render_compositor', |
+ 'BrowserCompositor': 'browser_compositor', |
+ 'Compositor': 'browser_compositor' } |
+ |
+GPU_SERVICE_DEVICE_VARIANCE = 5 |
+ |
+ |
+def CalculateFrameTimes(events_per_frame): |
+ """Given a list of events per frame, returns a list of frame times.""" |
+ times_per_frame = [] |
+ for event_list in events_per_frame: |
+ # Prefer to use thread_duration but use duration as fallback. |
+ event_times = [(event.thread_duration or event.duration) |
+ for event in event_list] |
+ times_per_frame.append(sum(event_times)) |
+ return times_per_frame |
+ |
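As a quick illustration of the duration fallback above, here is a minimal sketch (not part of the change) that runs CalculateFrameTimes over hand-built stand-in events. FakeEvent is a hypothetical substitute for telemetry slice objects, modelling only the two attributes the function reads, and the snippet assumes CalculateFrameTimes from this module is in scope.

    import collections

    # Hypothetical stand-in for a telemetry slice; only the attributes used by
    # CalculateFrameTimes are modelled.
    FakeEvent = collections.namedtuple('FakeEvent', ['thread_duration', 'duration'])

    frames = [
        [FakeEvent(thread_duration=2.0, duration=3.0),    # thread_duration preferred
         FakeEvent(thread_duration=None, duration=1.5)],  # falls back to duration
        [FakeEvent(thread_duration=4.0, duration=4.0)],
    ]

    print(CalculateFrameTimes(frames))  # [3.5, 4.0]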
+ |
+def TimelineName(name, source_type, value_type): |
+ """Constructs the standard name given in the timeline. |
+ |
+ Args: |
+    name: The name of the timeline, for example "total" or "render_compositor". |
+    source_type: One of "cpu", "gpu" or None. None is only used for total times. |
+    value_type: The type of value, for example "mean" or "stddev". |
+ """ |
+ if source_type: |
+ return '%s_%s_%s_time' % (name, value_type, source_type) |
+ else: |
+ return '%s_%s_time' % (name, value_type) |
+ |
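For reference, a couple of illustrative calls (values invented for the example, assuming TimelineName above is in scope) showing the naming scheme it produces:

    print(TimelineName('render_compositor', 'cpu', 'mean'))  # render_compositor_mean_cpu_time
    print(TimelineName('total', None, 'stddev'))             # total_stddev_time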
+ |
+class GPUTimelineMetric(timeline_based_metric.TimelineBasedMetric): |
+ """Computes GPU based metrics.""" |
+ |
+ def __init__(self): |
+ super(GPUTimelineMetric, self).__init__() |
+ |
+ def AddResults(self, model, _, interaction_records, results): |
+ service_times = self._CalculateGPUTimelineData(model) |
+ for value_item, durations in service_times.iteritems(): |
+ count = len(durations) |
+ avg = 0.0 |
+ stddev = 0.0 |
+ maximum = 0.0 |
+ if count: |
+ avg = sum(durations) / count |
+ stddev = math.sqrt(sum((d - avg) ** 2 for d in durations) / count) |
+ maximum = max(durations) |
+ |
+ name, src = value_item |
+ |
+ if src: |
+ frame_times_name = '%s_%s_frame_times' % (name, src) |
+ else: |
+ frame_times_name = '%s_frame_times' % (name) |
+ |
+ if durations: |
+ results.AddValue(list_of_scalar_values.ListOfScalarValues( |
+ results.current_page, frame_times_name, 'ms', durations)) |
+ |
+ results.AddValue(scalar.ScalarValue(results.current_page, |
+ TimelineName(name, src, 'max'), |
+ 'ms', maximum)) |
+ results.AddValue(scalar.ScalarValue(results.current_page, |
+ TimelineName(name, src, 'mean'), |
+ 'ms', avg)) |
+ results.AddValue(scalar.ScalarValue(results.current_page, |
+ TimelineName(name, src, 'stddev'), |
+ 'ms', stddev)) |
+ |
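Note that the aggregation above uses the population standard deviation (dividing by the frame count rather than count - 1). A standalone sketch of the same arithmetic with made-up durations:

    import math

    durations = [16.0, 18.0, 20.0]  # made-up per-frame times in ms
    count = len(durations)
    avg = sum(durations) / count                                         # 18.0
    stddev = math.sqrt(sum((d - avg) ** 2 for d in durations) / count)  # ~1.633
    maximum = max(durations)                                             # 20.0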
+ def _CalculateGPUTimelineData(self, model): |
+ """Uses the model and calculates the times for various values for each |
+ frame. The return value will be a dictionary of the following format: |
+ { |
+ EVENT_NAME1: [FRAME0_TIME, FRAME1_TIME...etc.], |
+ EVENT_NAME2: [FRAME0_TIME, FRAME1_TIME...etc.], |
+ } |
+ |
+ Event Names: |
+ mean_frame - Mean time each frame is calculated to be. |
+ mean_gpu_service-cpu: Mean time the GPU service took per frame. |
+ mean_gpu_device-gpu: Mean time the GPU device took per frame. |
+ TRACKED_NAMES_service-cpu: Using the TRACKED_NAMES dictionary, we |
+ include service traces per frame for the |
+ tracked name. |
+ TRACKED_NAMES_device-gpu: Using the TRACKED_NAMES dictionary, we |
+ include device traces per frame for the |
+ tracked name. |
+ """ |
+ all_service_events = [] |
+ current_service_frame_end = sys.maxint |
+ current_service_events = [] |
+ |
+ all_device_events = [] |
+ current_device_frame_end = sys.maxint |
+ current_device_events = [] |
+ |
+ tracked_events = {} |
+ tracked_events.update(dict([((value, 'cpu'), []) |
+ for value in TRACKED_NAMES.itervalues()])) |
+ tracked_events.update(dict([((value, 'gpu'), []) |
+ for value in TRACKED_NAMES.itervalues()])) |
+ |
+ current_tracked_service_events = collections.defaultdict(list) |
+ current_tracked_device_events = collections.defaultdict(list) |
+ |
+ event_iter = model.IterAllEvents( |
+ event_type_predicate=model_module.IsSliceOrAsyncSlice) |
+ for event in event_iter: |
+ # Look for frame end markers |
+ if (event.category, event.name) == SERVICE_FRAME_END_MARKER: |
+ current_service_frame_end = event.end |
+ elif (event.category, event.name) == DEVICE_FRAME_END_MARKER: |
+ current_device_frame_end = event.end |
+ |
+ # Track all other toplevel gl category markers |
+ elif event.args.get('gl_category', None) == TOPLEVEL_GL_CATEGORY: |
+ base_name = event.name |
+ dash_index = base_name.rfind('-') |
+ if dash_index != -1: |
+ base_name = base_name[:dash_index] |
+ tracked_name = TRACKED_NAMES.get(base_name, None) |
+ |
+ if event.category == TOPLEVEL_SERVICE_CATEGORY: |
+ # Check if frame has ended. |
+ if event.start >= current_service_frame_end: |
+ if current_service_events: |
+ all_service_events.append(current_service_events) |
+ for value in TRACKED_NAMES.itervalues(): |
+ tracked_events[(value, 'cpu')].append( |
+ current_tracked_service_events[value]) |
+ current_service_events = [] |
+ current_service_frame_end = sys.maxint |
+ current_tracked_service_events.clear() |
+ |
+ current_service_events.append(event) |
+ if tracked_name: |
+ current_tracked_service_events[tracked_name].append(event) |
+ |
+ elif event.category == TOPLEVEL_DEVICE_CATEGORY: |
+ # Check if frame has ended. |
+ if event.start >= current_device_frame_end: |
+ if current_device_events: |
+ all_device_events.append(current_device_events) |
+ for value in TRACKED_NAMES.itervalues(): |
+ tracked_events[(value, 'gpu')].append( |
+ current_tracked_device_events[value]) |
+ current_device_events = [] |
+ current_device_frame_end = sys.maxint |
+ current_tracked_device_events.clear() |
+ |
+ current_device_events.append(event) |
+ if tracked_name: |
+ current_tracked_device_events[tracked_name].append(event) |
+ |
+ # Append Data for Last Frame. |
+ if current_service_events: |
+ all_service_events.append(current_service_events) |
+ for value in TRACKED_NAMES.itervalues(): |
+ tracked_events[(value, 'cpu')].append( |
+ current_tracked_service_events[value]) |
+ if current_device_events: |
+ all_device_events.append(current_device_events) |
+ for value in TRACKED_NAMES.itervalues(): |
+ tracked_events[(value, 'gpu')].append( |
+ current_tracked_device_events[value]) |
+ |
+    # Calculate per-frame times using the service (CPU side) events. |
+ frame_times = [] |
+ if all_service_events: |
+ prev_frame_end = all_service_events[0][0].start |
+ for event_list in all_service_events: |
+ last_service_event_in_frame = event_list[-1] |
+ frame_times.append(last_service_event_in_frame.end - prev_frame_end) |
+ prev_frame_end = last_service_event_in_frame.end |
+ |
+ # Create the timeline data dictionary for service side traces. |
+ total_frame_value = ('total', None) |
+ cpu_frame_value = ('total', 'cpu') |
+ gpu_frame_value = ('total', 'gpu') |
+ timeline_data = {} |
+ timeline_data[total_frame_value] = frame_times |
+ timeline_data[cpu_frame_value] = CalculateFrameTimes(all_service_events) |
+ for value in TRACKED_NAMES.itervalues(): |
+ cpu_value = (value, 'cpu') |
+ timeline_data[cpu_value] = CalculateFrameTimes(tracked_events[cpu_value]) |
+ |
+    # Add in GPU side traces if supported (i.e. device traces exist). |
+ if all_device_events: |
+ timeline_data[gpu_frame_value] = CalculateFrameTimes(all_device_events) |
+ for value in TRACKED_NAMES.itervalues(): |
+ gpu_value = (value, 'gpu') |
+ tracked_gpu_event = tracked_events[gpu_value] |
+ timeline_data[gpu_value] = CalculateFrameTimes(tracked_gpu_event) |
+ |
+ return timeline_data |
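To make the frame-bucketing logic in _CalculateGPUTimelineData easier to follow in isolation, here is a much-simplified, hypothetical sketch of the same idea: a SwapBuffer marker records the current frame's end, and the first toplevel event starting at or after that boundary closes the frame and begins a new bucket. SimpleEvent and the sample event list are invented for illustration and are not telemetry objects.

    import collections
    import sys

    SimpleEvent = collections.namedtuple('SimpleEvent', ['name', 'start', 'end'])

    def GroupIntoFrames(events):
      """Buckets toplevel events into frames delimited by SwapBuffer markers."""
      frames = []
      current = []
      frame_end = sys.maxsize  # mirrors sys.maxint in the Python 2 code above
      for event in events:
        if event.name == 'SwapBuffer':
          frame_end = event.end
        else:
          if event.start >= frame_end:  # the previous frame has ended
            if current:
              frames.append(current)
            current = []
            frame_end = sys.maxsize
          current.append(event)
      if current:
        frames.append(current)
      return frames

    events = [SimpleEvent('Draw', 0, 5), SimpleEvent('SwapBuffer', 5, 6),
              SimpleEvent('Draw', 7, 12), SimpleEvent('SwapBuffer', 12, 13),
              SimpleEvent('Draw', 14, 18)]
    print([len(frame) for frame in GroupIntoFrames(events)])  # [1, 1, 1]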