| Index: tools/perf/metrics/gpu_timeline_unittest.py
|
| diff --git a/tools/perf/metrics/gpu_timeline_unittest.py b/tools/perf/metrics/gpu_timeline_unittest.py
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..8cac059ac096bc1f5bbba934919db9f110f33749
|
| --- /dev/null
|
| +++ b/tools/perf/metrics/gpu_timeline_unittest.py
|
| @@ -0,0 +1,307 @@
|
| +# Copyright 2015 The Chromium Authors. All rights reserved.
|
| +# Use of this source code is governed by a BSD-style license that can be
|
| +# found in the LICENSE file.
|
| +
|
| +import unittest
|
| +
|
| +from metrics import test_page_test_results
|
| +from metrics import gpu_timeline
|
| +from telemetry.timeline import async_slice
|
| +from telemetry.timeline import model as model_module
|
| +from telemetry.web_perf import timeline_interaction_record as tir_module
|
| +
|
| +FRAME_END_CATEGORY, FRAME_END_NAME = gpu_timeline.FRAME_END_MARKER
|
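| +# A single interaction record spanning the entire trace.
|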
| +INTERACTION_RECORDS = [tir_module.TimelineInteractionRecord("test-record",
|
| + 0,
|
| + float('inf'))]
|
| +
|
| +
|
| +def _CreateGPUAsyncSlices(name, start_time, duration, offset=0):
|
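| +  """Returns a (service, device) pair of matching toplevel GPU async slices.
|
| +
|
| +  Both slices use the given name and duration; the device slice is shifted
|
| +  from the service slice start time by |offset|.
|
| +  """
|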
| + args = { 'gl_category': gpu_timeline.TOPLEVEL_GL_CATEGORY }
|
| + return (async_slice.AsyncSlice(gpu_timeline.TOPLEVEL_SERVICE_CATEGORY,
|
| + name, start_time,
|
| + args=args,
|
| + duration=duration,
|
| + thread_start=start_time),
|
| + async_slice.AsyncSlice(gpu_timeline.TOPLEVEL_DEVICE_CATEGORY,
|
| + name, start_time + offset,
|
| + args=args,
|
| + duration=duration,
|
| + thread_start=start_time + offset))
|
| +
|
| +
|
| +class GPUTimelineTest(unittest.TestCase):
|
| + def GetResults(self, metric, model, renderer_thread, interaction_records):
|
| + results = test_page_test_results.TestPageTestResults(self)
|
| + metric.AddResults(model, renderer_thread, interaction_records, results)
|
| + return results
|
| +
|
| + def testExpectedResults(self):
|
| +    """Test that a simple trace outputs all expected results."""
|
| + model = model_module.TimelineModel()
|
| + test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
|
| + for slice_item in _CreateGPUAsyncSlices('test_item', 100, 10):
|
| + test_thread.AddAsyncSlice(slice_item)
|
| + model.FinalizeImport()
|
| +
|
| + metric = gpu_timeline.GPUTimelineMetric()
|
| + results = self.GetResults(metric, model=model, renderer_thread=test_thread,
|
| + interaction_records=INTERACTION_RECORDS)
|
| +
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_frame_max', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_frame_avg', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_frame_stddev', 'ms', 0)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_gpu_service_max', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_gpu_service_avg', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_gpu_service_stddev', 'ms', 0)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_gpu_device_max', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_gpu_device_avg', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_gpu_device_stddev', 'ms', 0)
|
| +
|
| + for tracked_name in gpu_timeline.TRACKED_NAMES.values():
|
| + results.AssertHasPageSpecificScalarValue(
|
| + tracked_name + '_service_max', 'ms', 0)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + tracked_name + '_service_avg', 'ms', 0)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + tracked_name + '_service_stddev', 'ms', 0)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + tracked_name + '_device_max', 'ms', 0)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + tracked_name + '_device_avg', 'ms', 0)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + tracked_name + '_device_stddev', 'ms', 0)
|
| +
|
| + def testNoDeviceTraceResults(self):
|
| +    """Test expected results when device traces are missing."""
|
| + model = model_module.TimelineModel()
|
| + test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
|
| + service_slice, _ = _CreateGPUAsyncSlices('test_item', 100, 10)
|
| + test_thread.AddAsyncSlice(service_slice)
|
| + model.FinalizeImport()
|
| +
|
| + metric = gpu_timeline.GPUTimelineMetric()
|
| + results = self.GetResults(metric, model=model, renderer_thread=test_thread,
|
| + interaction_records=INTERACTION_RECORDS)
|
| +
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_frame_max', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_frame_avg', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_frame_stddev', 'ms', 0)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_gpu_service_max', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_gpu_service_avg', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_gpu_service_stddev', 'ms', 0)
|
| + self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
|
| + 'total_gpu_device_max')
|
| + self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
|
| + 'total_gpu_device_avg')
|
| + self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
|
| + 'total_gpu_device_stddev')
|
| +
|
| + for tracked_name in gpu_timeline.TRACKED_NAMES.values():
|
| + results.AssertHasPageSpecificScalarValue(
|
| + tracked_name + '_service_max', 'ms', 0)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + tracked_name + '_service_avg', 'ms', 0)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + tracked_name + '_service_stddev', 'ms', 0)
|
| + self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
|
| + tracked_name + '_device_max')
|
| + self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
|
| + tracked_name + '_device_avg')
|
| + self.assertRaises(AssertionError, results.GetPageSpecificValueNamed,
|
| + tracked_name + '_device_stddev')
|
| +
|
| + def testMismatchServiceDeviceCountsAsserts(self):
|
| +    """Test that extra GPU service traces cause a mismatch assertion."""
|
| + model = model_module.TimelineModel()
|
| + test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
|
| + for index in xrange(100):
|
| + service_item, device_item = _CreateGPUAsyncSlices('test_item', 100, 10)
|
| + test_thread.AddAsyncSlice(service_item)
|
| + if index % 2 == 0:
|
| + test_thread.AddAsyncSlice(device_item)
|
| + model.FinalizeImport()
|
| +
|
| + metric = gpu_timeline.GPUTimelineMetric()
|
| + self.assertRaises(AssertionError, self.GetResults,
|
| + metric, model=model, renderer_thread=test_thread,
|
| + interaction_records=INTERACTION_RECORDS)
|
| +
|
| + def testFewExtraServiceTracesDoesNotAssert(self):
|
| +    """Test that a few extra GPU service traces do not cause an assertion."""
|
| + model = model_module.TimelineModel()
|
| + test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
|
| + for index in xrange(4):
|
| + service_item, device_item = _CreateGPUAsyncSlices('test_item', 100, 10)
|
| + test_thread.AddAsyncSlice(service_item)
|
| + if index % 2 == 0:
|
| + test_thread.AddAsyncSlice(device_item)
|
| + model.FinalizeImport()
|
| +
|
| + metric = gpu_timeline.GPUTimelineMetric()
|
| + results = self.GetResults(metric, model=model, renderer_thread=test_thread,
|
| + interaction_records=INTERACTION_RECORDS)
|
| +
|
| + self.assertTrue(results)
|
| +
|
| + def testFewExtraDeviceTracesDoesAssert(self):
|
| +    """Test that a few extra GPU device traces cause a mismatch assertion."""
|
| + model = model_module.TimelineModel()
|
| + test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
|
| + for index in xrange(4):
|
| + service_item, device_item = _CreateGPUAsyncSlices('test_item', 100, 10)
|
| + test_thread.AddAsyncSlice(device_item)
|
| + if index % 2 == 0:
|
| + test_thread.AddAsyncSlice(service_item)
|
| + model.FinalizeImport()
|
| +
|
| + metric = gpu_timeline.GPUTimelineMetric()
|
| + self.assertRaises(AssertionError, self.GetResults,
|
| + metric, model=model, renderer_thread=test_thread,
|
| + interaction_records=INTERACTION_RECORDS)
|
| +
|
| + def testMismatchingServiceDeviceNameAsserts(self):
|
| + """Test that service/device trace names must match."""
|
| + model = model_module.TimelineModel()
|
| + test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
|
| + service_item, device_item = _CreateGPUAsyncSlices('test_item', 100, 10)
|
| + device_item.name = 'test_item_2'
|
| + test_thread.AddAsyncSlice(device_item)
|
| + test_thread.AddAsyncSlice(service_item)
|
| + model.FinalizeImport()
|
| +
|
| + metric = gpu_timeline.GPUTimelineMetric()
|
| + self.assertRaises(AssertionError, self.GetResults,
|
| + metric, model=model, renderer_thread=test_thread,
|
| + interaction_records=INTERACTION_RECORDS)
|
| +
|
| + def testFrameSeparation(self):
|
| + """Test frames are correctly calculated using the frame end marker."""
|
| + model = model_module.TimelineModel()
|
| + test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
|
| +
|
| +    # First frame lasts 10 milliseconds.
|
| + for slice_item in _CreateGPUAsyncSlices('test_item', 100, 10):
|
| + test_thread.AddAsyncSlice(slice_item)
|
| + test_thread.BeginSlice(FRAME_END_CATEGORY, FRAME_END_NAME, 105)
|
| + test_thread.EndSlice(110)
|
| +
|
| +    # Second frame lasts 20 milliseconds.
|
| + for slice_item in _CreateGPUAsyncSlices('test_item', 110, 20):
|
| + test_thread.AddAsyncSlice(slice_item)
|
| +
|
| + model.FinalizeImport()
|
| +
|
| + metric = gpu_timeline.GPUTimelineMetric()
|
| + results = self.GetResults(metric, model=model, renderer_thread=test_thread,
|
| + interaction_records=INTERACTION_RECORDS)
|
| +
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_frame_max', 'ms', 20)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_frame_avg', 'ms', 15)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + 'total_frame_stddev', 'ms', 5)
|
| +
|
| + def testTrackedNameTraces(self):
|
| + """Be sure tracked names are being recorded correctly."""
|
| + self.assertGreater(len(gpu_timeline.TRACKED_NAMES), 0)
|
| +
|
| + marker_name, result_name = gpu_timeline.TRACKED_NAMES.iteritems().next()
|
| +
|
| + model = model_module.TimelineModel()
|
| + test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
|
| + for slice_item in _CreateGPUAsyncSlices(marker_name, 100, 10):
|
| + test_thread.AddAsyncSlice(slice_item)
|
| + model.FinalizeImport()
|
| +
|
| + metric = gpu_timeline.GPUTimelineMetric()
|
| + results = self.GetResults(metric, model=model, renderer_thread=test_thread,
|
| + interaction_records=INTERACTION_RECORDS)
|
| +
|
| + results.AssertHasPageSpecificScalarValue(
|
| + result_name + '_service_max', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + result_name + '_service_avg', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + result_name + '_service_stddev', 'ms', 0)
|
| +
|
| + def testTrackedNameWithContextIDTraces(self):
|
| + """Be sure tracked names with context IDs are recorded correctly."""
|
| + self.assertGreater(len(gpu_timeline.TRACKED_NAMES), 0)
|
| +
|
| + marker_name, result_name = gpu_timeline.TRACKED_NAMES.iteritems().next()
|
| + context_id = '-0x1234'
|
| +
|
| + model = model_module.TimelineModel()
|
| + test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
|
| + for slice_item in _CreateGPUAsyncSlices(marker_name + context_id, 100, 10):
|
| + test_thread.AddAsyncSlice(slice_item)
|
| + model.FinalizeImport()
|
| +
|
| + metric = gpu_timeline.GPUTimelineMetric()
|
| + results = self.GetResults(metric, model=model, renderer_thread=test_thread,
|
| + interaction_records=INTERACTION_RECORDS)
|
| +
|
| + results.AssertHasPageSpecificScalarValue(
|
| + result_name + '_service_max', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + result_name + '_service_avg', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + result_name + '_service_stddev', 'ms', 0)
|
| +
|
| + def testOutOfOrderDeviceTraces(self):
|
| +    """Out-of-order device traces are still matched to the correct services."""
|
| + self.assertGreaterEqual(len(gpu_timeline.TRACKED_NAMES), 2)
|
| +
|
| + tracked_names_iter = gpu_timeline.TRACKED_NAMES.iteritems()
|
| + marker1_name, result1_name = tracked_names_iter.next()
|
| + marker2_name, result2_name = tracked_names_iter.next()
|
| +
|
| + model = model_module.TimelineModel()
|
| + test_thread = model.GetOrCreateProcess(1).GetOrCreateThread(2)
|
| +
|
| +    # marker1 lasts for 10 milliseconds.
|
| + service_item1, device_item1 = _CreateGPUAsyncSlices(marker1_name, 100, 10)
|
| +    # marker2 lasts for 20 milliseconds.
|
| + service_item2, device_item2 = _CreateGPUAsyncSlices(marker2_name, 200, 20)
|
| +
|
| +    # Append out of order: device slices follow the services, in reverse order.
|
| + test_thread.AddAsyncSlice(service_item1)
|
| + test_thread.AddAsyncSlice(service_item2)
|
| + test_thread.AddAsyncSlice(device_item2)
|
| + test_thread.AddAsyncSlice(device_item1)
|
| +
|
| + model.FinalizeImport()
|
| +
|
| + metric = gpu_timeline.GPUTimelineMetric()
|
| + results = self.GetResults(metric, model=model, renderer_thread=test_thread,
|
| + interaction_records=INTERACTION_RECORDS)
|
| +
|
| + results.AssertHasPageSpecificScalarValue(
|
| + result1_name + '_service_max', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + result1_name + '_service_avg', 'ms', 10)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + result1_name + '_service_stddev', 'ms', 0)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + result2_name + '_service_max', 'ms', 20)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + result2_name + '_service_avg', 'ms', 20)
|
| + results.AssertHasPageSpecificScalarValue(
|
| + result2_name + '_service_stddev', 'ms', 0)
|
|
|