Chromium Code Reviews
| Index: tools/perf/metrics/timeline.py |
| diff --git a/tools/perf/metrics/timeline.py b/tools/perf/metrics/timeline.py |
| index 1a1ca25e219edde5f445920118c05da1725b3db6..0412f9f81bb4df7a56b4baf0eb8211de8922ae0a 100644 |
| --- a/tools/perf/metrics/timeline.py |
| +++ b/tools/perf/metrics/timeline.py |
| @@ -127,15 +127,21 @@ def ThreadCategoryName(thread_name): |
| thread_category = TimelineThreadCategories[thread_name] |
| return thread_category |
| -def ThreadCpuTimeResultName(thread_category): |
| +def ThreadCpuTimeResultName(thread_category, measure_per_frame=True): |
|
jdduke (slow)
2015/04/29 18:07:17
I'm not married to the |measure_per_frame| name.
nednguyen
2015/04/30 05:07:45
I don't think these helper functions add any value
jdduke (slow)
2015/05/01 15:51:44
Done.
|
| # This isn't a good name, but I don't want to change it and lose continuity. |
| - return "thread_" + thread_category + "_cpu_time_per_frame" |
| + if measure_per_frame: |
| + return "thread_" + thread_category + "_cpu_time_per_frame" |
| + return "thread_" + thread_category + "_cpu_time_total" |
|
nednguyen
2015/04/30 05:07:45
How stable is this thread_X_cpu_time_total metric?
jdduke (slow)
2015/04/30 14:29:39
In local testing, it's quite stable. Part of that
|
| -def ThreadTasksResultName(thread_category): |
| - return "tasks_per_frame_" + thread_category |
| +def ThreadTasksResultName(thread_category, measure_per_frame=True): |
| + if measure_per_frame: |
| + return "tasks_per_frame_" + thread_category |
| + return "tasks_" + thread_category |
|
nednguyen
2015/04/30 05:07:45
Same here, I think you should come up with a new T
jdduke (slow)
2015/04/30 14:29:39
Will do.
|
| -def ThreadMeanFrameTimeResultName(thread_category): |
| - return "mean_frame_time_" + thread_category |
| +def ThreadFrameTimeResultName(thread_category, measure_per_frame=True): |
| + if measure_per_frame: |
| + return "mean_frame_time_" + thread_category |
| + return "total_frame_time_" + thread_category |
| def ThreadDetailResultName(thread_category, detail): |
| detail_sanitized = detail.replace('.','_') |
| @@ -143,7 +149,7 @@ def ThreadDetailResultName(thread_category, detail): |
| class ResultsForThread(object): |
| - def __init__(self, model, record_ranges, name): |
| + def __init__(self, model, record_ranges, name, measure_per_frame): |
| self.model = model |
| self.toplevel_slices = [] |
| self.all_slices = [] |
| @@ -151,6 +157,7 @@ class ResultsForThread(object): |
| self.record_ranges = record_ranges |
| self.all_action_time = \ |
| sum([record_range.bounds for record_range in self.record_ranges]) |
| + self.measure_per_frame = measure_per_frame |
| @property |
| def clock_time(self): |
| @@ -191,25 +198,31 @@ class ResultsForThread(object): |
| # Currently we report cpu-time per frame, tasks per frame, and possibly |
| # the mean frame (if there is a trace specified to find it). |
| def AddResults(self, num_frames, results): |
| - cpu_per_frame = Rate(self.cpu_time, num_frames) |
| - tasks_per_frame = Rate(len(self.toplevel_slices), num_frames) |
| + num_intervals = num_frames if self.measure_per_frame else 1 |
| + cpu_per_interval = Rate(self.cpu_time, num_intervals) |
| + tasks_per_interval = Rate(len(self.toplevel_slices), num_intervals) |
| results.AddValue(scalar.ScalarValue( |
| - results.current_page, ThreadCpuTimeResultName(self.name), |
| - 'ms', cpu_per_frame)) |
| + results.current_page, |
| + ThreadCpuTimeResultName(self.name, self.measure_per_frame), 'ms', |
| + cpu_per_interval)) |
| results.AddValue(scalar.ScalarValue( |
| - results.current_page, ThreadTasksResultName(self.name), |
| - 'tasks', tasks_per_frame)) |
| + results.current_page, |
| + ThreadTasksResultName(self.name, self.measure_per_frame), |
| + 'tasks', tasks_per_interval)) |
| # Report mean frame time if this is the thread we are using for normalizing |
| # other results. We could report other frame rates (eg. renderer_main) but |
| # this might get confusing. |
| if self.name == FrameTraceThreadName: |
| - num_frames = self.CountTracesWithName(FrameTraceName) |
| - mean_frame_time = Rate(self.all_action_time, num_frames) |
| + num_intervals = self.CountTracesWithName(FrameTraceName) \ |
| + if self.measure_per_frame else 1 |
| + frame_time = Rate(self.all_action_time, num_intervals) |
| results.AddValue(scalar.ScalarValue( |
| - results.current_page, ThreadMeanFrameTimeResultName(self.name), |
| - 'ms', mean_frame_time)) |
| + results.current_page, |
| + ThreadFrameTimeResultName(self.name, self.measure_per_frame), |
| + 'ms', frame_time)) |
| def AddDetailedResults(self, num_frames, results): |
| + num_intervals = num_frames if self.measure_per_frame else 1 |
| slices_by_category = collections.defaultdict(list) |
| for s in self.all_slices: |
| slices_by_category[s.category].append(s) |
| @@ -217,13 +230,15 @@ class ResultsForThread(object): |
| for category, slices_in_category in slices_by_category.iteritems(): |
| self_time = sum([x.self_time for x in slices_in_category]) |
| all_self_times.append(self_time) |
| - self_time_result = (float(self_time) / num_frames) if num_frames else 0 |
| + self_time_result = ( |
| + (float(self_time) / num_intervals) if num_intervals else 0) |
| results.AddValue(scalar.ScalarValue( |
| results.current_page, ThreadDetailResultName(self.name, category), |
| 'ms', self_time_result)) |
| all_measured_time = sum(all_self_times) |
| idle_time = max(0, self.all_action_time - all_measured_time) |
| - idle_time_result = (float(idle_time) / num_frames) if num_frames else 0 |
| + idle_time_result = ( |
| + (float(idle_time) / num_intervals) if num_intervals else 0) |
| results.AddValue(scalar.ScalarValue( |
| results.current_page, ThreadDetailResultName(self.name, "idle"), |
| 'ms', idle_time_result)) |
| @@ -235,19 +250,22 @@ class ResultsForThread(object): |
| count += 1 |
| return count |
| + |
| class ThreadTimesTimelineMetric(timeline_based_metric.TimelineBasedMetric): |
| - def __init__(self): |
| + def __init__(self, measure_per_frame=True): |
|
nednguyen
2015/04/30 05:07:45
Can you refactor this differently so we have two d
jdduke (slow)
2015/04/30 14:29:39
Sure, what about a PerFrameThreadTimesTimelineMetr
|
| super(ThreadTimesTimelineMetric, self).__init__() |
| # Minimal traces, for minimum noise in CPU-time measurements. |
| self.results_to_report = AllThreads |
| self.details_to_report = NoThreads |
| + self.measure_per_frame = measure_per_frame |
| def AddResults(self, model, _, interaction_records, results): |
| # Set up each thread category for consistant results. |
| thread_category_results = {} |
| for name in TimelineThreadCategories.values(): |
| thread_category_results[name] = ResultsForThread( |
| - model, [r.GetBounds() for r in interaction_records], name) |
| + model, [r.GetBounds() for r in interaction_records], name, |
| + self.measure_per_frame) |
| # Group the slices by their thread category. |
| for thread in model.GetAllThreads(): |
| @@ -275,3 +293,15 @@ class ThreadTimesTimelineMetric(timeline_based_metric.TimelineBasedMetric): |
| # can be replaced with a generic UI feature. |
| if thread_results.name in self.details_to_report: |
| thread_results.AddDetailedResults(num_frames, results) |
| + |
| + def ThreadDetailResultName(self, thread_category, detail): |
| + return ThreadDetailResultName(thread_category, detail) |
| + |
| + def ThreadCpuTimeResultName(self, thread_category): |
| + return ThreadCpuTimeResultName(thread_category, self.measure_per_frame) |
| + |
| + def ThreadTasksResultName(self, thread_category): |
| + return ThreadTasksResultName(thread_category, self.measure_per_frame) |
| + |
| + def ThreadFrameTimeResultName(self, thread_category): |
| + return ThreadFrameTimeResultName(thread_category, self.measure_per_frame) |