Chromium Code Reviews

Unified Diff: build/android/pylib/perf_tests_helper.py

Issue 24253006: Android / Telemetry: final cleanup on perf_tests_helper. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: lint Created 7 years, 3 months ago
Index: build/android/pylib/perf_tests_helper.py
diff --git a/build/android/pylib/perf_tests_helper.py b/build/android/pylib/perf_tests_helper.py
deleted file mode 100644
index 93cabcd71e205083f9dbbfc827c4bab46352ac92..0000000000000000000000000000000000000000
--- a/build/android/pylib/perf_tests_helper.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# Copyright (c) 2012 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import json
-import logging
-import math
-import re
-import sys
-
-import android_commands
-
-from perf import cache_control
-from perf import perf_control
-
-
-# Valid values of result type.
-RESULT_TYPES = {'unimportant': 'RESULT ',
-                'default': '*RESULT ',
-                'informational': '',
-                'unimportant-histogram': 'HISTOGRAM ',
-                'histogram': '*HISTOGRAM '}
-
-
-def _EscapePerfResult(s):
-  """Escapes |s| for use in a perf result."""
-  return re.sub('[\:|=/#&,]', '_', s)
-
-
-def _Flatten(values):
-  """Returns a simple list without sub-lists."""
-  ret = []
-  for entry in values:
-    if isinstance(entry, list):
-      ret.extend(_Flatten(entry))
-    else:
-      ret.append(entry)
-  return ret
-
-
-def GeomMeanAndStdDevFromHistogram(histogram_json):
-  histogram = json.loads(histogram_json)
-  # Handle empty histograms gracefully.
-  if not 'buckets' in histogram:
-    return 0.0, 0.0
-  count = 0
-  sum_of_logs = 0
-  for bucket in histogram['buckets']:
-    if 'high' in bucket:
-      bucket['mean'] = (bucket['low'] + bucket['high']) / 2.0
-    else:
-      bucket['mean'] = bucket['low']
-    if bucket['mean'] > 0:
-      sum_of_logs += math.log(bucket['mean']) * bucket['count']
-      count += bucket['count']
-
-  if count == 0:
-    return 0.0, 0.0
-
-  sum_of_squares = 0
-  geom_mean = math.exp(sum_of_logs / count)
-  for bucket in histogram['buckets']:
-    if bucket['mean'] > 0:
-      sum_of_squares += (bucket['mean'] - geom_mean) ** 2 * bucket['count']
-  return geom_mean, math.sqrt(sum_of_squares / count)
-
-
-def _MeanAndStdDevFromList(values):
-  avg = None
-  sd = None
-  if len(values) > 1:
-    try:
-      value = '[%s]' % ','.join([str(v) for v in values])
-      avg = sum([float(v) for v in values]) / len(values)
-      sqdiffs = [(float(v) - avg) ** 2 for v in values]
-      variance = sum(sqdiffs) / (len(values) - 1)
-      sd = math.sqrt(variance)
-    except ValueError:
-      value = ", ".join(values)
-  else:
-    value = values[0]
-  return value, avg, sd
-
-
-def PrintPages(page_list):
-  """Prints list of pages to stdout in the format required by perf tests."""
-  print 'Pages: [%s]' % ','.join([_EscapePerfResult(p) for p in page_list])
-
-
-def PrintPerfResult(measurement, trace, values, units, result_type='default',
-                    print_to_stdout=True):
-  """Prints numerical data to stdout in the format required by perf tests.
-
-  The string args may be empty but they must not contain any colons (:) or
-  equals signs (=).
-
-  Args:
-    measurement: A description of the quantity being measured, e.g. "vm_peak".
-    trace: A description of the particular data point, e.g. "reference".
-    values: A list of numeric measured values. An N-dimensional list will be
-        flattened and treated as a simple list.
-    units: A description of the units of measure, e.g. "bytes".
-    result_type: Accepts values of RESULT_TYPES.
-    print_to_stdout: If True, also prints the formatted output to stdout; the
-        string is returned to the caller either way.
-
-  Returns:
-    String of the formatted perf result.
-  """
-  assert result_type in RESULT_TYPES, 'result type: %s is invalid' % result_type
-
-  trace_name = _EscapePerfResult(trace)
-
-  if result_type in ['unimportant', 'default', 'informational']:
-    assert isinstance(values, list)
-    assert len(values)
-    assert '/' not in measurement
-    value, avg, sd = _MeanAndStdDevFromList(_Flatten(values))
-    output = '%s%s: %s%s%s %s' % (
-        RESULT_TYPES[result_type],
-        _EscapePerfResult(measurement),
-        trace_name,
-        # Omit the equals sign when the trace is empty; this usually happens
-        # when the measurement name alone describes the result clearly enough.
-        '= ' if trace_name else '',
-        value,
-        units)
-  else:
-    assert(result_type in ['histogram', 'unimportant-histogram'])
-    assert isinstance(values, list)
-    # Histograms can only be printed individually; there is no computation
-    # across different histograms.
-    assert len(values) == 1
-    value = values[0]
-    output = '%s%s: %s= %s %s' % (
-        RESULT_TYPES[result_type],
-        _EscapePerfResult(measurement),
-        trace_name,
-        value,
-        units)
-    avg, sd = GeomMeanAndStdDevFromHistogram(value)
-
-  if avg:
-    output += '\nAvg %s: %f%s' % (measurement, avg, units)
-  if sd:
-    output += '\nSd %s: %f%s' % (measurement, sd, units)
-  if print_to_stdout:
-    print output
-    sys.stdout.flush()
-  return output
-
-
-# TODO(bulach): remove once all references to PerfControl are fixed.
-class CacheControl(cache_control.CacheControl):
-  def __init__(self, adb):
-    super(CacheControl, self).__init__(adb)
-
-class PerfControl(perf_control.PerfControl):
-  def __init__(self, adb):
-    super(PerfControl, self).__init__(adb)
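
Note: the deleted logic lives on in build/util/lib/common/perf_tests_results_helper.py, the next file in this patch. As a reference for reviewers, here is a minimal Python 3 sketch of the output format the scalar branch of PrintPerfResult emits; the helper names and sample numbers below are invented for illustration and are not part of this CL.

import math
import re

def _escape(s):
  # Mirrors _EscapePerfResult: each of : | = / # & , becomes '_'.
  return re.sub(r'[:|=/#&,]', '_', s)

def format_scalar_result(measurement, trace, values, units, important=True):
  # Sketch of the 'default'/'unimportant' branch of PrintPerfResult; assumes
  # len(values) > 1 so an average and a sample std dev exist.
  avg = sum(values) / len(values)
  sd = math.sqrt(sum((v - avg) ** 2 for v in values) / (len(values) - 1))
  value = '[%s]' % ','.join(str(v) for v in values)
  trace_name = _escape(trace)
  line = '%s%s: %s%s%s %s' % ('*RESULT ' if important else 'RESULT ',
                              _escape(measurement), trace_name,
                              '= ' if trace_name else '', value, units)
  return line + '\nAvg %s: %f%s\nSd %s: %f%s' % (
      measurement, avg, units, measurement, sd, units)

print(format_scalar_result('page_load_time', 'warm', [3.2, 3.4, 3.9], 'ms'))
# *RESULT page_load_time: warm= [3.2,3.4,3.9] ms
# Avg page_load_time: 3.500000ms
# Sd page_load_time: 0.360555ms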
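
Similarly, the histogram branch hands each value to GeomMeanAndStdDevFromHistogram, whose expected input shape is easiest to see with a concrete value. Below is a self-contained sketch of the same computation; the bucket values are made up for illustration.

import json
import math

# Input shape assumed by GeomMeanAndStdDevFromHistogram: a JSON string with a
# 'buckets' list; each bucket carries 'low', 'count', and optionally 'high'.
hist = json.loads('{"buckets": [{"low": 1, "high": 3, "count": 2},'
                  ' {"low": 4, "count": 1}]}')

# A bucket's representative value is (low + high) / 2, or low alone when the
# bucket has no 'high' key; only buckets with a positive mean contribute.
means = [((b['low'] + b['high']) / 2.0 if 'high' in b else b['low'], b['count'])
         for b in hist['buckets']]
count = sum(c for m, c in means if m > 0)

# Geometric mean: exp of the count-weighted average of log(bucket mean).
geom_mean = math.exp(sum(c * math.log(m) for m, c in means if m > 0) / count)
# The returned "std dev" is the count-weighted spread of bucket means around
# the geometric mean.
sd = math.sqrt(sum(c * (m - geom_mean) ** 2 for m, c in means if m > 0) / count)

print(geom_mean, sd)  # ~2.5198 and ~0.9542 for the buckets above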