| OLD | NEW |
| --- | --- |
| 1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 import os | 5 import os |
| 6 import time | 6 import time |
| 7 | 7 |
| 8 from . import parse_metric | 8 from . import parse_metric |
| 9 | 9 |
| 10 | 10 |
| 11 def run_perf_test(api, test_config): | 11 def run_perf_test(api, test_config): |
| 12 """Runs the command N times and parses a metric from the output.""" | 12 """Runs the command N times and parses a metric from the output.""" |
| 13 limit = time.time() + test_config['timeout_seconds'] | 13 limit = time.time() + test_config['timeout_seconds'] |
| 14 values = [] | 14 values = [] |
| 15 metric = test_config['metric'].split('/') | 15 metric = test_config['metric'].split('/') |
| 16 for i in range(test_config['repeat_count']): | 16 for i in range(test_config['repeat_count']): |
| 17 if time.time() < limit: | 17 if time.time() < limit: |
| 18 command_name = "Performance Test %d/%d" % (i + 1, | 18 command_name = "Performance Test %d/%d" % (i + 1, |
| 19 test_config['repeat_count']) | 19 test_config['repeat_count']) |
| 20 if api.m.platform.is_linux: | 20 if api.m.platform.is_linux: |
| 21 os.environ['CHROME_DEVEL_SANDBOX'] = api.m.path.join( | 21 os.environ['CHROME_DEVEL_SANDBOX'] = api.m.path.join( |
| 22 '/opt', 'chromium', 'chrome_sandbox') | 22 '/opt', 'chromium', 'chrome_sandbox') |
| 23 out, err = _run_command(api, test_config['command'], command_name) | 23 out, err = _run_command(api, test_config['command'], command_name) |
| 24 if out is None and err is None: | 24 if out is None and err is None: |
| 25 #dummy value when running test TODO: replace with a mock | 25 # Dummy value used when running tests. TODO: replace with a mock. |
| 26 values.append(0) | 26 values.append(0) |
| 27 else: # pragma: no cover | 27 else: # pragma: no cover |
| 28 valid_value, value = parse_metric.parse_metric(out, err, metric) | 28 valid_value, value = parse_metric.parse_metric(out, err, metric) |
| 29 assert valid_value | 29 assert valid_value |
| 30 values.extend(value) | 30 values.extend(value) |
| 31 else: # pragma: no cover | 31 else: # pragma: no cover |
| 32 break | 32 break |
| 33 return values | 33 return values |
| 34 | 34 |
| 35 | 35 |
| 36 def truncate_and_aggregate(api, values, truncate_percent): | 36 def truncate_and_aggregate(api, values, truncate_percent): |
| 37 truncate_proportion = truncate_percent / 100.0 | 37 truncate_proportion = truncate_percent / 100.0 |
| 38 mean = api.m.math_utils.truncated_mean(values, truncate_proportion) | 38 mean = api.m.math_utils.truncated_mean(values, truncate_proportion) |
| 39 std_err = api.m.math_utils.standard_error(values) | 39 std_err = api.m.math_utils.standard_error(values) |
| 40 return {'mean': mean, 'std_err': std_err, 'values': values} | 40 return {'mean': mean, 'std_err': std_err, 'values': values} |
| 41 | 41 |
| 42 | 42 |
| 43 def _run_command(api, command, command_name): | 43 def _run_command(api, command, command_name): |
| 44 command_parts = command.split() | 44 command_parts = command.split() |
| 45 stdout = api.m.raw_io.output() | 45 stdout = api.m.raw_io.output() |
| 46 stderr = api.m.raw_io.output() | 46 stderr = api.m.raw_io.output() |
| 47 step_result = api.m.step( | 47 step_result = api.m.step( |
| 48 command_name, | 48 command_name, |
| 49 command_parts, | 49 command_parts, |
| 50 stdout=stdout, | 50 stdout=stdout, |
| 51 stderr=stderr) | 51 stderr=stderr) |
| 52 return step_result.stdout, step_result.stderr | 52 return step_result.stdout, step_result.stderr |
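The `parse_metric.parse_metric` call above returns a validity flag plus a list of sample values for a `chart/trace` metric path; the module itself sits outside this diff. A minimal sketch of that contract, assuming Chromium's `RESULT <chart>: <trace>= <values> <units>` perf-log line format — the pattern and body below are an illustrative reading, not the module's actual code:

```python
import re

def parse_metric(stdout, stderr, metric):
    """Hypothetical sketch: collect samples for metric == [chart, trace]."""
    chart, trace = metric
    # Matches lines like "RESULT warm_times: page_load_time= [2.1,2.3] ms"
    # or "RESULT telemetry: num_failed= 0 count".
    pattern = re.compile(r'RESULT %s: %s=\s*\[?([-0-9.,\s]+)\]?'
                         % (re.escape(chart), re.escape(trace)))
    values = []
    for text in (stdout or '', stderr or ''):
        for match in pattern.finditer(text):
            values.extend(float(v)
                          for v in match.group(1).replace(',', ' ').split())
    # The validity flag mirrors the assert in run_perf_test: True only
    # if at least one sample was found.
    return bool(values), values
```

Because `run_perf_test` calls `values.extend(value)` on the result, a single command invocation can contribute several samples to the aggregate.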
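Similarly, `truncate_and_aggregate` leans on two `math_utils` helpers the diff does not show. A plausible sketch, assuming `truncated_mean` drops the given proportion of samples from each tail before averaging and `standard_error` is the sample standard deviation over sqrt(n); only the names come from the recipe API, the bodies here are assumptions:

```python
import math

def truncated_mean(values, truncate_proportion):
    """Hypothetical sketch: mean after dropping a proportion from each tail."""
    ordered = sorted(values)
    k = int(len(ordered) * truncate_proportion)
    kept = ordered[k:len(ordered) - k] if k else ordered
    return sum(kept) / float(len(kept))

def standard_error(values):
    """Hypothetical sketch: sample standard deviation over sqrt(n)."""
    n = len(values)
    mean = sum(values) / float(n)
    variance = sum((v - mean) ** 2 for v in values) / (n - 1)
    return math.sqrt(variance / n)

print(truncated_mean([1, 2, 3, 100], 0.25))  # 2.5 -- drops 1 and 100
print(standard_error([10.0, 12.0, 11.0]))    # ~0.577
```

Truncating the tails makes the aggregate robust against outlier runs, which matters when noisy per-run samples are compared between revisions.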