| Index: build/android/pylib/perf/perf_test_instance.py
|
| diff --git a/build/android/pylib/perf/perf_test_instance.py b/build/android/pylib/perf/perf_test_instance.py
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..548708b7658439324b813c7a8b16085ac6e5de54
|
| --- /dev/null
|
| +++ b/build/android/pylib/perf/perf_test_instance.py
|
| @@ -0,0 +1,183 @@
|
| +# Copyright 2016 The Chromium Authors. All rights reserved.
|
| +# Use of this source code is governed by a BSD-style license that can be
|
| +# found in the LICENSE file.
|
| +
|
| +import json
|
| +import logging
|
| +import os
|
| +import pickle
|
| +
|
| +from pylib import constants
|
| +from pylib.base import test_instance
|
| +from pylib.constants import exit_codes
|
| +
|
| +
|
def _GetPersistedResult(test_name):
  """Loads the pickled result previously persisted for a test step.

  Args:
    test_name: Name of the test step; used as the file name under
        constants.PERF_OUTPUT_DIR.

  Returns:
    The unpickled result object, or None if no persisted file exists.
  """
  file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
  if not os.path.exists(file_name):
    logging.error('File not found %s', file_name)
    return None

  # Open in binary mode: pickle data is bytes, and text mode would corrupt
  # it on Windows via newline translation. open() replaces the deprecated
  # file() builtin (which no longer exists in Python 3).
  with open(file_name, 'rb') as f:
    return pickle.load(f)
|
| +
|
| +
|
class PerfTestInstance(test_instance.TestInstance):
  """Test instance describing a run of perf test steps.

  Wraps the command-line arguments that configure a perf run and provides
  helpers to report results persisted by previously executed steps.
  """

  def __init__(self, args, _):
    super(PerfTestInstance, self).__init__()

    if args.single_step:
      # The single step command arrives as an argv list; collapse it into
      # one shell-style command string.
      args.single_step = ' '.join(args.single_step_command)

    self._collect_chartjson_data = args.collect_chartjson_data
    self._dry_run = args.dry_run
    self._flaky_steps = args.flaky_steps
    self._get_output_dir_archive = args.get_output_dir_archive
    self._known_devices_file = args.known_devices_file
    self._max_battery_temp = args.max_battery_temp
    self._min_battery_level = args.min_battery_level
    self._no_timeout = args.no_timeout
    self._output_chartjson_data = args.output_chartjson_data
    self._output_json_list = args.output_json_list
    self._print_step = args.print_step
    self._single_step = args.single_step
    self._steps = args.steps
    self._test_filter = args.test_filter

  def SetUp(self):
    pass

  def TearDown(self):
    pass

  def OutputJsonList(self):
    """Writes a JSON summary of all steps and their persisted results.

    Reads the steps config from self._steps, augments each step with timing
    and archive info from its persisted result (if any), and writes the
    resulting list to self._output_json_list.

    Returns:
      0 (success).
    """
    # open() replaces the deprecated file() builtin throughout this class.
    with open(self._steps, 'r') as i:
      all_steps = json.load(i)

    step_values = []
    for step_name, step_config in all_steps['steps'].items():
      data = {
          'test': step_name,
          'device_affinity': step_config['device_affinity'],
      }

      persisted_result = _GetPersistedResult(step_name)
      if persisted_result:
        data['start_time'] = persisted_result['start_time']
        data['end_time'] = persisted_result['end_time']
        data['total_time'] = persisted_result['total_time']
        data['has_archive'] = persisted_result['archive_bytes'] is not None
      step_values.append(data)

    with open(self._output_json_list, 'w') as o:
      o.write(json.dumps(step_values))
    return 0

  def PrintTestOutput(self):
    """Helper method to print the output of previously executed test_name.

    Test_name is passed from the command line as print_step.

    Returns:
      exit code generated by the test step, or exit_codes.INFRA if no
      persisted result was found for the step.
    """
    persisted_result = _GetPersistedResult(self._print_step)
    if not persisted_result:
      return exit_codes.INFRA
    logging.info('*' * 80)
    logging.info('Output from:')
    logging.info(persisted_result['cmd'])
    logging.info('*' * 80)

    # Join once instead of repeated string concatenation; enumerate()
    # replaces the Python-2-only xrange(len(...)) index loop.
    output_formatted = ''.join(
        '\n\nOutput from run #%d:\n\n%s' % (i, output)
        for i, output in enumerate(persisted_result['output']))
    print(output_formatted)

    if self._output_chartjson_data:
      with open(self._output_chartjson_data, 'w') as f:
        f.write(persisted_result['chartjson'])

    if self._get_output_dir_archive:
      if persisted_result['archive_bytes'] is not None:
        # Archive bytes are binary data; write in 'wb' mode.
        with open(self._get_output_dir_archive, 'wb') as f:
          f.write(persisted_result['archive_bytes'])
      else:
        logging.error('The output dir was not archived.')

    return persisted_result['exit_code']

  # override
  def TestType(self):
    return 'Perf'

  @staticmethod
  def ReadChartjsonOutput(output_dir):
    """Reads the telemetry chartjson results file from an output directory.

    Args:
      output_dir: Directory expected to contain results-chart.json, or a
          falsy value to skip reading.

    Returns:
      The file contents as a string, or '' if the directory was not given
      or the file could not be read.
    """
    if not output_dir:
      return ''
    json_output_path = os.path.join(output_dir, 'results-chart.json')
    try:
      with open(json_output_path) as f:
        return f.read()
    except IOError:
      logging.exception('Exception when reading chartjson.')
      logging.error('This usually means that telemetry did not run, so it'
                    ' could not generate the file. Please check the device'
                    ' running the test.')
      return ''

  @property
  def collect_chartjson_data(self):
    return self._collect_chartjson_data

  @property
  def dry_run(self):
    return self._dry_run

  @property
  def flaky_steps(self):
    return self._flaky_steps

  @property
  def get_output_dir_archive(self):
    return self._get_output_dir_archive

  @property
  def known_devices_file(self):
    return self._known_devices_file

  @property
  def max_battery_temp(self):
    return self._max_battery_temp

  @property
  def min_battery_level(self):
    return self._min_battery_level

  @property
  def no_timeout(self):
    return self._no_timeout

  @property
  def output_chartjson_data(self):
    return self._output_chartjson_data

  @property
  def output_json_list(self):
    return self._output_json_list

  @property
  def print_step(self):
    return self._print_step

  @property
  def single_step(self):
    return self._single_step

  @property
  def steps(self):
    return self._steps

  @property
  def test_filter(self):
    return self._test_filter
|
|
|