Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(499)

Side by Side Diff: build/android/pylib/perf/test_runner.py

Issue 1140783002: Surface test times of android tests. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2013 The Chromium Authors. All rights reserved. 1 # Copyright 2013 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 """Runs perf tests. 5 """Runs perf tests.
6 6
7 Our buildbot infrastructure requires each slave to run steps serially. 7 Our buildbot infrastructure requires each slave to run steps serially.
8 This is sub-optimal for android, where these steps can run independently on 8 This is sub-optimal for android, where these steps can run independently on
9 multiple connected devices. 9 multiple connected devices.
10 10
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
59 import time 59 import time
60 60
61 from pylib import cmd_helper 61 from pylib import cmd_helper
62 from pylib import constants 62 from pylib import constants
63 from pylib import forwarder 63 from pylib import forwarder
64 from pylib.base import base_test_result 64 from pylib.base import base_test_result
65 from pylib.base import base_test_runner 65 from pylib.base import base_test_runner
66 from pylib.device import device_errors 66 from pylib.device import device_errors
67 67
68 68
def GetPersistedResult(test_name):
  """Loads the pickled result of a previously executed test.

  Args:
    test_name: name of the test whose persisted result should be loaded.

  Returns:
    The unpickled result (a dict with keys such as 'total_time'), or None
    if no result file exists for |test_name|.
  """
  file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name)
  if not os.path.exists(file_name):
    logging.error('File not found %s', file_name)
    return None

  # open() replaces the deprecated file() builtin; 'rb' is required for
  # pickle data on platforms that distinguish text and binary modes
  # (e.g. Windows). pickle.load() streams instead of buffering the whole
  # file via pickle.loads(f.read()).
  with open(file_name, 'rb') as f:
    return pickle.load(f)
77
78
def OutputJsonList(json_input, json_output):
  """Writes a JSON list describing each test step and its last run time.

  Reads the step definitions from |json_input| and, for each step, looks
  up the persisted result of a previous run (if any) to recover the
  step's total run time.

  Args:
    json_input: path to a JSON file containing a 'steps' dict mapping
        test name -> step info (must include 'device_affinity').
    json_output: path the resulting JSON list is written to.

  Returns:
    0 (success exit code).
  """
  with open(json_input, 'r') as i:
    all_steps = json.load(i)

  step_values = []
  for k, v in all_steps['steps'].iteritems():
    data = {
        'test': k,
        'device_affinity': v['device_affinity'],
        # -1 is the sentinel for "no persisted timing available yet";
        # consumers should treat it as unknown rather than a real time.
        'total_time': -1,
    }
    persisted_result = GetPersistedResult(k)
    if persisted_result:
      data['total_time'] = persisted_result['total_time']
    step_values.append(data)

  # json.dump writes directly to the file object; open() replaces the
  # deprecated file() builtin.
  with open(json_output, 'w') as o:
    json.dump(step_values, o)
  return 0
77 97
78 98
79 def PrintTestOutput(test_name, json_file_name=None): 99 def PrintTestOutput(test_name, json_file_name=None):
80 """Helper method to print the output of previously executed test_name. 100 """Helper method to print the output of previously executed test_name.
81 101
82 Args: 102 Args:
83 test_name: name of the test that has been previously executed. 103 test_name: name of the test that has been previously executed.
84 json_file_name: name of the file to output chartjson data to. 104 json_file_name: name of the file to output chartjson data to.
85 105
86 Returns: 106 Returns:
87 exit code generated by the test step. 107 exit code generated by the test step.
88 """ 108 """
89 file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name) 109 persisted_result = GetPersistedResult(test_name)
90 if not os.path.exists(file_name): 110 if not persisted_result:
91 logging.error('File not found %s', file_name)
92 return 1 111 return 1
93
94 with file(file_name, 'r') as f:
95 persisted_result = pickle.loads(f.read())
96 logging.info('*' * 80) 112 logging.info('*' * 80)
97 logging.info('Output from:') 113 logging.info('Output from:')
98 logging.info(persisted_result['cmd']) 114 logging.info(persisted_result['cmd'])
99 logging.info('*' * 80) 115 logging.info('*' * 80)
100 print persisted_result['output'] 116 print persisted_result['output']
101 117
102 if json_file_name: 118 if json_file_name:
103 with file(json_file_name, 'w') as f: 119 with file(json_file_name, 'w') as f:
104 f.write(persisted_result['chartjson']) 120 f.write(persisted_result['chartjson'])
105 121
(...skipping 223 matching lines...) Expand 10 before | Expand all | Expand 10 after
329 Returns: 345 Returns:
330 A tuple of (TestRunResults, retry). 346 A tuple of (TestRunResults, retry).
331 """ 347 """
332 _, result_type = self._LaunchPerfTest(test_name) 348 _, result_type = self._LaunchPerfTest(test_name)
333 results = base_test_result.TestRunResults() 349 results = base_test_result.TestRunResults()
334 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) 350 results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
335 retry = None 351 retry = None
336 if not results.DidRunPass(): 352 if not results.DidRunPass():
337 retry = test_name 353 retry = test_name
338 return results, retry 354 return results, retry
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698