Chromium Code Reviews| Index: testing/scripts/run_gtest_perf_test.py |
| diff --git a/testing/scripts/run_gtest_perf_test.py b/testing/scripts/run_gtest_perf_test.py |
| index ba4746cea1b20ba98cca1eb8ed65bc99b0eff75c..e4b1d968867cb53f38fbbd88311b549be355a1b1 100755 |
| --- a/testing/scripts/run_gtest_perf_test.py |
| +++ b/testing/scripts/run_gtest_perf_test.py |
| @@ -29,6 +29,18 @@ import traceback |
| import common |
| + |
| +def GetChromiumSrcDir(): |
| + return os.path.abspath( |
| + os.path.join(os.path.abspath(__file__), '..', '..', '..')) |
| + |
| +def GetPerfDir(): |
| + return os.path.join(GetChromiumSrcDir(), 'tools', 'perf') |
| +# Add src/tools/perf wheree generate_legacy_perf_dashboard_json.py lives |
|
Ken Russell (switch to Gerrit)
2016/11/04 15:16:16
typo: wheree
eyaich1
2016/11/04 17:33:02
Done.
|
| +sys.path.append(GetPerfDir()) |
| + |
| +import generate_legacy_perf_dashboard_json |
| + |
| # Add src/testing/ into sys.path for importing xvfb. |
| sys.path.append(os.path.join(os.path.dirname(__file__), '..')) |
| import xvfb |
| @@ -78,24 +90,28 @@ def main(): |
| executable = '.\%s.exe' % executable |
| else: |
| executable = './%s' % executable |
| - |
| - rc = common.run_command_with_output([executable] + [ |
| - '--write-abbreviated-json-results-to', args.isolated_script_test_output, |
| - ], env=env, stdoutfile=args.isolated_script_test_chartjson_output) |
| - |
| - # Now get the correct json format from the stdout to write to the |
| - # perf results file |
| + with common.temporary_file() as tempfile_path: |
| + valid = (common.run_command_with_output([executable], |
| + env=env, stdoutfile=tempfile_path) == 0) |
|
eakuefner
2016/11/04 16:58:56
style nit: indent +4 spaces instead of +2.
eyaich1
2016/11/04 17:33:02
Done.
|
| + |
| + # Now get the correct json format from the stdout to write to the |
| + # perf results file |
| + results_processor = \ |
|
eakuefner
2016/11/04 16:58:56
style nit: no backslash line continuations in python
eyaich1
2016/11/04 17:33:02
Done.
|
| + generate_legacy_perf_dashboard_json.LegacyResultsProcessor() |
| + charts = results_processor.GenerateJsonResults(tempfile_path) |
| + # Write the returned encoded json to the charts output file |
| + with open(args.isolated_script_test_chartjson_output, 'w') as f: |
| + f.write(charts) |
| except Exception: |
| traceback.print_exc() |
| valid = False |
| - if not valid: |
| - failures = ['(entire test suite)'] |
| - with open(args.isolated_script_test_output, 'w') as fp: |
| - json.dump({ |
| - 'valid': valid, |
| - 'failures': failures, |
| - }, fp) |
| + failures = [] if valid else ['(entire test suite)'] |
| + with open(args.isolated_script_test_output, 'w') as fp: |
| + json.dump({ |
| + 'valid': valid, |
| + 'failures': failures, |
| + }, fp) |
| return rc |