 Chromium Code Reviews
 Chromium Code Reviews Issue 2479543002:
  Porting relevant legacy conversion code from performance_lp to src side  (Closed)
    
  
    Issue 2479543002:
  Porting relevant legacy conversion code from performance_lp to src side  (Closed) 
  | OLD | NEW | 
|---|---|
| (Empty) | |
| 1 #!/usr/bin/env python | |
| 2 # Copyright 2016 The Chromium Authors. All rights reserved. | |
| 3 # Use of this source code is governed by a BSD-style license that can be | |
| 4 # found in the LICENSE file. | |
| 5 | |
| 6 | |
| 7 """Unit tests for generating legacy perf dashboard json used by | |
| 
eakuefner
2016/11/04 16:58:56
I think that it is clear what this file does from
 
eyaich1
2016/11/04 17:33:03
Done.
 | |
| 8 run_test_perf_test.py.""" | |
| 9 | |
| 10 import json | |
| 11 import os | |
| 12 import unittest | |
| 13 | |
| 14 import generate_legacy_perf_dashboard_json | |
| 15 | |
| 16 class LegacyResultsProcessorUnittest(unittest.TestCase): | |
| 17 def setUp(self): | |
| 18 """Set up for all test method of each test method below.""" | |
| 19 super(LegacyResultsProcessorUnittest, self).setUp() | |
| 20 self.data_directory = os.path.join(os.path.dirname( | |
| 21 os.path.abspath(__file__)), 'testdata') | |
| 22 | |
| 23 def _ConstructDefaultProcessor(self): | |
| 24 """Creates a LegacyResultsProcessor instance. | |
| 25 | |
| 26 Returns: | |
| 27 An instance of LegacyResultsProcessor class | |
| 28 """ | |
| 29 return generate_legacy_perf_dashboard_json.LegacyResultsProcessor() | |
| 30 | |
| 31 def _ProcessLog(self, log_processor, logfile): # pylint: disable=R0201 | |
| 32 """Reads in a input log file and processes it. | |
| 33 | |
| 34 This changes the state of the log processor object; the output is stored | |
| 35 in the object and can be gotten using the PerformanceLogs() method. | |
| 36 | |
| 37 Args: | |
| 38 log_processor: An PerformanceLogProcessor instance. | |
| 39 logfile: File name of an input performance results log file. | |
| 40 """ | |
| 41 for line in open(os.path.join(self.data_directory, logfile)): | |
| 42 log_processor._ProcessLine(line) | |
| 43 | |
| 44 def _CheckFileExistsWithData(self, logs, graph): | |
| 45 """Asserts that |graph| exists in the |logs| dict and is non-empty.""" | |
| 46 self.assertTrue(graph in logs, 'File %s was not output.' % graph) | |
| 47 self.assertTrue(logs[graph], 'File %s did not contain data.' % graph) | |
| 48 | |
| 49 def _ConstructParseAndCheckLogfiles(self, inputfiles, graphs): | |
| 50 """Uses a log processor to process the given input files. | |
| 51 | |
| 52 Args: | |
| 53 inputfiles: A list of input performance results log file names. | |
| 54 logfiles: List of expected output ".dat" file names. | |
| 55 | |
| 56 Returns: | |
| 57 A dictionary mapping output file name to output file lines. | |
| 58 """ | |
| 59 parser = self._ConstructDefaultProcessor() | |
| 60 for inputfile in inputfiles: | |
| 61 self._ProcessLog(parser, inputfile) | |
| 62 | |
| 63 logs = json.loads(parser._GenerateGraphJson()) | |
| 64 for graph in graphs: | |
| 65 self._CheckFileExistsWithData(logs, graph) | |
| 66 | |
| 67 return logs | |
| 68 | |
| 69 def _ConstructParseAndCheckJSON( | |
| 70 self, inputfiles, logfiles, graphs): | |
| 71 """Processes input with a log processor and checks against expectations. | |
| 72 | |
| 73 Args: | |
| 74 inputfiles: A list of input performance result log file names. | |
| 75 logfiles: A list of expected output ".dat" file names. | |
| 76 subdir: Subdirectory containing expected output files. | |
| 77 log_processor_class: A log processor class. | |
| 78 """ | |
| 79 logs = self._ConstructParseAndCheckLogfiles(inputfiles, graphs) | |
| 80 index = 0 | |
| 81 for filename in logfiles: | |
| 82 graph_name = graphs[index] | |
| 83 actual = logs[graph_name] | |
| 84 path = os.path.join(self.data_directory, filename) | |
| 85 expected = json.load(open(path)) | |
| 86 self.assertEqual(expected, actual, 'JSON data in %s did not match ' | |
| 87 'expectations.' % filename) | |
| 88 | |
| 89 index += 1 | |
| 90 | |
| 91 | |
| 92 def testSummary(self): | |
| 93 graphs = ['commit_charge', | |
| 94 'ws_final_total', 'vm_final_browser', 'vm_final_total', | |
| 95 'ws_final_browser', 'processes', 'artificial_graph'] | |
| 96 # Tests the output of "summary" files, which contain per-graph data. | |
| 97 input_files = ['graphing_processor.log'] | |
| 98 output_files = ['%s-summary.dat' % graph for graph in graphs] | |
| 99 | |
| 100 self._ConstructParseAndCheckJSON(input_files, output_files, graphs) | |
| 101 | |
| 102 | |
# Allow running this test module directly as a script.
if __name__ == '__main__':
  unittest.main()
| OLD | NEW |