OLD | NEW |
(Empty) | |
| 1 #!/usr/bin/env python |
| 2 # Copyright 2016 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. |
| 5 |
| 6 |
| 7 import json |
| 8 import os |
| 9 import unittest |
| 10 |
| 11 import generate_legacy_perf_dashboard_json |
| 12 |
| 13 class LegacyResultsProcessorUnittest(unittest.TestCase): |
| 14 def setUp(self): |
| 15 """Set up for all test method of each test method below.""" |
| 16 super(LegacyResultsProcessorUnittest, self).setUp() |
| 17 self.data_directory = os.path.join(os.path.dirname( |
| 18 os.path.abspath(__file__)), 'testdata') |
| 19 |
| 20 def _ConstructDefaultProcessor(self): |
| 21 """Creates a LegacyResultsProcessor instance. |
| 22 |
| 23 Returns: |
| 24 An instance of LegacyResultsProcessor class |
| 25 """ |
| 26 return generate_legacy_perf_dashboard_json.LegacyResultsProcessor() |
| 27 |
| 28 def _ProcessLog(self, log_processor, logfile): # pylint: disable=R0201 |
| 29 """Reads in a input log file and processes it. |
| 30 |
| 31 This changes the state of the log processor object; the output is stored |
| 32 in the object and can be gotten using the PerformanceLogs() method. |
| 33 |
| 34 Args: |
| 35 log_processor: An PerformanceLogProcessor instance. |
| 36 logfile: File name of an input performance results log file. |
| 37 """ |
| 38 for line in open(os.path.join(self.data_directory, logfile)): |
| 39 log_processor.ProcessLine(line) |
| 40 |
| 41 def _CheckFileExistsWithData(self, logs, graph): |
| 42 """Asserts that |graph| exists in the |logs| dict and is non-empty.""" |
| 43 self.assertTrue(graph in logs, 'File %s was not output.' % graph) |
| 44 self.assertTrue(logs[graph], 'File %s did not contain data.' % graph) |
| 45 |
| 46 def _ConstructParseAndCheckLogfiles(self, inputfiles, graphs): |
| 47 """Uses a log processor to process the given input files. |
| 48 |
| 49 Args: |
| 50 inputfiles: A list of input performance results log file names. |
| 51 logfiles: List of expected output ".dat" file names. |
| 52 |
| 53 Returns: |
| 54 A dictionary mapping output file name to output file lines. |
| 55 """ |
| 56 parser = self._ConstructDefaultProcessor() |
| 57 for inputfile in inputfiles: |
| 58 self._ProcessLog(parser, inputfile) |
| 59 |
| 60 logs = json.loads(parser.GenerateGraphJson()) |
| 61 for graph in graphs: |
| 62 self._CheckFileExistsWithData(logs, graph) |
| 63 |
| 64 return logs |
| 65 |
| 66 def _ConstructParseAndCheckJSON( |
| 67 self, inputfiles, logfiles, graphs): |
| 68 """Processes input with a log processor and checks against expectations. |
| 69 |
| 70 Args: |
| 71 inputfiles: A list of input performance result log file names. |
| 72 logfiles: A list of expected output ".dat" file names. |
| 73 subdir: Subdirectory containing expected output files. |
| 74 log_processor_class: A log processor class. |
| 75 """ |
| 76 logs = self._ConstructParseAndCheckLogfiles(inputfiles, graphs) |
| 77 index = 0 |
| 78 for filename in logfiles: |
| 79 graph_name = graphs[index] |
| 80 actual = logs[graph_name] |
| 81 path = os.path.join(self.data_directory, filename) |
| 82 expected = json.load(open(path)) |
| 83 self.assertEqual(expected, actual, 'JSON data in %s did not match ' |
| 84 'expectations.' % filename) |
| 85 |
| 86 index += 1 |
| 87 |
| 88 |
| 89 def testSummary(self): |
| 90 graphs = ['commit_charge', |
| 91 'ws_final_total', 'vm_final_browser', 'vm_final_total', |
| 92 'ws_final_browser', 'processes', 'artificial_graph'] |
| 93 # Tests the output of "summary" files, which contain per-graph data. |
| 94 input_files = ['graphing_processor.log'] |
| 95 output_files = ['%s-summary.dat' % graph for graph in graphs] |
| 96 |
| 97 self._ConstructParseAndCheckJSON(input_files, output_files, graphs) |
| 98 |
| 99 |
# Allow this test module to be run directly from the command line.
if __name__ == '__main__':
  unittest.main()
OLD | NEW |