OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Utility to display a summary of JSON-format GM results, and exit with | 6 """Schema of the JSON summary file written out by the GM tool. |
7 a nonzero errorcode if there were non-ignored failures in the GM results. | |
8 | 7 |
9 Usage: | 8 This must be kept in sync with the kJsonKey_ constants in gm_expectations.cpp ! |
10 python display_json_results.py <filename> | |
11 | |
12 TODO(epoger): We may want to add flags to set the following: | |
13 - which error types cause a nonzero return code | |
14 - maximum number of tests to list for any one ResultAccumulator | |
15 (to keep the output reasonably short) | |
16 """ | 9 """ |
17 | 10 |
18 __author__ = 'Elliot Poger' | 11 __author__ = 'Elliot Poger' |
19 | 12 |
20 | 13 |
| 14 # system-level imports |
21 import json | 15 import json |
22 import sys | |
23 | 16 |
24 | 17 |
25 # These constants must be kept in sync with the kJsonKey_ constants in | 18 # These constants must be kept in sync with the kJsonKey_ constants in |
26 # gm_expectations.cpp ! | 19 # gm_expectations.cpp ! |
27 JSONKEY_ACTUALRESULTS = 'actual-results' | 20 JSONKEY_ACTUALRESULTS = 'actual-results' |
28 JSONKEY_ACTUALRESULTS_FAILED = 'failed' | 21 JSONKEY_ACTUALRESULTS_FAILED = 'failed' |
29 JSONKEY_ACTUALRESULTS_FAILUREIGNORED = 'failure-ignored' | 22 JSONKEY_ACTUALRESULTS_FAILUREIGNORED = 'failure-ignored' |
30 JSONKEY_ACTUALRESULTS_NOCOMPARISON = 'no-comparison' | 23 JSONKEY_ACTUALRESULTS_NOCOMPARISON = 'no-comparison' |
31 JSONKEY_ACTUALRESULTS_SUCCEEDED = 'succeeded' | 24 JSONKEY_ACTUALRESULTS_SUCCEEDED = 'succeeded' |
32 | 25 |
33 | 26 def Load(filepath): |
34 class ResultAccumulator(object): | 27 """Loads the JSON summary written out by the GM tool. |
35 """Object that accumulates results of a given type, and can generate a | 28 Returns a dictionary keyed by the values listed as JSONKEY_ constants |
36 summary upon request.""" | 29 above.""" |
37 | 30 # In the future, we should add a version number to the JSON file to ensure |
38 def __init__(self, name, do_list, do_fail): | 31 # that the writer and reader agree on the schema (raising an exception |
39 """name: name of the category this result type falls into | 32 # otherwise). |
40 do_list: whether to list all of the tests with this results type | |
41 do_fail: whether to return with nonzero exit code if there are any | |
42 results of this type | |
43 """ | |
44 self._name = name | |
45 self._do_list = do_list | |
46 self._do_fail = do_fail | |
47 self._testnames = [] | |
48 | |
49 def AddResult(self, testname): | |
50 """Adds a result of this particular type. | |
51 testname: (string) name of the test""" | |
52 self._testnames.append(testname) | |
53 | |
54 def ShouldSignalFailure(self): | |
55 """Returns true if this result type is serious (self._do_fail is True) | |
56 and there were any results of this type.""" | |
57 if self._do_fail and self._testnames: | |
58 return True | |
59 else: | |
60 return False | |
61 | |
62 def GetSummaryLine(self): | |
63 """Returns a single-line string summary of all results added to this | |
64 accumulator so far.""" | |
65 summary = '' | |
66 if self._do_fail: | |
67 summary += '[*] ' | |
68 else: | |
69 summary += '[ ] ' | |
70 summary += str(len(self._testnames)) | |
71 summary += ' ' | |
72 summary += self._name | |
73 if self._do_list: | |
74 summary += ': ' | |
75 for testname in self._testnames: | |
76 summary += testname | |
77 summary += ' ' | |
78 return summary | |
79 | |
80 | |
81 def Display(filepath): | |
82 """Displays a summary of the results in a JSON file. | |
83 Returns True if the results are free of any significant failures. | |
84 filepath: (string) path to JSON file""" | |
85 | |
86 # Map labels within the JSON file to the ResultAccumulator for each label. | |
87 results_map = { | |
88 JSONKEY_ACTUALRESULTS_FAILED: | |
89 ResultAccumulator(name='ExpectationsMismatch', | |
90 do_list=True, do_fail=True), | |
91 JSONKEY_ACTUALRESULTS_FAILUREIGNORED: | |
92 ResultAccumulator(name='IgnoredExpectationsMismatch', | |
93 do_list=True, do_fail=False), | |
94 JSONKEY_ACTUALRESULTS_NOCOMPARISON: | |
95 ResultAccumulator(name='MissingExpectations', | |
96 do_list=False, do_fail=False), | |
97 JSONKEY_ACTUALRESULTS_SUCCEEDED: | |
98 ResultAccumulator(name='Passed', | |
99 do_list=False, do_fail=False), | |
100 } | |
101 | |
102 success = True | |
103 json_dict = json.load(open(filepath)) | 33 json_dict = json.load(open(filepath)) |
104 actual_results = json_dict[JSONKEY_ACTUALRESULTS] | 34 return json_dict |
105 for label, accumulator in results_map.iteritems(): | |
106 results = actual_results[label] | |
107 if results: | |
108 for result in results: | |
109 accumulator.AddResult(result) | |
110 print accumulator.GetSummaryLine() | |
111 if accumulator.ShouldSignalFailure(): | |
112 success = False | |
113 print '(results marked with [*] will cause nonzero return value)' | |
114 return success | |
115 | |
116 | |
117 if '__main__' == __name__: | |
118 if len(sys.argv) != 2: | |
119 raise Exception('usage: %s <input-json-filepath>' % sys.argv[0]) | |
120 sys.exit(0 if Display(sys.argv[1]) else 1) | |
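For context, a minimal sketch of how a caller might consume the new schema module after this change. The import name (gm_json), the summary file path, and the exact shape of each per-category entry are assumptions for illustration; only Load() and the JSONKEY_ constants are defined by the diff above.

```python
# Hypothetical consumer of the new schema module; the module name (gm_json)
# and the input path are assumptions, not part of the change itself.
import gm_json

summary = gm_json.Load('summary.json')  # returns the raw summary dict
actual_results = summary[gm_json.JSONKEY_ACTUALRESULTS]

# Summarize each result category, roughly as display_json_results.py did
# before this refactor. A category may be empty or null in the file, hence
# the "or {}" guard.
for key in (gm_json.JSONKEY_ACTUALRESULTS_FAILED,
            gm_json.JSONKEY_ACTUALRESULTS_FAILUREIGNORED,
            gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON,
            gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED):
  results = actual_results.get(key) or {}
  print('%-20s %d' % (key, len(results)))
```

Callers that previously shelled out to display_json_results.py would instead import the module and apply their own pass/fail policy to the loaded dictionary.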