Chromium Code Reviews

Unified diff: scripts/slave/process_log_utils.py

Issue 23740006: Updating endure parser to read new csv output format (Closed)
Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: nits Created 7 years, 3 months ago
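
For context: the change below teaches the Endure log processor to accept a second csv layout whose first column is a test/page name rather than a URL. The following is a minimal sketch of how the updated header and result-line regexes classify the two layouts; the sample csv lines and units are hypothetical, modeled on the field-name pattern and the docstring example in the diff, not taken from real bot output:

    import re

    # Patterns as they appear in the patch below; the old alternatives are
    # kept so URL-keyed output is still accepted.
    HEADER_RE = re.compile(r'^url,|^page_name,')
    RESULT_RE = re.compile(r'^http|^endure_')

    # Hypothetical sample lines (field names follow 'TRACE_X (UNITS)').
    old_header = 'url,EventListenerCount_X (seconds),EventListenerCount_Y (count)'
    new_header = 'page_name,EventListenerCount_X (seconds),EventListenerCount_Y (count)'
    old_result = 'https://www.google.com/calendar/,4,446'
    new_result = 'endure_gmail_alt_two_labels,4,446'

    assert HEADER_RE.match(old_header) and HEADER_RE.match(new_header)
    assert RESULT_RE.match(old_result) and RESULT_RE.match(new_result)

Both layouts go through the same _ProcessEndureResultLine path; only the interpretation of the first column (and its conversion to a file-safe name) changes.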
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Parser and evaluator for performance tests.

Several performance tests have complicated log output, this module is intended
to help buildsteps parse these logs and identify if tests had anomalies.

(...skipping 820 matching lines...)
    graphs = {}
    for name, graph in self._graphs.iteritems():
      graphs[name] = {'name': name,
                      'important': graph.IsImportant(),
                      'units': graph.units}
    self._output[GRAPH_LIST] = json.dumps(graphs).split('\n')


class GraphingEndureLogProcessor(GraphingLogProcessor):
  """Handles additional processing for Chrome Endure data."""
-  ENDURE_HEADER_LINE_REGEX = re.compile(r'^url,')
-  ENDURE_RESULT_LINE_REGEX = re.compile(r'^http')
+  ENDURE_HEADER_LINE_REGEX = re.compile(r'^url,|^page_name,')
+  ENDURE_RESULT_LINE_REGEX = re.compile(r'^http|^endure_')
  ENDURE_FIELD_NAME_REGEX = re.compile(
      r'(?P<TRACE>.*)_(?P<COORDINATE>[XY]) \((?P<UNITS>.*)\)')

  def __init__(self, *args, **kwargs):
    GraphingLogProcessor.__init__(self, *args, **kwargs)
    # A dictionary of trace names to EndureGraph objects, which is generated
    # when parsing the header line. This template will be used as the default
    # EndureGraph when parsing results lines.
    self._graph_template = {}

(...skipping 40 matching lines...)
  def _ProcessEndureResultLine(self, line):
    """Parse each regular results line in the csv input.

    A typical results line will look like this:
    https://www.google.com/calendar/,4,446,4,2847
    """
    assert self._graph_template

    values = csv.reader([line]).next()

-    # Assume url is the first column.
-    test_name = self.url_as_file_safe_name(values[0])
+    # Assume test name is the first column.
+    test_name = self.str_as_file_safe_name(values[0])

    # Iterate over all trace names discovered from the header.
    for trace_name in self._graph_template:
      graph_name = test_name + '-' + trace_name

      # The default EndureGraph is copied from the template, which already
      # contains all units and index information.
      graph = self._graphs.get(
          graph_name,
          copy.deepcopy(self._graph_template[trace_name]))

      trace = graph.traces.get(trace_name, EndureTrace())
      trace.values.append([values[graph.csv_index_x],
                           values[graph.csv_index_y]])

      graph.traces[trace_name] = trace
      self._graphs[graph_name] = graph

  @staticmethod
-  def url_as_file_safe_name(url):
-    # Just replace all special characters in the url with underscore.
-    return re.sub('[^a-zA-Z0-9]', '_', url)
+  def str_as_file_safe_name(string):
+    # Just replace all special characters in the string with underscores.
+    return re.sub('[^a-zA-Z0-9]', '_', string)

  def _FinalizeProcessing(self):
    self.__CreateSummaryOutput()
    self.__GenerateGraphInfo()

  def __BuildSummaryJSON(self, graph):
    """Sorts the traces and returns a summary JSON encoding of the graph.

    Although JS objects are not ordered, according to the spec, in practice
    everyone iterates in order, since not doing so is a compatibility problem.
(...skipping 156 matching lines...)
    for page in self._page_list:
      times = page_times[page]
      mean, stddev = chromium_utils.FilteredMeanAndStandardDeviation(times)
      file_data.append('%s (%s+/-%s): %s' % (page,
                                             FormatFloat(mean),
                                             FormatFloat(stddev),
                                             JoinWithSpacesAndNewLine(times)))

    filename = '%s_%s.dat' % (self._revision, trace_name)
    return {filename: file_data}
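
Stepping back from the diff: the __BuildSummaryJSON docstring above relies on consumers iterating object keys in insertion order, so emitting the traces in sorted order makes the summary deterministic. Below is a minimal sketch of that idea, not the CL's actual implementation; the helper name is hypothetical, and the input is assumed to map trace names to lists of [x, y] pairs:

    import json
    from collections import OrderedDict

    def build_summary_json(traces):
      # `traces` is assumed to map trace names to lists of [x, y] pairs.
      # Sorting before encoding gives a stable key order in the JSON text,
      # which consumers that iterate in insertion order will preserve.
      ordered = OrderedDict(sorted(traces.iteritems()))  # Python 2, as in this file
      return json.dumps(ordered)

    print build_summary_json({'b_trace': [[4, 446]], 'a_trace': [[4, 2847]]})
    # -> {"a_trace": [[4, 2847]], "b_trace": [[4, 446]]}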
