Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(111)

Side by Side Diff: appengine/findit/util_scripts/remote_queries/try_job_data_metrics.py

Issue 2091023003: [Findit] Adding ability to query try job data by platform and error and calculate medians (Closed) Base URL: https://chromium.googlesource.com/infra/infra.git@master
Patch Set: Adding comment Created 4 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2016 The Chromium Authors. All rights reserved. 1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 """Pulls historical try job metadata from Findit and prints a report.""" 5 """Pulls historical try job metadata from Findit and prints a report."""
6 6
7 import argparse 7 import argparse
8 from collections import defaultdict 8 from collections import defaultdict
9 import datetime 9 import datetime
10 import numpy
10 import os 11 import os
11 import sys 12 import sys
12 13
13 _REMOTE_API_DIR = os.path.join(os.path.dirname(__file__), os.path.pardir) 14 _REMOTE_API_DIR = os.path.join(os.path.dirname(__file__), os.path.pardir)
14 sys.path.insert(1, _REMOTE_API_DIR) 15 sys.path.insert(1, _REMOTE_API_DIR)
15 16
16 import remote_api 17 import remote_api
17 18
18 from model.wf_config import FinditConfig
19 from model.wf_try_job_data import WfTryJobData 19 from model.wf_try_job_data import WfTryJobData
20 20
21 21
22 NOT_AVAILABLE = 'N/A' 22 NOT_AVAILABLE = 'N/A'
23 23
24 24
25 def _GetOSPlatformName(master_name, builder_name):
26 """Returns the OS platform name based on the master and builder."""
27 builder_name = builder_name.lower()
28 master_name = master_name.lower()
29
30 if master_name == 'chromium.win':
31 return 'win'
32 elif master_name == 'chromium.linux':
33 if 'android' in builder_name:
34 return 'android'
35 else:
36 return 'unix'
37 elif master_name == 'chromium.chromiumos':
38 return 'unix'
39 else:
40 os_map = {
41 'win': 'win',
42 'linux': 'unix',
43 'chromiumos': 'unix',
44 'chromeos': 'unix',
45 'android': 'android',
46 'mac': 'mac',
47 'ios': 'ios',
48 }
49
50 for os_name, platform in os_map.iteritems():
51 if os_name in builder_name:
52 return platform
53
54 return 'unknown'
55
56
25 def _GetAverageOfNumbersInList(numbers): 57 def _GetAverageOfNumbersInList(numbers):
26 """Returns a float average of numbers or NOT_AVAILABLE if numbers is empty.""" 58 """Returns a float average of numbers or NOT_AVAILABLE if numbers is empty."""
27 return (float(sum(numbers)) / len(numbers)) if numbers else NOT_AVAILABLE 59 return (float(sum(numbers)) / len(numbers)) if numbers else NOT_AVAILABLE
28 60
29 61
30 def _FormatDigits(number): 62 def _FormatDigits(number):
31 """Formats number into a 2-digit float, or NOT_AVAILABLE.""" 63 """Formats number into a 2-digit float, or NOT_AVAILABLE."""
32 if isinstance(number, float): 64 if isinstance(number, float):
33 return float('%.2f' % number) 65 return float('%.2f' % number)
34 return NOT_AVAILABLE 66 return NOT_AVAILABLE
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
79 'under_fifteen_minutes_rate': The number of try jobs that finished in 111 'under_fifteen_minutes_rate': The number of try jobs that finished in
80 under 15 minutes / total try jobs. 112 under 15 minutes / total try jobs.
81 'under_thirty_minutes_rate': The number of try jobs that finished in 113 'under_thirty_minutes_rate': The number of try jobs that finished in
82 under 30 minutes / total try jobs. 114 under 30 minutes / total try jobs.
83 'over_thirty_minutes_rate': The number of try jobs that finished in over 115 'over_thirty_minutes_rate': The number of try jobs that finished in over
84 30 minutes / total try jobs. 116 30 minutes / total try jobs.
85 } 117 }
86 """ 118 """
87 try_jobs_per_day = NOT_AVAILABLE 119 try_jobs_per_day = NOT_AVAILABLE
88 average_regression_range_size = NOT_AVAILABLE 120 average_regression_range_size = NOT_AVAILABLE
121 median_regression_range_size = NOT_AVAILABLE
89 average_execution_time = NOT_AVAILABLE 122 average_execution_time = NOT_AVAILABLE
123 median_execution_time = NOT_AVAILABLE
124 average_end_to_end_time = NOT_AVAILABLE
125 median_end_to_end_time = NOT_AVAILABLE
90 average_time_in_queue = NOT_AVAILABLE 126 average_time_in_queue = NOT_AVAILABLE
127 median_execution_time = NOT_AVAILABLE
91 average_commits_analyzed = NOT_AVAILABLE 128 average_commits_analyzed = NOT_AVAILABLE
129 median_commits_analyzed = NOT_AVAILABLE
92 longest_execution_time = NOT_AVAILABLE 130 longest_execution_time = NOT_AVAILABLE
93 shortest_execution_time = NOT_AVAILABLE 131 shortest_execution_time = NOT_AVAILABLE
94 detection_rate = NOT_AVAILABLE 132 detection_rate = NOT_AVAILABLE
95 error_rate = NOT_AVAILABLE 133 error_rate = NOT_AVAILABLE
96 number_of_try_jobs = len(try_job_data_list) if try_job_data_list else 0 134 number_of_try_jobs = len(try_job_data_list) if try_job_data_list else 0
97 time_per_revision = NOT_AVAILABLE 135 time_per_revision = NOT_AVAILABLE
98 under_five_minutes_rate = NOT_AVAILABLE 136 under_five_minutes_rate = NOT_AVAILABLE
99 under_fifteen_minutes_rate = NOT_AVAILABLE 137 under_fifteen_minutes_rate = NOT_AVAILABLE
100 under_thirty_minutes_rate = NOT_AVAILABLE 138 under_thirty_minutes_rate = NOT_AVAILABLE
101 over_thirty_minutes_rate = NOT_AVAILABLE 139 over_thirty_minutes_rate = NOT_AVAILABLE
102 140
103 if try_job_data_list: 141 if try_job_data_list:
104 try_jobs_per_day = ( 142 try_jobs_per_day = (
105 len(try_job_data_list) / float((end_date - start_date).days)) 143 len(try_job_data_list) / float((end_date - start_date).days))
106 regression_range_sizes = [] 144 regression_range_sizes = []
107 execution_times_seconds = [] 145 execution_times_seconds = []
108 in_queue_times = [] 146 in_queue_times = []
147 end_to_end_times = []
109 commits_analyzed = [] 148 commits_analyzed = []
110 culprits_detected = 0 149 culprits_detected = 0
111 errors_detected = 0 150 errors_detected = 0
112 number_under_five_minutes = 0 151 number_under_five_minutes = 0
113 number_under_fifteen_minutes = 0 152 number_under_fifteen_minutes = 0
114 number_under_thirty_minutes = 0 153 number_under_thirty_minutes = 0
115 number_over_thirty_minutes = 0 154 number_over_thirty_minutes = 0
116 total_number_of_try_jobs = len(try_job_data_list) 155 total_number_of_try_jobs = len(try_job_data_list)
117 156
118 for try_job_data in try_job_data_list: 157 for try_job_data in try_job_data_list:
(...skipping 12 matching lines...) Expand all
131 if try_job_data.start_time and try_job_data.request_time: 170 if try_job_data.start_time and try_job_data.request_time:
132 in_queue_time_delta = ( 171 in_queue_time_delta = (
133 try_job_data.start_time - try_job_data.request_time) 172 try_job_data.start_time - try_job_data.request_time)
134 in_queue_time = in_queue_time_delta.total_seconds() 173 in_queue_time = in_queue_time_delta.total_seconds()
135 in_queue_times.append(in_queue_time) 174 in_queue_times.append(in_queue_time)
136 175
137 # Total time end-to-end. 176 # Total time end-to-end.
138 if try_job_data.request_time and try_job_data.end_time: 177 if try_job_data.request_time and try_job_data.end_time:
139 total_time_delta = try_job_data.end_time - try_job_data.start_time 178 total_time_delta = try_job_data.end_time - try_job_data.start_time
140 total_time_seconds = total_time_delta.total_seconds() 179 total_time_seconds = total_time_delta.total_seconds()
180 end_to_end_times.append(total_time_seconds)
141 181
142 if total_time_seconds < 300: # Under 5 minutes. 182 if total_time_seconds < 300: # Under 5 minutes.
143 number_under_five_minutes += 1 183 number_under_five_minutes += 1
144 elif total_time_seconds < 900: # Under 15 minutes. 184 elif total_time_seconds < 900: # Under 15 minutes.
145 number_under_fifteen_minutes += 1 185 number_under_fifteen_minutes += 1
146 elif total_time_seconds < 1800: # Under 30 minutes. 186 elif total_time_seconds < 1800: # Under 30 minutes.
147 number_under_thirty_minutes += 1 187 number_under_thirty_minutes += 1
148 else: # Over 30 minutes. 188 else: # Over 30 minutes.
149 number_over_thirty_minutes += 1 189 number_over_thirty_minutes += 1
150 190
151 # Number of commits analyzed. 191 # Number of commits analyzed.
152 if try_job_data.number_of_commits_analyzed: 192 if try_job_data.number_of_commits_analyzed:
153 commits_analyzed.append(try_job_data.number_of_commits_analyzed) 193 commits_analyzed.append(try_job_data.number_of_commits_analyzed)
154 194
155 # Culprit detection rate. 195 # Culprit detection rate.
156 if try_job_data.culprits: 196 if try_job_data.culprits:
157 culprits_detected += 1 197 culprits_detected += 1
158 198
159 if try_job_data.error: 199 if try_job_data.error:
160 errors_detected += 1 200 errors_detected += 1
161 201
162 average_regression_range_size = _GetAverageOfNumbersInList( 202 average_regression_range_size = _GetAverageOfNumbersInList(
163 regression_range_sizes) 203 regression_range_sizes)
204 median_regression_range_size = (
205 numpy.median(regression_range_sizes) if regression_range_sizes
206 else NOT_AVAILABLE)
164 average_execution_time = (_GetAverageOfNumbersInList( 207 average_execution_time = (_GetAverageOfNumbersInList(
165 execution_times_seconds) if execution_times_seconds else NOT_AVAILABLE) 208 execution_times_seconds) if execution_times_seconds else NOT_AVAILABLE)
209 median_execution_time = (
210 numpy.median(execution_times_seconds) if execution_times_seconds else
211 NOT_AVAILABLE)
212 average_end_to_end_time = (
213 _GetAverageOfNumbersInList(end_to_end_times) if end_to_end_times
214 else NOT_AVAILABLE)
215 median_end_to_end_time = (
216 numpy.median(end_to_end_times) if end_to_end_times else NOT_AVAILABLE)
166 average_time_in_queue = ( 217 average_time_in_queue = (
167 _GetAverageOfNumbersInList(in_queue_times) if in_queue_times else 218 _GetAverageOfNumbersInList(in_queue_times) if in_queue_times else
168 NOT_AVAILABLE) 219 NOT_AVAILABLE)
220 median_time_in_queue = (
221 numpy.median(in_queue_times) if in_queue_times else NOT_AVAILABLE)
169 average_commits_analyzed = _GetAverageOfNumbersInList( 222 average_commits_analyzed = _GetAverageOfNumbersInList(
170 commits_analyzed) 223 commits_analyzed)
224 median_commits_analyzed = (
225 numpy.median(commits_analyzed) if commits_analyzed else NOT_AVAILABLE)
171 longest_execution_time = ( 226 longest_execution_time = (
172 str(datetime.timedelta(seconds=max(execution_times_seconds))) 227 str(datetime.timedelta(
228 seconds=int(round(max(execution_times_seconds)))))
173 if execution_times_seconds else NOT_AVAILABLE) 229 if execution_times_seconds else NOT_AVAILABLE)
174 shortest_execution_time = ( 230 shortest_execution_time = (
175 str(datetime.timedelta(seconds=min(execution_times_seconds))) 231 str(datetime.timedelta(
232 seconds=int(round(min(execution_times_seconds)))))
176 if execution_times_seconds else NOT_AVAILABLE) 233 if execution_times_seconds else NOT_AVAILABLE)
177 detection_rate = float(culprits_detected) / total_number_of_try_jobs 234 detection_rate = float(culprits_detected) / total_number_of_try_jobs
178 error_rate = float(errors_detected) / total_number_of_try_jobs 235 error_rate = float(errors_detected) / total_number_of_try_jobs
179 time_per_revision = (average_execution_time / average_commits_analyzed if ( 236 time_per_revision = (average_execution_time / average_commits_analyzed if (
180 average_execution_time != NOT_AVAILABLE and 237 average_execution_time != NOT_AVAILABLE and
181 average_commits_analyzed != NOT_AVAILABLE) else NOT_AVAILABLE) 238 average_commits_analyzed != NOT_AVAILABLE) else NOT_AVAILABLE)
182 239
183 under_five_minutes_rate = ( 240 under_five_minutes_rate = (
184 float(number_under_five_minutes) / total_number_of_try_jobs) 241 float(number_under_five_minutes) / total_number_of_try_jobs)
185 under_fifteen_minutes_rate = ( 242 under_fifteen_minutes_rate = (
186 float(number_under_fifteen_minutes) / total_number_of_try_jobs) 243 float(number_under_fifteen_minutes) / total_number_of_try_jobs)
187 under_thirty_minutes_rate = ( 244 under_thirty_minutes_rate = (
188 float(number_under_thirty_minutes) / total_number_of_try_jobs) 245 float(number_under_thirty_minutes) / total_number_of_try_jobs)
189 over_thirty_minutes_rate = ( 246 over_thirty_minutes_rate = (
190 float(number_over_thirty_minutes) / total_number_of_try_jobs) 247 float(number_over_thirty_minutes) / total_number_of_try_jobs)
191 248
192 return { 249 return {
193 'try_jobs_per_day': _FormatDigits(try_jobs_per_day), 250 'try_jobs_per_day': _FormatDigits(try_jobs_per_day),
194 'average_regression_range_size': _FormatDigits( 251 'average_regression_range_size': _FormatDigits(
195 average_regression_range_size), 252 average_regression_range_size),
196 'average_execution_time': _FormatSecondsAsHMS( 253 'median_regression_range_size': median_regression_range_size,
197 _FormatDigits(average_execution_time)), 254 'average_execution_time': _FormatSecondsAsHMS(_FormatDigits(
255 average_execution_time)),
256 'median_execution_time': _FormatSecondsAsHMS(_FormatDigits(
257 median_execution_time)),
258 'average_end_to_end_time': _FormatSecondsAsHMS(_FormatDigits(
259 average_end_to_end_time)),
260 'median_end_to_end_time': _FormatSecondsAsHMS(_FormatDigits(
261 median_end_to_end_time)),
198 'average_time_in_queue': _FormatSecondsAsHMS( 262 'average_time_in_queue': _FormatSecondsAsHMS(
199 _FormatDigits(average_time_in_queue)), 263 _FormatDigits(average_time_in_queue)),
264 'median_time_in_queue': _FormatSecondsAsHMS(_FormatDigits(
265 median_time_in_queue)),
200 'average_commits_analyzed': _FormatDigits(average_commits_analyzed), 266 'average_commits_analyzed': _FormatDigits(average_commits_analyzed),
267 'median_commits_analyzed': median_commits_analyzed,
201 'longest_execution_time': longest_execution_time, 268 'longest_execution_time': longest_execution_time,
202 'shortest_execution_time': shortest_execution_time, 269 'shortest_execution_time': shortest_execution_time,
203 'number_of_try_jobs': number_of_try_jobs, 270 'number_of_try_jobs': number_of_try_jobs,
204 'detection_rate': _FormatDigits(detection_rate), 271 'detection_rate': _FormatDigits(detection_rate),
205 'error_rate': _FormatDigits(error_rate), 272 'error_rate': _FormatDigits(error_rate),
206 'time_per_revision': _FormatSecondsAsHMS( 273 'time_per_revision': _FormatSecondsAsHMS(
207 _FormatDigits(time_per_revision)), 274 _FormatDigits(time_per_revision)),
208 'under_five_minutes_rate': _FormatDigits(under_five_minutes_rate), 275 'under_five_minutes_rate': _FormatDigits(under_five_minutes_rate),
209 'under_fifteen_minutes_rate': _FormatDigits(under_fifteen_minutes_rate), 276 'under_fifteen_minutes_rate': _FormatDigits(under_fifteen_minutes_rate),
210 'under_thirty_minutes_rate': _FormatDigits(under_thirty_minutes_rate), 277 'under_thirty_minutes_rate': _FormatDigits(under_thirty_minutes_rate),
(...skipping 91 matching lines...) Expand 10 before | Expand all | Expand 10 after
302 'without compile targets': [] 369 'without compile targets': []
303 } 370 }
304 for try_job_data in try_job_data_list: 371 for try_job_data in try_job_data_list:
305 if try_job_data.has_compile_targets: 372 if try_job_data.has_compile_targets:
306 categorized_data_dict['with compile targets'].append(try_job_data) 373 categorized_data_dict['with compile targets'].append(try_job_data)
307 else: 374 else:
308 categorized_data_dict['without compile targets'].append(try_job_data) 375 categorized_data_dict['without compile targets'].append(try_job_data)
309 return categorized_data_dict 376 return categorized_data_dict
310 377
311 378
379 def _SplitListByError(try_job_data_list):
380 categorized_data_dict = {
381 'with error': [],
382 'without error': []
383 }
384 for try_job_data in try_job_data_list:
385 if try_job_data.error:
386 categorized_data_dict['with error'].append(try_job_data)
387 else:
388 categorized_data_dict['without error'].append(try_job_data)
389 return categorized_data_dict
390
391
def _SplitListByPlatform(try_job_data_list):
  """Groups try job data into a dict keyed by OS platform name."""
  categorized_data_dict = defaultdict(list)

  for try_job_data in try_job_data_list:
    master_name = try_job_data.master_name
    builder_name = try_job_data.builder_name

    # Entities missing either identifying field cannot be classified.
    if not (master_name and builder_name):
      continue

    categorized_data_dict[
        _GetOSPlatformName(master_name, builder_name)].append(try_job_data)

  return categorized_data_dict
407
def SplitListByOption(try_job_data_list, option):
  """Takes a WfTryJobData list and separates it into a dict based on arg.

  Args:
    try_job_data_list: A list of WfTryJobData entities.
    option: An option with which to split the data by.

  Returns:
    A dict where the keys are how the data is separated based on arg and the
    values are the corresponding lists of data.
  """
  splitters = {
      't': _SplitListByTryJobType,  # Try job type (compile, test).
      'm': _SplitListByMaster,  # Main waterfall master.
      'b': _SplitListByBuilder,  # Main waterfall builder.
      'r': _SplitListByHeuristicResults,  # With/without heuristic results.
      'c': _SplitListByCompileTargets,  # With/without compile targets.
      'e': _SplitListByError,  # With/without errors detected.
      'p': _SplitListByPlatform,  # OS platform.
  }

  splitter = splitters.get(option)
  if splitter is None:
    # Unsupported flag, bail out without modification.
    return try_job_data_list

  return splitter(try_job_data_list)
338 437
339 438
def SplitStructByOption(try_job_data_struct, option):
  """Recursively splits the leaf lists of a try job data structure.

  Args:
    try_job_data_struct: Either a list of WfTryJobData entities or a dict
        whose leaf values are such lists (produced by earlier splits).
    option: A single-letter option understood by SplitListByOption.

  Returns:
    The structure with every leaf list replaced by the dict that
    SplitListByOption produces for it.

  Raises:
    Exception: If the structure contains anything other than lists or dicts.
  """
  if isinstance(try_job_data_struct, list):
    try_job_data_struct = SplitListByOption(try_job_data_struct, option)
  elif isinstance(try_job_data_struct, dict):
    # items() behaves the same as the Python-2-only iteritems() here and is
    # portable to Python 3. Reassigning values for existing keys while
    # iterating is safe because no keys are added or removed.
    for key, struct in try_job_data_struct.items():
      try_job_data_struct[key] = SplitStructByOption(struct, option)
  else:
    raise Exception('try job data dict must only contain lists or dicts.')

  return try_job_data_struct
350 449
351 450
def GetArgsInOrder():
  """Parses the command-line flags, preserving the order they were given in.

  Returns:
    A list of single-letter option names that were passed on the command
    line, in the same order in which they appeared.
  """
  command_line_args = sys.argv[1:]

  parser = argparse.ArgumentParser()

  # Register each supported grouping flag (alphabetical order).
  flag_help_pairs = [
      ('-b', 'group try job data by builder'),
      ('-c', 'group try job data by those with and without compile targets'),
      ('-e', 'group try job data by those with and without errors detected'),
      ('-m', 'group try job data by master'),
      ('-p', 'group try job data by platform'),
      ('-r', 'group try job data by those with and without heuristic results'),
      ('-t', 'group try job data by type (compile, test)'),
  ]
  for flag, help_text in flag_help_pairs:
    parser.add_argument(flag, action='store_true', help=help_text)

  args_dict = vars(parser.parse_args())

  # Preserve order from original command.
  ordered_args = []

  for original_arg in command_line_args:
    parsed_arg = original_arg[1:]
    if args_dict[parsed_arg]:
      ordered_args.append(parsed_arg)

  return ordered_args
380 484
381 485
if __name__ == '__main__':
  # Set up the Remote API to use services on the live App Engine.
  remote_api.EnableRemoteApi(app_id='findit-for-me')

  # The window of try job request times to include in the report.
  START_DATE = datetime.datetime(2016, 5, 1)
  END_DATE = datetime.datetime(2016, 6, 23)

  # Fetch every try job data entity requested within the window.
  try_job_data_query = WfTryJobData.query(
      WfTryJobData.request_time >= START_DATE,
      WfTryJobData.request_time < END_DATE)
  categorized_data = try_job_data_query.fetch()

  # Each command-line flag splits the data one level deeper, applied in the
  # order the flags appeared on the command line.
  args = GetArgsInOrder()
  for arg in args:
    categorized_data = SplitStructByOption(categorized_data, arg)

  # TODO(lijeffrey): Display data in an html page instead of printing.
  PrettyPrint(categorized_data, START_DATE, END_DATE)
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698