Chromium Code Reviews

Diff: Tools/Scripts/webkitpy/layout_tests/models/test_run_results.py

Issue 339623002: Added support for versioning of layout test results of run-webkit-tests runs (Closed) Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: Created 6 years, 6 months ago
1   # Copyright (C) 2010 Google Inc. All rights reserved.
2   # Copyright (C) 2010 Gabor Rapcsanyi (rgabor@inf.u-szeged.hu), University of Szeged
3   #
4   # Redistribution and use in source and binary forms, with or without
5   # modification, are permitted provided that the following conditions are
6   # met:
7   #
8   #     * Redistributions of source code must retain the above copyright
9   #       notice, this list of conditions and the following disclaimer.
10  #     * Redistributions in binary form must reproduce the above
(...skipping 145 matching lines...)
156
157     if test_failures.FailureMissingImage in failure_types or test_failures.FailureMissingImageHash in failure_types:
158         test_dict['is_missing_image'] = True
159
160     if test_failures.FailureTestHarnessAssertion in failure_types:
161         test_dict['is_testharness_test'] = True
162
163     return test_dict
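For review context, here is a minimal, self-contained sketch of how the checks above turn failure types into flags on the per-test dictionary. The failure classes below are stand-ins for webkitpy.layout_tests.models.test_failures, and flag_failures is a hypothetical helper, not code from this patch.

    # Stand-in failure classes; the real ones live in
    # webkitpy.layout_tests.models.test_failures.
    class FailureMissingImage(object):
        pass

    class FailureMissingImageHash(object):
        pass

    class FailureTestHarnessAssertion(object):
        pass

    def flag_failures(failure_types):
        # Mirrors the membership tests above: each recognised failure
        # category sets one boolean flag on the per-test dictionary.
        test_dict = {}
        if FailureMissingImage in failure_types or FailureMissingImageHash in failure_types:
            test_dict['is_missing_image'] = True
        if FailureTestHarnessAssertion in failure_types:
            test_dict['is_testharness_test'] = True
        return test_dict

    print(flag_failures([FailureMissingImage]))  # {'is_missing_image': True}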
164
165
OLD 166 def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=False):
NEW 166 def summarize_results(port_obj, expectations, initial_results, retry_results, enabled_pixel_tests_in_retry, only_include_failing=False, archived_results_path=None, revision_info=''):
Dirk Pranke 2014/06/17 17:58:58 it feels a bit odd to me that the results object w
patro 2014/07/15 10:36:57 Yes, things can be separate; there need not be any
167 """Returns a dictionary containing a summary of the test runs, with the foll owing fields: 167 """Returns a dictionary containing a summary of the test runs, with the foll owing fields:
168 'version': a version indicator 168 'version': a version indicator
169 'fixable': The number of fixable tests (NOW - PASS) 169 'fixable': The number of fixable tests (NOW - PASS)
170 'skipped': The number of skipped tests (NOW & SKIPPED) 170 'skipped': The number of skipped tests (NOW & SKIPPED)
171 'num_regressions': The number of non-flaky failures 171 'num_regressions': The number of non-flaky failures
172 'num_flaky': The number of flaky failures 172 'num_flaky': The number of flaky failures
173 'num_passes': The number of unexpected passes 173 'num_passes': The number of unexpected passes
174 'tests': a dict of tests -> {'expected': '...', 'actual': '...'} 174 'tests': a dict of tests -> {'expected': '...', 'actual': '...'}
175 """ 175 """
176     results = {}
(...skipping 122 matching lines...)
299     results['num_regressions'] = num_regressions
300     results['interrupted'] = initial_results.interrupted  # Does results.html have enough information to compute this itself? (by checking total number of results vs. total number of tests?)
301     results['layout_tests_dir'] = port_obj.layout_tests_dir()
302     results['has_wdiff'] = port_obj.wdiff_available()
303     results['has_pretty_patch'] = port_obj.pretty_patch_available()
304     results['pixel_tests_enabled'] = port_obj.get_option('pixel_tests')
305     results['seconds_since_epoch'] = int(time.time())
306     results['build_number'] = port_obj.get_option('build_number')
307     results['builder_name'] = port_obj.get_option('builder_name')
308
NEW 309     if port_obj.get_option("enable_versioned_results") and archived_results_path:
NEW 310         results['previous_results_location'] = archived_results_path
NEW 311         results['chromium_revision'] = ''
NEW 312         results['blink_revision'] = ''
NEW 313
NEW 314
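A minimal, runnable sketch of the new behaviour added by the lines above, pulled out as a standalone helper so it can be read in isolation; the helper name and the sample archive path are hypothetical, not part of this patch.

    def record_previous_results(results, enable_versioned_results, archived_results_path):
        # Hypothetical helper mirroring the new block above: when versioned
        # results are enabled and an archive location is known, the summary
        # points at the previous run and the revision keys start out blank
        # (the SCM lookup further down may still fill them in).
        if enable_versioned_results and archived_results_path:
            results['previous_results_location'] = archived_results_path
            results['chromium_revision'] = ''
            results['blink_revision'] = ''
        return results

    print(record_previous_results({}, True, 'layout-test-results/archive_1'))
    # {'previous_results_location': 'layout-test-results/archive_1',
    #  'chromium_revision': '', 'blink_revision': ''}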
315     try:
316         # Don't do this by default since it takes >100ms.
317         # It's only used for uploading data to the flakiness dashboard.
318         if port_obj.get_option("builder_name"):
319             port_obj.host.initialize_scm()
320             for (name, path) in port_obj.repository_paths():
321                 results[name.lower() + '_revision'] = port_obj.host.scm().svn_revision(path)
322     except Exception, e:
323         _log.warn("Failed to determine svn revision for checkout (cwd: %s, webkit_base: %s), leaving 'revision' key blank in full_results.json.\n%s" % (port_obj._filesystem.getcwd(), port_obj.path_from_webkit_base(), e))
324         # Handle cases where we're running outside of version control.
325         import traceback
326         _log.debug('Failed to learn head svn revision:')
327         _log.debug(traceback.format_exc())
328         results['chromium_revision'] = ""
329         results['blink_revision'] = ""
330
331     return results
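Finally, a self-contained sketch of the revision lookup and its fallback as it reads after this patch. To keep the example runnable without a real checkout, repository_paths and svn_revision are passed in as plain arguments here, which is an assumption for illustration rather than the webkitpy API.

    def revision_keys(repository_paths, svn_revision):
        # Each (name, path) pair becomes a '<name>_revision' key; if the SCM
        # lookup fails, both revision keys are left blank, matching the
        # except branch above.
        results = {}
        try:
            for name, path in repository_paths:
                results[name.lower() + '_revision'] = svn_revision(path)
        except Exception:
            results['chromium_revision'] = ''
            results['blink_revision'] = ''
        return results

    print(revision_keys([('Chromium', 'src'), ('Blink', 'src/third_party/WebKit')],
                        lambda path: '12345'))
    # {'chromium_revision': '12345', 'blink_revision': '12345'}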
