Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(17)

Side by Side Diff: tools/valgrind/memcheck_analyze.py

Issue 7201026: Print suppression hashes for TSan reports (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 9 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | tools/valgrind/tsan_analyze.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/python 1 #!/usr/bin/python
2 # Copyright (c) 2010 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2010 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 # memcheck_analyze.py 6 # memcheck_analyze.py
7 7
8 ''' Given a valgrind XML file, parses errors and uniques them.''' 8 ''' Given a valgrind XML file, parses errors and uniques them.'''
9 9
10 import gdb_helper 10 import gdb_helper
11 11
12 import logging 12 import logging
13 import hashlib
Alexander Potapenko 2011/06/20 14:57:04 Please fix the import order
Timur Iskhodzhanov 2011/06/20 15:04:51 Done.
13 import optparse 14 import optparse
14 import os 15 import os
15 import re 16 import re
16 import subprocess 17 import subprocess
17 import sys 18 import sys
18 import time 19 import time
19 from xml.dom.minidom import parse 20 from xml.dom.minidom import parse
20 from xml.parsers.expat import ExpatError 21 from xml.parsers.expat import ExpatError
21 22
22 import common 23 import common
(...skipping 224 matching lines...) Expand 10 before | Expand all | Expand 10 after
247 elif frame[SRC_FILE_DIR] != "": 248 elif frame[SRC_FILE_DIR] != "":
248 output += (" (" + frame[SRC_FILE_DIR] + "/" + frame[SRC_FILE_NAME] + 249 output += (" (" + frame[SRC_FILE_DIR] + "/" + frame[SRC_FILE_NAME] +
249 ":" + frame[SRC_LINE] + ")") 250 ":" + frame[SRC_LINE] + ")")
250 else: 251 else:
251 output += " (" + frame[OBJECT_FILE] + ")" 252 output += " (" + frame[OBJECT_FILE] + ")"
252 output += "\n" 253 output += "\n"
253 254
254 assert self._suppression != None, "Your Valgrind doesn't generate " \ 255 assert self._suppression != None, "Your Valgrind doesn't generate " \
255 "suppressions - is it too old?" 256 "suppressions - is it too old?"
256 257
257 output += "Suppression (error hash=#%016X#):" % \ 258 output += "Suppression (error hash=#%016X#):\n" % self.ErrorHash()
258 (self.__hash__() & 0xffffffffffffffff) 259 output += (" For more info on using suppressions see "
  260                "http://dev.chromium.org/developers/how-tos/using-valgrind#TOC-Suppressing-Errors")
261
259 # Widen suppression slightly to make portable between mac and linux 262 # Widen suppression slightly to make portable between mac and linux
260 supp = self._suppression; 263 supp = self._suppression;
261 supp = supp.replace("fun:_Znwj", "fun:_Znw*") 264 supp = supp.replace("fun:_Znwj", "fun:_Znw*")
262 supp = supp.replace("fun:_Znwm", "fun:_Znw*") 265 supp = supp.replace("fun:_Znwm", "fun:_Znw*")
263 supp = supp.replace("fun:_Znaj", "fun:_Zna*") 266 supp = supp.replace("fun:_Znaj", "fun:_Zna*")
264 supp = supp.replace("fun:_Znam", "fun:_Zna*") 267 supp = supp.replace("fun:_Znam", "fun:_Zna*")
265 # Split into lines so we can enforce length limits 268 # Split into lines so we can enforce length limits
266 supplines = supp.split("\n") 269 supplines = supp.split("\n")
267 270
268 # Truncate at line 26 (VG_MAX_SUPP_CALLERS plus 2 for name and type) 271 # Truncate at line 26 (VG_MAX_SUPP_CALLERS plus 2 for name and type)
(...skipping 24 matching lines...) Expand all
293 for frame in backtrace[1]: 296 for frame in backtrace[1]:
294 rep += frame[FUNCTION_NAME] 297 rep += frame[FUNCTION_NAME]
295 298
296 if frame[SRC_FILE_DIR] != "": 299 if frame[SRC_FILE_DIR] != "":
297 rep += frame[SRC_FILE_DIR] + "/" + frame[SRC_FILE_NAME] 300 rep += frame[SRC_FILE_DIR] + "/" + frame[SRC_FILE_NAME]
298 else: 301 else:
299 rep += frame[OBJECT_FILE] 302 rep += frame[OBJECT_FILE]
300 303
301 return rep 304 return rep
302 305
306 # This is a device-independent hash identifying the suppression.
307 # By printing out this hash we can find duplicate reports between tests and
308 # different shards running on multiple buildbots
309 def ErrorHash(self):
310 return int(hashlib.md5(self.UniqueString()).hexdigest()[:16], 16)
311
303 def __hash__(self): 312 def __hash__(self):
304 return hash(self.UniqueString()) 313 return hash(self.UniqueString())
305 def __eq__(self, rhs): 314 def __eq__(self, rhs):
306 return self.UniqueString() == rhs 315 return self.UniqueString() == rhs
307 316
308 def find_and_truncate(f): 317 def find_and_truncate(f):
309 f.seek(0) 318 f.seek(0)
310 while True: 319 while True:
311 line = f.readline() 320 line = f.readline()
312 if line == "": 321 if line == "":
(...skipping 169 matching lines...) Expand 10 before | Expand all | Expand 10 after
482 # Ignore "possible" leaks for now by default. 491 # Ignore "possible" leaks for now by default.
483 if (self._show_all_leaks or 492 if (self._show_all_leaks or
484 getTextOf(raw_error, "kind") != "Leak_PossiblyLost"): 493 getTextOf(raw_error, "kind") != "Leak_PossiblyLost"):
485 error = ValgrindError(self._source_dir, raw_error, commandline) 494 error = ValgrindError(self._source_dir, raw_error, commandline)
486 if error not in cur_report_errors: 495 if error not in cur_report_errors:
487 # We haven't seen such errors doing this report yet... 496 # We haven't seen such errors doing this report yet...
488 if error in self._errors: 497 if error in self._errors:
489 # ... but we saw it in earlier reports, e.g. previous UI test 498 # ... but we saw it in earlier reports, e.g. previous UI test
490 cur_report_errors.add("This error was already printed in " 499 cur_report_errors.add("This error was already printed in "
491 "some other test, see 'hash=#%016X#'" % \ 500 "some other test, see 'hash=#%016X#'" % \
  492                                   (error.__hash__() & 0xffffffffffffffff)) 501                                   error.ErrorHash())
493 else: 502 else:
494 # ... and we haven't seen it in other tests as well 503 # ... and we haven't seen it in other tests as well
495 self._errors.add(error) 504 self._errors.add(error)
496 cur_report_errors.add(error) 505 cur_report_errors.add(error)
497 506
498 suppcountlist = parsed_file.getElementsByTagName("suppcounts") 507 suppcountlist = parsed_file.getElementsByTagName("suppcounts")
499 if len(suppcountlist) > 0: 508 if len(suppcountlist) > 0:
500 suppcountlist = suppcountlist[0] 509 suppcountlist = suppcountlist[0]
501 for node in suppcountlist.getElementsByTagName("pair"): 510 for node in suppcountlist.getElementsByTagName("pair"):
502 count = getTextOf(node, "count"); 511 count = getTextOf(node, "count");
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after
572 parser.error("no filename specified") 581 parser.error("no filename specified")
573 filenames = args 582 filenames = args
574 583
575 analyzer = MemcheckAnalyzer(options.source_dir, use_gdb=True) 584 analyzer = MemcheckAnalyzer(options.source_dir, use_gdb=True)
576 retcode = analyzer.Report(filenames) 585 retcode = analyzer.Report(filenames)
577 586
578 sys.exit(retcode) 587 sys.exit(retcode)
579 588
580 if __name__ == "__main__": 589 if __name__ == "__main__":
581 _main() 590 _main()
OLDNEW
« no previous file with comments | « no previous file | tools/valgrind/tsan_analyze.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698