Chromium Code Reviews

Unified diff: tools/valgrind/memcheck_analyze.py

Issue 3056025: Avoid duplicate error reports / suppressions when UI test reports are... (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 10 years, 4 months ago
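
Before the diff, the gist of the change: analyzer state moves from module globals into the class, so a single analyzer object can serve one Report() call per UI test and skip errors it has already printed, emitting a short hash reference instead. A minimal sketch of that pattern (simplified stand-in names, not the real class):

# Minimal sketch of the cross-report de-duplication this CL introduces;
# the class and the string representation of errors are simplified
# stand-ins, not the real code from the diff below.
class Analyzer(object):
  def __init__(self):
    self._seen = set()   # errors printed by any earlier Report() call

  def Report(self, errors):
    for error in errors:
      if error in self._seen:
        # Repeat from an earlier test run: print a short reference only.
        print "This error was already printed, see 'hash=#%X#'" % hash(error)
      else:
        self._seen.add(error)
        print error

analyzer = Analyzer()                 # created once per run of the tool...
analyzer.Report(["leak in Foo()"])    # ...then invoked once per UI test
analyzer.Report(["leak in Foo()"])    # second time: only a hash reference
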
 #!/usr/bin/python
 # Copyright (c) 2010 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 # memcheck_analyze.py

 ''' Given a valgrind XML file, parses errors and uniques them.'''

 import gdb_helper

 import logging
 import optparse
 import os
 import re
 import subprocess
 import sys
 import time
 from xml.dom.minidom import parse
 from xml.parsers.expat import ExpatError

 import common

 # Global symbol table (yuck)
 TheAddressTable = None

-# Contains the time when we started analyzing the first log file.
-# This variable is used to skip incomplete logs after some timeout.
-# TODO(timurrrr): Currently, this needs to be a global variable
-# because the analyzer can be called multiple times (e.g. ui_tests)
-# unless we re-factor the analyze system to avoid creating multiple analyzers.
-AnalyzeStartTime = None
-
-# Max time to wait for memcheck logs to complete.
-LOG_COMPLETION_TIMEOUT = 180.0
-
 # These are functions (using C++ mangled names) that we look for in stack
 # traces. We don't show stack frames while pretty printing when they are below
 # any of the following:
 _TOP_OF_STACK_POINTS = [
   # Don't show our testing framework.
   "testing::Test::Run()",
   # Also don't show the internals of libc/pthread.
   "start_thread"
 ]

(...skipping 165 matching lines...)
         self._backtraces.append([description, frames])
         description = None
         stack = None
         frames = None
       elif node.localName == "suppression":
         self._suppression = getCDATAOf(node, "rawtext");

   def __str__(self):
     ''' Pretty print the type and backtrace(s) of this specific error,
     including suppression (which is just a mangled backtrace).'''
-    output = self._kind + "\n"
+    output = ""
     if (self._commandline):
       output += self._commandline + "\n"

+    output += self._kind + "\n"
     for backtrace in self._backtraces:
       output += backtrace[0] + "\n"
       filter = subprocess.Popen("c++filt -n", stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT,
                                 shell=True,
                                 close_fds=True)
       buf = ""
       for frame in backtrace[1]:
         buf += (frame[FUNCTION_NAME] or frame[INSTRUCTION_POINTER]) + "\n"
(...skipping 12 matching lines...)
                               frame[INSTRUCTION_POINTER])
         if foo[0] != None:
           output += (" (" + foo[0] + ":" + foo[1] + ")")
         elif frame[SRC_FILE_DIR] != "":
           output += (" (" + frame[SRC_FILE_DIR] + "/" + frame[SRC_FILE_NAME] +
                      ":" + frame[SRC_LINE] + ")")
         else:
           output += " (" + frame[OBJECT_FILE] + ")"
         output += "\n"

-      # TODO(dank): stop synthesizing suppressions once everyone has
-      # valgrind-3.5 and we can rely on xml
-      if (self._suppression == None):
-        output += "Suppression:\n"
-        for frame in backtrace[1]:
-          output += " fun:" + (frame[FUNCTION_NAME] or "*") + "\n"
+      assert self._suppression != None, "Your Valgrind doesn't generate " \
+                                        "suppressions - is it too old?"

-      if (self._suppression != None):
-        output += "Suppression:"
+      output += "Suppression (hash=#%X#):" % self.__hash__()
       # Widen suppression slightly to make portable between mac and linux
       supp = self._suppression;
       supp = supp.replace("fun:_Znwj", "fun:_Znw*")
       supp = supp.replace("fun:_Znwm", "fun:_Znw*")
       # Split into lines so we can enforce length limits
       supplines = supp.split("\n")

       # Truncate at line 26 (VG_MAX_SUPP_CALLERS plus 2 for name and type)
       # or at the first 'boring' caller.
       # (https://bugs.kde.org/show_bug.cgi?id=199468 proposes raising
       # VG_MAX_SUPP_CALLERS, but we're probably fine with it as is.)
       # TODO(dkegel): add more boring callers
       newlen = 26;
       try:
         newlen = min(newlen, supplines.index(" fun:_ZN11MessageLoop3RunEv"))
       except ValueError:
         pass
       if (len(supplines) > newlen):
         supplines = supplines[0:newlen]
         supplines.append("}")

       output += "\n".join(supplines) + "\n"

     return output

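For context on the fun:_Znwj / fun:_Znwm rewrites above: those are the Itanium-mangled names of operator new(unsigned int) and operator new(unsigned long), i.e. the 32-bit and 64-bit allocators, so widening both to the wildcard fun:_Znw* lets one suppression match Linux and Mac builds alike. A standalone sketch (the suppression text is a made-up example in Valgrind's output format):

# Standalone sketch of the widening step; the suppression body is a made-up
# example (_Znwj = operator new(unsigned int), _Znwm = operator new(unsigned
# long), and the Foo frame is hypothetical).
supp = """{
   <insert_a_suppression_name_here>
   Memcheck:Leak
   fun:_Znwj
   fun:_ZN3Foo6CreateEv
}"""
supp = supp.replace("fun:_Znwj", "fun:_Znw*")
supp = supp.replace("fun:_Znwm", "fun:_Znw*")
print supp   # the allocator frame is now the portable "fun:_Znw*"
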
   def UniqueString(self):
     ''' String to use for object identity. Don't print this, use str(obj)
     instead.'''
     rep = self._kind + " "
     for backtrace in self._backtraces:
       for frame in backtrace[1]:
         rep += frame[FUNCTION_NAME]
(...skipping 14 matching lines...)
   f.seek(0)
   while True:
     line = f.readline()
     if line == "":
       return False
     if '</valgrindoutput>' in line:
       # valgrind often has garbage after </valgrindoutput> upon crash
       f.truncate()
       return True

-class MemcheckAnalyze:
+class MemcheckAnalyzer:
   ''' Given a set of Valgrind XML files, parse all the errors out of them,
   unique them and output the results.'''

   SANITY_TEST_SUPPRESSIONS_LINUX = {
     "Memcheck sanity test 01 (memory leak).": 1,
     "Memcheck sanity test 02 (malloc/read left).": 1,
     "Memcheck sanity test 03 (malloc/read right).": 1,
     "Memcheck sanity test 04 (malloc/write left).": 1,
     "Memcheck sanity test 05 (malloc/write right).": 1,
     "Memcheck sanity test 06 (new/read left).": 1,
(...skipping 13 matching lines...)
     "Memcheck sanity test 06 (new/read left).": 1,
     "Memcheck sanity test 07 (new/read right).": 1,
     "Memcheck sanity test 10 (write after free).": 1,
     "Memcheck sanity test 11 (write after delete).": 1,
     "bug_49253 Memcheck sanity test 12 (array deleted without []) on Mac.": 1,
     "bug_49253 Memcheck sanity test 13 (single element deleted with []) on Mac.": 1,
     "bug_49253 Memcheck sanity test 04 (malloc/write left) or Memcheck sanity test 05 (malloc/write right) on Mac.": 2,
     "bug_49253 Memcheck sanity test 08 (new/write left) or Memcheck sanity test 09 (new/write right) on Mac.": 2,
   }

-  def __init__(self, source_dir, files, show_all_leaks=False, use_gdb=False):
-    '''Reads in a set of files.
+  # Max time to wait for memcheck logs to complete.
+  LOG_COMPLETION_TIMEOUT = 180.0
+
+  def __init__(self, source_dir, show_all_leaks=False, use_gdb=False):
+    '''Create a parser for Memcheck logs.

     Args:
       source_dir: Path to top of source tree for this build
+      show_all_leaks: Whether to show even less important leaks
+      use_gdb: Whether to use gdb to resolve source filenames and line numbers
+               in the report stacktraces
+    '''
+    self._source_dir = source_dir
+    self._show_all_leaks = show_all_leaks
+    self._use_gdb = use_gdb
+
+    # Contains the set of unique errors
+    self._errors = set()
+
+    # Contains the time when we started analyzing the first log file.
+    # This variable is used to skip incomplete logs after some timeout.
+    self._analyze_start_time = None
+
+
+  def Report(self, files, check_sanity=False):
+    '''Reads in a set of files and prints the Memcheck report.
+
+    Args:
       files: A list of filenames.
-      show_all_leaks: whether to show even less important leaks
+      check_sanity: if true, search for SANITY_TEST_SUPPRESSIONS
     '''
-
     # Beyond the detailed errors parsed by ValgrindError above,
     # the xml file contains records describing suppressions that were used:
     # <suppcounts>
     #   <pair>
     #     <count>28</count>
     #     <name>pango_font_leak_todo</name>
     #   </pair>
     #   <pair>
     #     <count>378</count>
     #     <name>bug_13243</name>
     #   </pair>
     # </suppcounts>
     # Collect these and print them at the end.
     #
     # With our patch for https://bugs.kde.org/show_bug.cgi?id=205000 in,
     # the file also includes records of the form
     # <load_obj><obj>/usr/lib/libgcc_s.1.dylib</obj><ip>0x27000</ip></load_obj>
     # giving the filename and load address of each binary that was mapped
     # into the process.

     global TheAddressTable
-    if use_gdb:
+    if self._use_gdb:
       TheAddressTable = gdb_helper.AddressTable()
-    self._errors = set()
-    self._suppcounts = {}
+    else:
+      TheAddressTable = None
+    cur_report_errors = set()
+    suppcounts = {}
     badfiles = set()

-    global AnalyzeStartTime
-    if AnalyzeStartTime == None:
-      AnalyzeStartTime = time.time()
-    self._parse_failed = False
+    if self._analyze_start_time == None:
+      self._analyze_start_time = time.time()
+    start_time = self._analyze_start_time
+
+    parse_failed = False
     for file in files:
       # Wait up to three minutes for valgrind to finish writing all files,
       # but after that, just skip incomplete files and warn.
       f = open(file, "r+")
       pid = re.match(".*\.([0-9]+)$", file)
       if pid:
         pid = pid.groups()[0]
       found = False
       running = True
       firstrun = True
       origsize = os.path.getsize(file)
       while (running and not found and
              (firstrun or
-              ((time.time() - AnalyzeStartTime) < LOG_COMPLETION_TIMEOUT))):
+              ((time.time() - start_time) < self.LOG_COMPLETION_TIMEOUT))):
         firstrun = False
         f.seek(0)
         if pid:
           # Make sure the process is still running so we don't wait for
           # 3 minutes if it was killed. See http://crbug.com/17453
           ps_out = subprocess.Popen("ps p %s" % pid, shell=True,
                                     stdout=subprocess.PIPE).stdout
           if len(ps_out.readlines()) < 2:
             running = False
         found = find_and_truncate(f)
         if not running and not found:
           logging.warn("Valgrind process PID = %s is not running but "
                        "its XML log has not been finished correctly." % pid)
         if running and not found:
           time.sleep(1)
       f.close()
       if not found:
         badfiles.add(file)
       else:
         newsize = os.path.getsize(file)
         if origsize > newsize+1:
           logging.warn(str(origsize - newsize) +
                        " bytes of junk were after </valgrindoutput> in %s!" %
                        file)
         try:
           parsed_file = parse(file);
         except ExpatError, e:
-          self._parse_failed = True
+          parse_failed = True
           logging.warn("could not parse %s: %s" % (file, e))
           lineno = e.lineno - 1
           context_lines = 5
           context_start = max(0, lineno - context_lines)
           context_end = lineno + context_lines + 1
           context_file = open(file, "r")
           for i in range(0, context_start):
             context_file.readline()
           for i in range(context_start, context_end):
             context_data = context_file.readline().rstrip()
(...skipping 15 matching lines...)
         for node in preamble.getElementsByTagName("line"):
           if node.localName == "line":
             for x in node.childNodes:
               if x.nodeType == node.TEXT_NODE and "Command" in x.data:
                 commandline = x.data
                 break

         raw_errors = parsed_file.getElementsByTagName("error")
         for raw_error in raw_errors:
           # Ignore "possible" leaks for now by default.
-          if (show_all_leaks or
+          if (self._show_all_leaks or
               getTextOf(raw_error, "kind") != "Leak_PossiblyLost"):
-            error = ValgrindError(source_dir, raw_error, commandline)
-            self._errors.add(error)
+            error = ValgrindError(self._source_dir, raw_error, commandline)
+            if error not in cur_report_errors:
+              # We haven't seen this error during this report yet...
+              if error in self._errors:
+                # ... but we saw it in earlier reports, e.g. a previous UI test
+                cur_report_errors.add("This error was already printed "
+                                      "in some other test, see 'hash=#%X#'" % \
+                                      error.__hash__())
+              else:
+                # ... and we haven't seen it in other tests either
+                self._errors.add(error)
+                cur_report_errors.add(error)

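The membership tests above de-duplicate by content only because ValgrindError hashes and compares by its UniqueString() (shown earlier; the corresponding __hash__/__eq__ presumably live in the elided lines of this file). A minimal sketch of the pattern with a simplified stand-in class:

# Minimal sketch: content-based identity makes set membership de-duplicate.
# FakeError is a stand-in; the real ValgrindError keys off UniqueString().
class FakeError(object):
  def __init__(self, kind, top_frame):
    self._kind = kind
    self._top_frame = top_frame

  def UniqueString(self):
    return self._kind + " " + self._top_frame

  def __hash__(self):
    return hash(self.UniqueString())

  def __eq__(self, rhs):
    return self.UniqueString() == rhs.UniqueString()

seen = set([FakeError("Leak_DefinitelyLost", "Foo()")])
print FakeError("Leak_DefinitelyLost", "Foo()") in seen   # True: a duplicate
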
         suppcountlist = parsed_file.getElementsByTagName("suppcounts")
         if len(suppcountlist) > 0:
           suppcountlist = suppcountlist[0]
           for node in suppcountlist.getElementsByTagName("pair"):
             count = getTextOf(node, "count");
             name = getTextOf(node, "name");
-            if name in self._suppcounts:
-              self._suppcounts[name] += int(count)
+            if name in suppcounts:
+              suppcounts[name] += int(count)
             else:
-              self._suppcounts[name] = int(count)
+              suppcounts[name] = int(count)

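As a worked example of the <suppcounts> records described at the top of Report(), the accumulation above reduces them to a name-to-count dict. A self-contained sketch (text_of is a local stand-in for this file's getTextOf() helper):

# Self-contained sketch of the <suppcounts> accumulation; text_of is a local
# stand-in for this file's getTextOf() helper.
from xml.dom.minidom import parseString

xml = """<suppcounts>
  <pair><count>28</count><name>pango_font_leak_todo</name></pair>
  <pair><count>378</count><name>bug_13243</name></pair>
</suppcounts>"""

def text_of(node, tag):
  # The first child of <tag> is the text node holding its value.
  return node.getElementsByTagName(tag)[0].firstChild.data

suppcounts = {}
for pair in parseString(xml).getElementsByTagName("pair"):
  name = text_of(pair, "name")
  suppcounts[name] = suppcounts.get(name, 0) + int(text_of(pair, "count"))

# Prints the accumulated counts, e.g.
# {u'bug_13243': 378, u'pango_font_leak_todo': 28}
print suppcounts
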
     if len(badfiles) > 0:
       logging.warn("valgrind didn't finish writing %d files?!" % len(badfiles))
       for file in badfiles:
         logging.warn("Last 20 lines of %s :" % file)
         os.system("tail -n 20 '%s' 1>&2" % file)

-  def Report(self, check_sanity=False):
-    if self._parse_failed:
+    if parse_failed:
       logging.error("FAIL! Couldn't parse Valgrind output file")
       return -2

     is_sane = False
     print "-----------------------------------------------------"
     print "Suppressions used:"
     print " count name"

     if common.IsLinux():
-      remaining_sanity_supp = MemcheckAnalyze.SANITY_TEST_SUPPRESSIONS_LINUX
+      remaining_sanity_supp = MemcheckAnalyzer.SANITY_TEST_SUPPRESSIONS_LINUX
     elif common.IsMac():
-      remaining_sanity_supp = MemcheckAnalyze.SANITY_TEST_SUPPRESSIONS_MAC
+      remaining_sanity_supp = MemcheckAnalyzer.SANITY_TEST_SUPPRESSIONS_MAC
     else:
       remaining_sanity_supp = {}
       if check_sanity:
         logging.warn("No sanity test list for platform %s", sys.platform)

-    for (name, count) in sorted(self._suppcounts.items(),
+    for (name, count) in sorted(suppcounts.items(),
                                 key=lambda (k,v): (v,k)):
       print "%7d %s" % (count, name)
       if name in remaining_sanity_supp and remaining_sanity_supp[name] == count:
         del remaining_sanity_supp[name]
     if len(remaining_sanity_supp) == 0:
       is_sane = True
     print "-----------------------------------------------------"
     sys.stdout.flush()

     retcode = 0
     if self._errors:
       logging.error("FAIL! There were %s errors: " % len(self._errors))

-      global TheAddressTable
       if TheAddressTable != None:
         TheAddressTable.ResolveAll()

-      for error in self._errors:
+      for error in cur_report_errors:
         logging.error(error)

       retcode = -1

     # Report tool's insanity even if there were errors.
     if check_sanity and not is_sane:
       logging.error("FAIL! Sanity check failed!")
       logging.info("The following test errors were not handled: ")
       for (name, count) in sorted(remaining_sanity_supp.items(),
                                   key=lambda (k,v): (v,k)):
         logging.info("%7d %s" % (count, name))
       retcode = -3

     if retcode != 0:
       return retcode

     logging.info("PASS! No errors found!")
     return 0

 def _main():
-  '''For testing only. The MemcheckAnalyze class should be imported instead.'''
+  '''For testing only. The MemcheckAnalyzer class should be imported instead.'''
   retcode = 0
   parser = optparse.OptionParser("usage: %prog [options] <files to analyze>")
   parser.add_option("", "--source_dir",
                     help="path to top of source tree for this build "
                          "(used to normalize source paths in baseline)")

   (options, args) = parser.parse_args()
   if len(args) == 0:
     parser.error("no filename specified")
   filenames = args

-  analyzer = MemcheckAnalyze(options.source_dir, filenames, use_gdb=True)
-  retcode = analyzer.Report()
+  analyzer = MemcheckAnalyzer(options.source_dir, use_gdb=True)
+  retcode = analyzer.Report(filenames)

   sys.exit(retcode)

 if __name__ == "__main__":
   _main()
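
Putting the new interface together: a driver (e.g. the UI-test wrapper; the log file names below are made up) now constructs one MemcheckAnalyzer and calls Report() once per test's batch of XML logs, so an error repeated across tests is printed in full only once. A hypothetical sketch:

# Hypothetical driver loop for the new interface; log file names are made up.
import sys

import memcheck_analyze

analyzer = memcheck_analyze.MemcheckAnalyzer("/path/to/src", use_gdb=True)

retcode = 0
for test_logs in [["ui_test.xml.1234"], ["ui_test.xml.5678"]]:
  result = analyzer.Report(test_logs)
  if result != 0:
    retcode = result   # remember any failing report

sys.exit(retcode)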
