OLD | NEW |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """The deep heap profiler script for Chrome.""" | 5 """The deep heap profiler script for Chrome.""" |
6 | 6 |
7 import copy | 7 import copy |
8 import datetime | 8 import datetime |
9 import json | 9 import json |
10 import logging | 10 import logging |
(...skipping 359 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
370 len(self._symbol_mapping_caches[symbol_type])) | 370 len(self._symbol_mapping_caches[symbol_type])) |
371 except IOError as e: | 371 except IOError as e: |
372 LOGGER.info('The symbol cache file is invalid: %s' % e) | 372 LOGGER.info('The symbol cache file is invalid: %s' % e) |
373 | 373 |
374 | 374 |
375 class Rule(object): | 375 class Rule(object): |
376 """Represents one matching rule in a policy file.""" | 376 """Represents one matching rule in a policy file.""" |
377 | 377 |
378 def __init__(self, | 378 def __init__(self, |
379 name, | 379 name, |
380 mmap, | 380 allocator_type, |
381 stackfunction_pattern=None, | 381 stackfunction_pattern=None, |
382 stacksourcefile_pattern=None, | 382 stacksourcefile_pattern=None, |
383 typeinfo_pattern=None): | 383 typeinfo_pattern=None, |
| 384 mappedfile_pattern=None, |
| 385 mappedexecutable_pattern=None): |
384 self._name = name | 386 self._name = name |
385 self._mmap = mmap | 387 self._allocator_type = allocator_type |
386 | 388 |
387 self._stackfunction_pattern = None | 389 self._stackfunction_pattern = None |
388 if stackfunction_pattern: | 390 if stackfunction_pattern: |
389 self._stackfunction_pattern = re.compile( | 391 self._stackfunction_pattern = re.compile( |
390 stackfunction_pattern + r'\Z') | 392 stackfunction_pattern + r'\Z') |
391 | 393 |
392 self._stacksourcefile_pattern = None | 394 self._stacksourcefile_pattern = None |
393 if stacksourcefile_pattern: | 395 if stacksourcefile_pattern: |
394 self._stacksourcefile_pattern = re.compile( | 396 self._stacksourcefile_pattern = re.compile( |
395 stacksourcefile_pattern + r'\Z') | 397 stacksourcefile_pattern + r'\Z') |
396 | 398 |
397 self._typeinfo_pattern = None | 399 self._typeinfo_pattern = None |
398 if typeinfo_pattern: | 400 if typeinfo_pattern: |
399 self._typeinfo_pattern = re.compile(typeinfo_pattern + r'\Z') | 401 self._typeinfo_pattern = re.compile(typeinfo_pattern + r'\Z') |
400 | 402 |
| 403 self._mappedfile_pattern = None |
| 404 if mappedfile_pattern: |
| 405 self._mappedfile_pattern = re.compile(mappedfile_pattern + r'\Z') |
| 406 |
| 407 self._mappedexecutable_pattern = None |
| 408 if mappedexecutable_pattern: |
| 409 self._mappedexecutable_pattern = re.compile( |
| 410 mappedexecutable_pattern + r'\Z') |
| 411 |
401 @property | 412 @property |
402 def name(self): | 413 def name(self): |
403 return self._name | 414 return self._name |
404 | 415 |
405 @property | 416 @property |
406 def mmap(self): | 417 def allocator_type(self): |
407 return self._mmap | 418 return self._allocator_type |
408 | 419 |
409 @property | 420 @property |
410 def stackfunction_pattern(self): | 421 def stackfunction_pattern(self): |
411 return self._stackfunction_pattern | 422 return self._stackfunction_pattern |
412 | 423 |
413 @property | 424 @property |
414 def stacksourcefile_pattern(self): | 425 def stacksourcefile_pattern(self): |
415 return self._stacksourcefile_pattern | 426 return self._stacksourcefile_pattern |
416 | 427 |
417 @property | 428 @property |
418 def typeinfo_pattern(self): | 429 def typeinfo_pattern(self): |
419 return self._typeinfo_pattern | 430 return self._typeinfo_pattern |
420 | 431 |
| 432 @property |
| 433 def mappedfile_pattern(self): |
| 434 return self._mappedfile_pattern |
| 435 |
| 436 @property |
| 437 def mappedexecutable_pattern(self): |
| 438 return self._mappedexecutable_pattern |
| 439 |
421 | 440 |
422 class Policy(object): | 441 class Policy(object): |
423 """Represents a policy, a content of a policy file.""" | 442 """Represents a policy, a content of a policy file.""" |
424 | 443 |
425 def __init__(self, rules, version, components): | 444 def __init__(self, rules, version, components): |
426 self._rules = rules | 445 self._rules = rules |
427 self._version = version | 446 self._version = version |
428 self._components = components | 447 self._components = components |
429 | 448 |
430 @property | 449 @property |
(...skipping 22 matching lines...) Expand all Loading... |
453 if bucket.component_cache: | 472 if bucket.component_cache: |
454 return bucket.component_cache | 473 return bucket.component_cache |
455 | 474 |
456 stackfunction = bucket.symbolized_joined_stackfunction | 475 stackfunction = bucket.symbolized_joined_stackfunction |
457 stacksourcefile = bucket.symbolized_joined_stacksourcefile | 476 stacksourcefile = bucket.symbolized_joined_stacksourcefile |
458 typeinfo = bucket.symbolized_typeinfo | 477 typeinfo = bucket.symbolized_typeinfo |
459 if typeinfo.startswith('0x'): | 478 if typeinfo.startswith('0x'): |
460 typeinfo = bucket.typeinfo_name | 479 typeinfo = bucket.typeinfo_name |
461 | 480 |
462 for rule in self._rules: | 481 for rule in self._rules: |
463 if (bucket.mmap == rule.mmap and | 482 if (bucket.allocator_type == rule.allocator_type and |
464 (not rule.stackfunction_pattern or | 483 (not rule.stackfunction_pattern or |
465 rule.stackfunction_pattern.match(stackfunction)) and | 484 rule.stackfunction_pattern.match(stackfunction)) and |
466 (not rule.stacksourcefile_pattern or | 485 (not rule.stacksourcefile_pattern or |
467 rule.stacksourcefile_pattern.match(stacksourcefile)) and | 486 rule.stacksourcefile_pattern.match(stacksourcefile)) and |
468 (not rule.typeinfo_pattern or rule.typeinfo_pattern.match(typeinfo))): | 487 (not rule.typeinfo_pattern or rule.typeinfo_pattern.match(typeinfo))): |
469 bucket.component_cache = rule.name | 488 bucket.component_cache = rule.name |
470 return rule.name | 489 return rule.name |
471 | 490 |
472 assert False | 491 assert False |
473 | 492 |
| 493 def find_unhooked(self, region): |
| 494 for rule in self._rules: |
| 495 if (region[0] == 'unhooked' and |
| 496 rule.allocator_type == 'unhooked' and |
| 497 (not rule.mappedfile_pattern or |
| 498 rule.mappedfile_pattern.match(region[1]['vma']['name'])) and |
| 499 (not rule.mappedexecutable_pattern or |
| 500 rule.mappedexecutable_pattern.match( |
| 501 region[1]['vma']['executable']))): |
| 502 return rule.name |
| 503 |
| 504 assert False |
| 505 |
474 @staticmethod | 506 @staticmethod |
475 def load(filename, filetype): | 507 def load(filename, filetype): |
476 """Loads a policy file of |filename| in a |format|. | 508 """Loads a policy file of |filename| in a |format|. |
477 | 509 |
478 Args: | 510 Args: |
479 filename: A filename to be loaded. | 511 filename: A filename to be loaded. |
480 filetype: A string to specify a type of the file. Only 'json' is | 512 filetype: A string to specify a type of the file. Only 'json' is |
481 supported for now. | 513 supported for now. |
482 | 514 |
483 Returns: | 515 Returns: |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
518 A loaded policy object. | 550 A loaded policy object. |
519 """ | 551 """ |
520 policy = json.load(policy_f) | 552 policy = json.load(policy_f) |
521 | 553 |
522 rules = [] | 554 rules = [] |
523 for rule in policy['rules']: | 555 for rule in policy['rules']: |
524 stackfunction = rule.get('stackfunction') or rule.get('stacktrace') | 556 stackfunction = rule.get('stackfunction') or rule.get('stacktrace') |
525 stacksourcefile = rule.get('stacksourcefile') | 557 stacksourcefile = rule.get('stacksourcefile') |
526 rules.append(Rule( | 558 rules.append(Rule( |
527 rule['name'], | 559 rule['name'], |
528 rule['allocator'] == 'mmap', | 560 rule['allocator'], # allocator_type |
529 stackfunction, | 561 stackfunction, |
530 stacksourcefile, | 562 stacksourcefile, |
531 rule['typeinfo'] if 'typeinfo' in rule else None)) | 563 rule['typeinfo'] if 'typeinfo' in rule else None, |
| 564 rule.get('mappedfile'), |
| 565 rule.get('mappedexecutable'))) |
532 | 566 |
533 return Policy(rules, policy['version'], policy['components']) | 567 return Policy(rules, policy['version'], policy['components']) |
534 | 568 |
535 | 569 |
536 class PolicySet(object): | 570 class PolicySet(object): |
537 """Represents a set of policies.""" | 571 """Represents a set of policies.""" |
538 | 572 |
539 def __init__(self, policy_directory): | 573 def __init__(self, policy_directory): |
540 self._policy_directory = policy_directory | 574 self._policy_directory = policy_directory |
541 | 575 |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
591 LOGGER.info(' %s: %s' % (label, directory[label]['file'])) | 625 LOGGER.info(' %s: %s' % (label, directory[label]['file'])) |
592 loaded = Policy.load(directory[label]['file'], directory[label]['format']) | 626 loaded = Policy.load(directory[label]['file'], directory[label]['format']) |
593 if loaded: | 627 if loaded: |
594 policies[label] = loaded | 628 policies[label] = loaded |
595 return PolicySet(policies) | 629 return PolicySet(policies) |
596 | 630 |
597 | 631 |
598 class Bucket(object): | 632 class Bucket(object): |
599 """Represents a bucket, which is a unit of memory block classification.""" | 633 """Represents a bucket, which is a unit of memory block classification.""" |
600 | 634 |
601 def __init__(self, stacktrace, mmap, typeinfo, typeinfo_name): | 635 def __init__(self, stacktrace, allocator_type, typeinfo, typeinfo_name): |
602 self._stacktrace = stacktrace | 636 self._stacktrace = stacktrace |
603 self._mmap = mmap | 637 self._allocator_type = allocator_type |
604 self._typeinfo = typeinfo | 638 self._typeinfo = typeinfo |
605 self._typeinfo_name = typeinfo_name | 639 self._typeinfo_name = typeinfo_name |
606 | 640 |
607 self._symbolized_stackfunction = stacktrace | 641 self._symbolized_stackfunction = stacktrace |
608 self._symbolized_joined_stackfunction = '' | 642 self._symbolized_joined_stackfunction = '' |
609 self._symbolized_stacksourcefile = stacktrace | 643 self._symbolized_stacksourcefile = stacktrace |
610 self._symbolized_joined_stacksourcefile = '' | 644 self._symbolized_joined_stacksourcefile = '' |
611 self._symbolized_typeinfo = typeinfo_name | 645 self._symbolized_typeinfo = typeinfo_name |
612 | 646 |
613 self.component_cache = '' | 647 self.component_cache = '' |
614 | 648 |
615 def __str__(self): | 649 def __str__(self): |
616 result = [] | 650 result = [] |
617 result.append('mmap' if self._mmap else 'malloc') | 651 result.append(self._allocator_type) |
618 if self._symbolized_typeinfo == 'no typeinfo': | 652 if self._symbolized_typeinfo == 'no typeinfo': |
619 result.append('tno_typeinfo') | 653 result.append('tno_typeinfo') |
620 else: | 654 else: |
621 result.append('t' + self._symbolized_typeinfo) | 655 result.append('t' + self._symbolized_typeinfo) |
622 result.append('n' + self._typeinfo_name) | 656 result.append('n' + self._typeinfo_name) |
623 result.extend(['%s(@%s)' % (function, sourcefile) | 657 result.extend(['%s(@%s)' % (function, sourcefile) |
624 for function, sourcefile | 658 for function, sourcefile |
625 in zip(self._symbolized_stackfunction, | 659 in zip(self._symbolized_stackfunction, |
626 self._symbolized_stacksourcefile)]) | 660 self._symbolized_stacksourcefile)]) |
627 return ' '.join(result) | 661 return ' '.join(result) |
(...skipping 24 matching lines...) Expand all Loading... |
652 self._symbolized_typeinfo = 'no typeinfo' | 686 self._symbolized_typeinfo = 'no typeinfo' |
653 | 687 |
654 def clear_component_cache(self): | 688 def clear_component_cache(self): |
655 self.component_cache = '' | 689 self.component_cache = '' |
656 | 690 |
657 @property | 691 @property |
658 def stacktrace(self): | 692 def stacktrace(self): |
659 return self._stacktrace | 693 return self._stacktrace |
660 | 694 |
661 @property | 695 @property |
662 def mmap(self): | 696 def allocator_type(self): |
663 return self._mmap | 697 return self._allocator_type |
664 | 698 |
665 @property | 699 @property |
666 def typeinfo(self): | 700 def typeinfo(self): |
667 return self._typeinfo | 701 return self._typeinfo |
668 | 702 |
669 @property | 703 @property |
670 def typeinfo_name(self): | 704 def typeinfo_name(self): |
671 return self._typeinfo_name | 705 return self._typeinfo_name |
672 | 706 |
673 @property | 707 @property |
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
733 self._typeinfo_addresses.add(typeinfo) | 767 self._typeinfo_addresses.add(typeinfo) |
734 elif word[0] == 'n': | 768 elif word[0] == 'n': |
735 typeinfo_name = word[1:] | 769 typeinfo_name = word[1:] |
736 else: | 770 else: |
737 stacktrace_begin = index | 771 stacktrace_begin = index |
738 break | 772 break |
739 stacktrace = [int(address, 16) for address in words[stacktrace_begin:]] | 773 stacktrace = [int(address, 16) for address in words[stacktrace_begin:]] |
740 for frame in stacktrace: | 774 for frame in stacktrace: |
741 self._code_addresses.add(frame) | 775 self._code_addresses.add(frame) |
742 self._buckets[int(words[0])] = Bucket( | 776 self._buckets[int(words[0])] = Bucket( |
743 stacktrace, words[1] == 'mmap', typeinfo, typeinfo_name) | 777 stacktrace, words[1], typeinfo, typeinfo_name) |
744 | 778 |
745 def __iter__(self): | 779 def __iter__(self): |
746 for bucket_id, bucket_content in self._buckets.iteritems(): | 780 for bucket_id, bucket_content in self._buckets.iteritems(): |
747 yield bucket_id, bucket_content | 781 yield bucket_id, bucket_content |
748 | 782 |
749 def __getitem__(self, bucket_id): | 783 def __getitem__(self, bucket_id): |
750 return self._buckets[bucket_id] | 784 return self._buckets[bucket_id] |
751 | 785 |
752 def get(self, bucket_id): | 786 def get(self, bucket_id): |
753 return self._buckets.get(bucket_id) | 787 return self._buckets.get(bucket_id) |
(...skipping 17 matching lines...) Expand all Loading... |
771 | 805 |
772 class Dump(object): | 806 class Dump(object): |
773 """Represents a heap profile dump.""" | 807 """Represents a heap profile dump.""" |
774 | 808 |
775 _PATH_PATTERN = re.compile(r'^(.*)\.([0-9]+)\.([0-9]+)\.heap$') | 809 _PATH_PATTERN = re.compile(r'^(.*)\.([0-9]+)\.([0-9]+)\.heap$') |
776 | 810 |
777 _HOOK_PATTERN = re.compile( | 811 _HOOK_PATTERN = re.compile( |
778 r'^ ([ \(])([a-f0-9]+)([ \)])-([ \(])([a-f0-9]+)([ \)])\s+' | 812 r'^ ([ \(])([a-f0-9]+)([ \)])-([ \(])([a-f0-9]+)([ \)])\s+' |
779 r'(hooked|unhooked)\s+(.+)$', re.IGNORECASE) | 813 r'(hooked|unhooked)\s+(.+)$', re.IGNORECASE) |
780 | 814 |
| 815 _HOOKED_PATTERN = re.compile(r'(.+) ([0-9]+) / ([0-9]+) @ ([0-9]+)') |
| 816 _UNHOOKED_PATTERN = re.compile(r'(.+) ([0-9]+) / ([0-9]+)') |
| 817 |
| 818 _OLD_HOOKED_PATTERN = re.compile(r'(.+) @ ([0-9]+)') |
| 819 _OLD_UNHOOKED_PATTERN = re.compile(r'(.+) ([0-9]+)') |
| 820 |
781 _TIME_PATTERN_FORMAT = re.compile( | 821 _TIME_PATTERN_FORMAT = re.compile( |
782 r'^Time: ([0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+:[0-9]+)(\.[0-9]+)?') | 822 r'^Time: ([0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+:[0-9]+)(\.[0-9]+)?') |
783 _TIME_PATTERN_SECONDS = re.compile(r'^Time: ([0-9]+)$') | 823 _TIME_PATTERN_SECONDS = re.compile(r'^Time: ([0-9]+)$') |
784 | 824 |
785 def __init__(self, path, modified_time): | 825 def __init__(self, path, modified_time): |
786 self._path = path | 826 self._path = path |
787 matched = self._PATH_PATTERN.match(path) | 827 matched = self._PATH_PATTERN.match(path) |
788 self._pid = int(matched.group(2)) | 828 self._pid = int(matched.group(2)) |
789 self._count = int(matched.group(3)) | 829 self._count = int(matched.group(3)) |
790 self._time = modified_time | 830 self._time = modified_time |
(...skipping 160 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
951 def _parse_mmap_list(self): | 991 def _parse_mmap_list(self): |
952 """Parses lines in self._lines as a mmap list.""" | 992 """Parses lines in self._lines as a mmap list.""" |
953 (ln, found) = skip_while( | 993 (ln, found) = skip_while( |
954 0, len(self._lines), | 994 0, len(self._lines), |
955 lambda n: self._lines[n] != 'MMAP_LIST:\n') | 995 lambda n: self._lines[n] != 'MMAP_LIST:\n') |
956 if not found: | 996 if not found: |
957 return {} | 997 return {} |
958 | 998 |
959 ln += 1 | 999 ln += 1 |
960 self._map = {} | 1000 self._map = {} |
| 1001 current_vma = dict() |
961 while True: | 1002 while True: |
962 entry = proc_maps.ProcMaps.parse_line(self._lines[ln]) | 1003 entry = proc_maps.ProcMaps.parse_line(self._lines[ln]) |
963 if entry: | 1004 if entry: |
| 1005 current_vma = dict() |
964 for _, _, attr in self._procmaps.iter_range(entry.begin, entry.end): | 1006 for _, _, attr in self._procmaps.iter_range(entry.begin, entry.end): |
965 for key, value in entry.as_dict().iteritems(): | 1007 for key, value in entry.as_dict().iteritems(): |
966 attr[key] = value | 1008 attr[key] = value |
| 1009 current_vma[key] = value |
967 ln += 1 | 1010 ln += 1 |
968 continue | 1011 continue |
969 matched = self._HOOK_PATTERN.match(self._lines[ln]) | 1012 matched = self._HOOK_PATTERN.match(self._lines[ln]) |
970 if not matched: | 1013 if not matched: |
971 break | 1014 break |
972 # 2: starting address | 1015 # 2: starting address |
973 # 5: end address | 1016 # 5: end address |
974 # 7: hooked or unhooked | 1017 # 7: hooked or unhooked |
975 # 8: additional information | 1018 # 8: additional information |
| 1019 if matched.group(7) == 'hooked': |
| 1020 matched_hooked = self._HOOKED_PATTERN.match(matched.group(8)) |
| 1021 if matched_hooked: |
| 1022 region_info = { |
| 1023 'vma': current_vma, |
| 1024 'type': matched_hooked.group(1), |
| 1025 'committed': int(matched_hooked.group(2)), |
| 1026 'virtual': int(matched_hooked.group(3)), |
| 1027 'bucket_id': int(matched_hooked.group(4)), |
| 1028 } |
| 1029 else: |
| 1030 matched_old_hooked = self._OLD_HOOKED_PATTERN.match(matched.group(8)) |
| 1031 if matched_old_hooked: |
| 1032 region_info = { |
| 1033 'vma': current_vma, |
| 1034 'type': matched_old_hooked.group(1), |
| 1035 'bucket_id': int(matched_old_hooked.group(2)), |
| 1036 } |
| 1037 else: |
| 1038 region_info = { 'vma': current_vma } |
| 1039 elif matched.group(7) == 'unhooked': |
| 1040 matched_unhooked = self._UNHOOKED_PATTERN.match(matched.group(8)) |
| 1041 if matched_unhooked: |
| 1042 region_info = { |
| 1043 'vma': current_vma, |
| 1044 'type': matched_unhooked.group(1), |
| 1045 'committed': int(matched_unhooked.group(2)), |
| 1046 'virtual': int(matched_unhooked.group(3)), |
| 1047 } |
| 1048 else: |
| 1049 matched_old_unhooked = self._OLD_UNHOOKED_PATTERN.match( |
| 1050 matched.group(8)) |
| 1051 if matched_old_unhooked: |
| 1052 region_info = { |
| 1053 'vma': current_vma, |
| 1054 'type': matched_old_unhooked.group(1), |
| 1055 'committed': int(matched_old_unhooked.group(2)), |
| 1056 } |
| 1057 else: |
| 1058 region_info = { 'vma': current_vma } |
| 1059 else: |
| 1060 assert matched.group(7) in ['hooked', 'unhooked'] |
| 1061 |
976 self._map[(int(matched.group(2), 16), | 1062 self._map[(int(matched.group(2), 16), |
977 int(matched.group(5), 16))] = (matched.group(7), | 1063 int(matched.group(5), 16))] = (matched.group(7), region_info) |
978 matched.group(8)) | |
979 ln += 1 | 1064 ln += 1 |
980 | 1065 |
981 def _extract_stacktrace_lines(self, line_number): | 1066 def _extract_stacktrace_lines(self, line_number): |
982 """Extracts the position of stacktrace lines. | 1067 """Extracts the position of stacktrace lines. |
983 | 1068 |
984 Valid stacktrace lines are stored into self._stacktrace_lines. | 1069 Valid stacktrace lines are stored into self._stacktrace_lines. |
985 | 1070 |
986 Args: | 1071 Args: |
987 line_number: A line number to start parsing in lines. | 1072 line_number: A line number to start parsing in lines. |
988 | 1073 |
(...skipping 292 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1281 first_dump_time: An integer representing time when the first dump is | 1366 first_dump_time: An integer representing time when the first dump is |
1282 dumped. | 1367 dumped. |
1283 | 1368 |
1284 Returns: | 1369 Returns: |
1285 A dict mapping components and their corresponding sizes. | 1370 A dict mapping components and their corresponding sizes. |
1286 """ | 1371 """ |
1287 LOGGER.info(' %s' % dump.path) | 1372 LOGGER.info(' %s' % dump.path) |
1288 sizes = dict((c, 0) for c in policy.components) | 1373 sizes = dict((c, 0) for c in policy.components) |
1289 | 1374 |
1290 PolicyCommands._accumulate(dump, policy, bucket_set, sizes) | 1375 PolicyCommands._accumulate(dump, policy, bucket_set, sizes) |
| 1376 PolicyCommands._accumulate_map(dump, policy, sizes) |
1291 | 1377 |
1292 sizes['mmap-no-log'] = ( | 1378 sizes['mmap-no-log'] = ( |
1293 dump.global_stat('profiled-mmap_committed') - | 1379 dump.global_stat('profiled-mmap_committed') - |
1294 sizes['mmap-total-log']) | 1380 sizes['mmap-total-log']) |
1295 sizes['mmap-total-record'] = dump.global_stat('profiled-mmap_committed') | 1381 sizes['mmap-total-record'] = dump.global_stat('profiled-mmap_committed') |
1296 sizes['mmap-total-record-vm'] = dump.global_stat('profiled-mmap_virtual') | 1382 sizes['mmap-total-record-vm'] = dump.global_stat('profiled-mmap_virtual') |
1297 | 1383 |
1298 sizes['tc-no-log'] = ( | 1384 sizes['tc-no-log'] = ( |
1299 dump.global_stat('profiled-malloc_committed') - | 1385 dump.global_stat('profiled-malloc_committed') - |
1300 sizes['tc-total-log']) | 1386 sizes['tc-total-log']) |
1301 sizes['tc-total-record'] = dump.global_stat('profiled-malloc_committed') | 1387 sizes['tc-total-record'] = dump.global_stat('profiled-malloc_committed') |
1302 sizes['tc-unused'] = ( | 1388 sizes['tc-unused'] = ( |
1303 sizes['mmap-tcmalloc'] - | 1389 sizes['mmap-tcmalloc'] - |
1304 dump.global_stat('profiled-malloc_committed')) | 1390 dump.global_stat('profiled-malloc_committed')) |
| 1391 if sizes['tc-unused'] < 0: |
| 1392 LOGGER.warn(' Assuming tc-unused=0 as it is negative: %d (bytes)' % |
| 1393 sizes['tc-unused']) |
| 1394 sizes['tc-unused'] = 0 |
1305 sizes['tc-total'] = sizes['mmap-tcmalloc'] | 1395 sizes['tc-total'] = sizes['mmap-tcmalloc'] |
1306 | 1396 |
1307 for key, value in { | 1397 for key, value in { |
1308 'total': 'total_committed', | 1398 'total': 'total_committed', |
1309 'filemapped': 'file_committed', | 1399 'filemapped': 'file_committed', |
1310 'absent': 'absent_committed', | 1400 'absent': 'absent_committed', |
1311 'file-exec': 'file-exec_committed', | 1401 'file-exec': 'file-exec_committed', |
1312 'file-nonexec': 'file-nonexec_committed', | 1402 'file-nonexec': 'file-nonexec_committed', |
1313 'anonymous': 'anonymous_committed', | 1403 'anonymous': 'anonymous_committed', |
1314 'stack': 'stack_committed', | 1404 'stack': 'stack_committed', |
1315 'other': 'other_committed', | 1405 'other': 'other_committed', |
1316 'unhooked-absent': 'nonprofiled-absent_committed', | 1406 'unhooked-absent': 'nonprofiled-absent_committed', |
1317 'unhooked-anonymous': 'nonprofiled-anonymous_committed', | |
1318 'unhooked-file-exec': 'nonprofiled-file-exec_committed', | |
1319 'unhooked-file-nonexec': 'nonprofiled-file-nonexec_committed', | |
1320 'unhooked-stack': 'nonprofiled-stack_committed', | |
1321 'unhooked-other': 'nonprofiled-other_committed', | |
1322 'total-vm': 'total_virtual', | 1407 'total-vm': 'total_virtual', |
1323 'filemapped-vm': 'file_virtual', | 1408 'filemapped-vm': 'file_virtual', |
1324 'anonymous-vm': 'anonymous_virtual', | 1409 'anonymous-vm': 'anonymous_virtual', |
1325 'other-vm': 'other_virtual' }.iteritems(): | 1410 'other-vm': 'other_virtual' }.iteritems(): |
1326 if key in sizes: | 1411 if key in sizes: |
1327 sizes[key] = dump.global_stat(value) | 1412 sizes[key] = dump.global_stat(value) |
1328 | 1413 |
1329 if 'mustbezero' in sizes: | 1414 if 'mustbezero' in sizes: |
1330 removed_list = ( | 1415 removed_list = ( |
1331 'profiled-mmap_committed', | 1416 'profiled-mmap_committed', |
(...skipping 27 matching lines...) Expand all Loading... |
1359 component_match = policy.find(bucket) | 1444 component_match = policy.find(bucket) |
1360 sizes[component_match] += int(words[COMMITTED]) | 1445 sizes[component_match] += int(words[COMMITTED]) |
1361 | 1446 |
1362 if component_match.startswith('tc-'): | 1447 if component_match.startswith('tc-'): |
1363 sizes['tc-total-log'] += int(words[COMMITTED]) | 1448 sizes['tc-total-log'] += int(words[COMMITTED]) |
1364 elif component_match.startswith('mmap-'): | 1449 elif component_match.startswith('mmap-'): |
1365 sizes['mmap-total-log'] += int(words[COMMITTED]) | 1450 sizes['mmap-total-log'] += int(words[COMMITTED]) |
1366 else: | 1451 else: |
1367 sizes['other-total-log'] += int(words[COMMITTED]) | 1452 sizes['other-total-log'] += int(words[COMMITTED]) |
1368 | 1453 |
| 1454 @staticmethod |
| 1455 def _accumulate_map(dump, policy, sizes): |
| 1456 for _, value in dump.iter_map: |
| 1457 if value[0] == 'unhooked': |
| 1458 component_match = policy.find_unhooked(value) |
| 1459 sizes[component_match] += int(value[1]['committed']) |
| 1460 |
1369 | 1461 |
1370 class CSVCommand(PolicyCommands): | 1462 class CSVCommand(PolicyCommands): |
1371 def __init__(self): | 1463 def __init__(self): |
1372 super(CSVCommand, self).__init__('csv') | 1464 super(CSVCommand, self).__init__('csv') |
1373 | 1465 |
1374 def do(self, sys_argv): | 1466 def do(self, sys_argv): |
1375 policy_set, dumps, bucket_set = self._set_up(sys_argv) | 1467 policy_set, dumps, bucket_set = self._set_up(sys_argv) |
1376 return CSVCommand._output(policy_set, dumps, bucket_set, sys.stdout) | 1468 return CSVCommand._output(policy_set, dumps, bucket_set, sys.stdout) |
1377 | 1469 |
1378 @staticmethod | 1470 @staticmethod |
(...skipping 126 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1505 max_dump_count_digit = len(str(max_dump_count)) | 1597 max_dump_count_digit = len(str(max_dump_count)) |
1506 for begin, end, attr in range_dict.iter_range(): | 1598 for begin, end, attr in range_dict.iter_range(): |
1507 out.write('%x-%x\n' % (begin, end)) | 1599 out.write('%x-%x\n' % (begin, end)) |
1508 if len(attr) < max_dump_count: | 1600 if len(attr) < max_dump_count: |
1509 attr[max_dump_count] = None | 1601 attr[max_dump_count] = None |
1510 for index, x in enumerate(attr[1:]): | 1602 for index, x in enumerate(attr[1:]): |
1511 out.write(' #%0*d: ' % (max_dump_count_digit, index + 1)) | 1603 out.write(' #%0*d: ' % (max_dump_count_digit, index + 1)) |
1512 if not x: | 1604 if not x: |
1513 out.write('None\n') | 1605 out.write('None\n') |
1514 elif x[0] == 'hooked': | 1606 elif x[0] == 'hooked': |
1515 attrs = x[1].split() | 1607 region_info = x[1] |
1516 assert len(attrs) == 3 | 1608 bucket_id = region_info['bucket_id'] |
1517 bucket_id = int(attrs[2]) | |
1518 bucket = bucket_set.get(bucket_id) | 1609 bucket = bucket_set.get(bucket_id) |
1519 component = policy.find(bucket) | 1610 component = policy.find(bucket) |
1520 out.write('hooked %s: %s @ %d\n' % (attrs[0], component, bucket_id)) | 1611 out.write('hooked %s: %s @ %d\n' % ( |
| 1612 region_info['type'], component, bucket_id)) |
1521 else: | 1613 else: |
1522 attrs = x[1].split() | 1614 region_info = x[1] |
1523 size = int(attrs[1]) | 1615 size = region_info['committed'] |
1524 out.write('unhooked %s: %d bytes committed\n' % (attrs[0], size)) | 1616 out.write('unhooked %s: %d bytes committed\n' % ( |
| 1617 region_info['type'], size)) |
1525 | 1618 |
1526 | 1619 |
1527 class ExpandCommand(Command): | 1620 class ExpandCommand(Command): |
1528 def __init__(self): | 1621 def __init__(self): |
1529 super(ExpandCommand, self).__init__( | 1622 super(ExpandCommand, self).__init__( |
1530 'Usage: %prog expand <dump> <policy> <component> <depth>') | 1623 'Usage: %prog expand <dump> <policy> <component> <depth>') |
1531 | 1624 |
1532 def do(self, sys_argv): | 1625 def do(self, sys_argv): |
1533 _, args = self._parse_args(sys_argv, 4) | 1626 _, args = self._parse_args(sys_argv, 4) |
1534 dump_path = args[1] | 1627 dump_path = args[1] |
(...skipping 279 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1814 errorcode = COMMANDS[action]().do(sys.argv) | 1907 errorcode = COMMANDS[action]().do(sys.argv) |
1815 except ParsingException, e: | 1908 except ParsingException, e: |
1816 errorcode = 1 | 1909 errorcode = 1 |
1817 sys.stderr.write('Exit by parsing error: %s\n' % e) | 1910 sys.stderr.write('Exit by parsing error: %s\n' % e) |
1818 | 1911 |
1819 return errorcode | 1912 return errorcode |
1820 | 1913 |
1821 | 1914 |
1822 if __name__ == '__main__': | 1915 if __name__ == '__main__': |
1823 sys.exit(main()) | 1916 sys.exit(main()) |
OLD | NEW |