OLD | NEW |
1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """The deep heap profiler script for Chrome.""" | 5 """The deep heap profiler script for Chrome.""" |
6 | 6 |
7 import copy | 7 import copy |
8 import datetime | 8 import datetime |
9 import json | 9 import json |
10 import logging | 10 import logging |
(...skipping 358 matching lines...)
369 len(self._symbol_mapping_caches[symbol_type])) | 369 len(self._symbol_mapping_caches[symbol_type])) |
370 except IOError as e: | 370 except IOError as e: |
371 LOGGER.info('The symbol cache file is invalid: %s' % e) | 371 LOGGER.info('The symbol cache file is invalid: %s' % e) |
372 | 372 |
373 | 373 |
374 class Rule(object): | 374 class Rule(object): |
375 """Represents one matching rule in a policy file.""" | 375 """Represents one matching rule in a policy file.""" |
376 | 376 |
377 def __init__(self, | 377 def __init__(self, |
378 name, | 378 name, |
379 mmap, | 379 allocator_type, |
380 stackfunction_pattern=None, | 380 stackfunction_pattern=None, |
381 stacksourcefile_pattern=None, | 381 stacksourcefile_pattern=None, |
382 typeinfo_pattern=None): | 382 typeinfo_pattern=None, |
| 383 mappedpathname_pattern=None, |
| 384 mappedpermission_pattern=None): |
383 self._name = name | 385 self._name = name |
384 self._mmap = mmap | 386 self._allocator_type = allocator_type |
385 | 387 |
386 self._stackfunction_pattern = None | 388 self._stackfunction_pattern = None |
387 if stackfunction_pattern: | 389 if stackfunction_pattern: |
388 self._stackfunction_pattern = re.compile( | 390 self._stackfunction_pattern = re.compile( |
389 stackfunction_pattern + r'\Z') | 391 stackfunction_pattern + r'\Z') |
390 | 392 |
391 self._stacksourcefile_pattern = None | 393 self._stacksourcefile_pattern = None |
392 if stacksourcefile_pattern: | 394 if stacksourcefile_pattern: |
393 self._stacksourcefile_pattern = re.compile( | 395 self._stacksourcefile_pattern = re.compile( |
394 stacksourcefile_pattern + r'\Z') | 396 stacksourcefile_pattern + r'\Z') |
395 | 397 |
396 self._typeinfo_pattern = None | 398 self._typeinfo_pattern = None |
397 if typeinfo_pattern: | 399 if typeinfo_pattern: |
398 self._typeinfo_pattern = re.compile(typeinfo_pattern + r'\Z') | 400 self._typeinfo_pattern = re.compile(typeinfo_pattern + r'\Z') |
399 | 401 |
| 402 self._mappedpathname_pattern = None |
| 403 if mappedpathname_pattern: |
| 404 self._mappedpathname_pattern = re.compile(mappedpathname_pattern + r'\Z') |
| 405 |
| 406 self._mappedpermission_pattern = None |
| 407 if mappedpermission_pattern: |
| 408 self._mappedpermission_pattern = re.compile( |
| 409 mappedpermission_pattern + r'\Z') |
| 410 |
400 @property | 411 @property |
401 def name(self): | 412 def name(self): |
402 return self._name | 413 return self._name |
403 | 414 |
404 @property | 415 @property |
405 def mmap(self): | 416 def allocator_type(self): |
406 return self._mmap | 417 return self._allocator_type |
407 | 418 |
408 @property | 419 @property |
409 def stackfunction_pattern(self): | 420 def stackfunction_pattern(self): |
410 return self._stackfunction_pattern | 421 return self._stackfunction_pattern |
411 | 422 |
412 @property | 423 @property |
413 def stacksourcefile_pattern(self): | 424 def stacksourcefile_pattern(self): |
414 return self._stacksourcefile_pattern | 425 return self._stacksourcefile_pattern |
415 | 426 |
416 @property | 427 @property |
417 def typeinfo_pattern(self): | 428 def typeinfo_pattern(self): |
418 return self._typeinfo_pattern | 429 return self._typeinfo_pattern |
419 | 430 |
| 431 @property |
| 432 def mappedpathname_pattern(self): |
| 433 return self._mappedpathname_pattern |
| 434 |
| 435 @property |
| 436 def mappedpermission_pattern(self): |
| 437 return self._mappedpermission_pattern |
| 438 |
420 | 439 |
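Each optional pattern above is compiled with a trailing r'\Z', so a rule only fires when it matches the whole attribute string, not a prefix. A minimal sketch of that anchoring, with an invented pathname pattern:

```python
import re

# r'\Z' makes the rule a full-string match rather than a prefix match.
pattern = re.compile(r'/usr/lib/.*\.so' + r'\Z')
assert pattern.match('/usr/lib/libfoo.so')          # consumes the whole string
assert not pattern.match('/usr/lib/libfoo.so.1.2')  # trailing '.1.2' rejects it
```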
421 class Policy(object): | 440 class Policy(object): |
422 """Represents a policy, a content of a policy file.""" | 441 """Represents a policy, a content of a policy file.""" |
423 | 442 |
424 def __init__(self, rules, version, components): | 443 def __init__(self, rules, version, components): |
425 self._rules = rules | 444 self._rules = rules |
426 self._version = version | 445 self._version = version |
427 self._components = components | 446 self._components = components |
428 | 447 |
429 @property | 448 @property |
(...skipping 22 matching lines...)
452 if bucket.component_cache: | 471 if bucket.component_cache: |
453 return bucket.component_cache | 472 return bucket.component_cache |
454 | 473 |
455 stackfunction = bucket.symbolized_joined_stackfunction | 474 stackfunction = bucket.symbolized_joined_stackfunction |
456 stacksourcefile = bucket.symbolized_joined_stacksourcefile | 475 stacksourcefile = bucket.symbolized_joined_stacksourcefile |
457 typeinfo = bucket.symbolized_typeinfo | 476 typeinfo = bucket.symbolized_typeinfo |
458 if typeinfo.startswith('0x'): | 477 if typeinfo.startswith('0x'): |
459 typeinfo = bucket.typeinfo_name | 478 typeinfo = bucket.typeinfo_name |
460 | 479 |
461 for rule in self._rules: | 480 for rule in self._rules: |
462 if (bucket.mmap == rule.mmap and | 481 if (bucket.allocator_type == rule.allocator_type and |
463 (not rule.stackfunction_pattern or | 482 (not rule.stackfunction_pattern or |
464 rule.stackfunction_pattern.match(stackfunction)) and | 483 rule.stackfunction_pattern.match(stackfunction)) and |
465 (not rule.stacksourcefile_pattern or | 484 (not rule.stacksourcefile_pattern or |
466 rule.stacksourcefile_pattern.match(stacksourcefile)) and | 485 rule.stacksourcefile_pattern.match(stacksourcefile)) and |
467 (not rule.typeinfo_pattern or rule.typeinfo_pattern.match(typeinfo))): | 486 (not rule.typeinfo_pattern or rule.typeinfo_pattern.match(typeinfo))): |
468 bucket.component_cache = rule.name | 487 bucket.component_cache = rule.name |
469 return rule.name | 488 return rule.name |
470 | 489 |
471 assert False | 490 assert False |
472 | 491 |
| 492 def find_unhooked(self, region): |
| 493 for rule in self._rules: |
| 494 if (region[0] == 'unhooked' and |
| 495 rule.allocator_type == 'unhooked' and |
| 496 (not rule.mappedpathname_pattern or |
| 497 rule.mappedpathname_pattern.match(region[1]['vma']['name'])) and |
| 498 (not rule.mappedpermission_pattern or |
| 499 rule.mappedpermission_pattern.match( |
| 500 region[1]['vma']['readable'] + |
| 501 region[1]['vma']['writable'] + |
| 502 region[1]['vma']['executable'] + |
| 503 region[1]['vma']['private']))): |
| 504 return rule.name |
| 505 |
| 506 assert False |
| 507 |
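find_unhooked expects the (state, region_info) tuples built by Dump._parse_mmap_list below; region_info['vma'] holds the fields parsed from the /proc/maps line, and the permission rule matches against the concatenation readable+writable+executable+private (e.g. 'r-xp'). A sketch with invented values:

```python
# Hypothetical region as produced by Dump._parse_mmap_list:
region = ('unhooked', {
    'vma': {
        'name': '/usr/lib/libexample.so',  # invented pathname
        'readable': 'r',
        'writable': '-',
        'executable': 'x',
        'private': 'p',  # matched together as 'r-xp'
    },
    'committed': 4096,
    'reserved': 8192,
})
component = policy.find_unhooked(region)  # name of the first matching rule
```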
473 @staticmethod | 508 @staticmethod |
474 def load(filename, filetype): | 509 def load(filename, filetype): |
475 """Loads a policy file of |filename| in a |format|. | 510 """Loads a policy file of |filename| in a |format|. |
476 | 511 |
477 Args: | 512 Args: |
478 filename: A filename to be loaded. | 513 filename: A filename to be loaded. |
479 filetype: A string to specify the type of the file. Only 'json' is | 514 filetype: A string to specify the type of the file. Only 'json' is |
480 supported for now. | 515 supported for now. |
481 | 516 |
482 Returns: | 517 Returns: |
(...skipping 34 matching lines...)
517 A loaded policy object. | 552 A loaded policy object. |
518 """ | 553 """ |
519 policy = json.load(policy_f) | 554 policy = json.load(policy_f) |
520 | 555 |
521 rules = [] | 556 rules = [] |
522 for rule in policy['rules']: | 557 for rule in policy['rules']: |
523 stackfunction = rule.get('stackfunction') or rule.get('stacktrace') | 558 stackfunction = rule.get('stackfunction') or rule.get('stacktrace') |
524 stacksourcefile = rule.get('stacksourcefile') | 559 stacksourcefile = rule.get('stacksourcefile') |
525 rules.append(Rule( | 560 rules.append(Rule( |
526 rule['name'], | 561 rule['name'], |
527 rule['allocator'] == 'mmap', | 562 rule['allocator'], # allocator_type |
528 stackfunction, | 563 stackfunction, |
529 stacksourcefile, | 564 stacksourcefile, |
530 rule['typeinfo'] if 'typeinfo' in rule else None)) | 565 rule['typeinfo'] if 'typeinfo' in rule else None, |
| 566 rule.get('mappedpathname'), |
| 567 rule.get('mappedpermission'))) |
531 | 568 |
532 return Policy(rules, policy['version'], policy['components']) | 569 return Policy(rules, policy['version'], policy['components']) |
533 | 570 |
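For reference, a 'rules' entry in the JSON policy file now looks roughly like the dict below once json.load() has run; only 'name' and 'allocator' are required, and the two mapped* keys are the new optional ones (all values invented for illustration):

```python
rule = {
    'name': 'unhooked-file-exec',   # component to charge (invented)
    'allocator': 'unhooked',        # becomes Rule.allocator_type verbatim
    'mappedpathname': r'/.*\.so',   # optional, matched against the vma name
    'mappedpermission': 'r-xp',     # optional, matched against permissions
}
```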
534 | 571 |
535 class PolicySet(object): | 572 class PolicySet(object): |
536 """Represents a set of policies.""" | 573 """Represents a set of policies.""" |
537 | 574 |
538 def __init__(self, policy_directory): | 575 def __init__(self, policy_directory): |
539 self._policy_directory = policy_directory | 576 self._policy_directory = policy_directory |
540 | 577 |
(...skipping 49 matching lines...)
590 LOGGER.info(' %s: %s' % (label, directory[label]['file'])) | 627 LOGGER.info(' %s: %s' % (label, directory[label]['file'])) |
591 loaded = Policy.load(directory[label]['file'], directory[label]['format']) | 628 loaded = Policy.load(directory[label]['file'], directory[label]['format']) |
592 if loaded: | 629 if loaded: |
593 policies[label] = loaded | 630 policies[label] = loaded |
594 return PolicySet(policies) | 631 return PolicySet(policies) |
595 | 632 |
596 | 633 |
597 class Bucket(object): | 634 class Bucket(object): |
598 """Represents a bucket, which is a unit of memory block classification.""" | 635 """Represents a bucket, which is a unit of memory block classification.""" |
599 | 636 |
600 def __init__(self, stacktrace, mmap, typeinfo, typeinfo_name): | 637 def __init__(self, stacktrace, allocator_type, typeinfo, typeinfo_name): |
601 self._stacktrace = stacktrace | 638 self._stacktrace = stacktrace |
602 self._mmap = mmap | 639 self._allocator_type = allocator_type |
603 self._typeinfo = typeinfo | 640 self._typeinfo = typeinfo |
604 self._typeinfo_name = typeinfo_name | 641 self._typeinfo_name = typeinfo_name |
605 | 642 |
606 self._symbolized_stackfunction = stacktrace | 643 self._symbolized_stackfunction = stacktrace |
607 self._symbolized_joined_stackfunction = '' | 644 self._symbolized_joined_stackfunction = '' |
608 self._symbolized_stacksourcefile = stacktrace | 645 self._symbolized_stacksourcefile = stacktrace |
609 self._symbolized_joined_stacksourcefile = '' | 646 self._symbolized_joined_stacksourcefile = '' |
610 self._symbolized_typeinfo = typeinfo_name | 647 self._symbolized_typeinfo = typeinfo_name |
611 | 648 |
612 self.component_cache = '' | 649 self.component_cache = '' |
613 | 650 |
614 def __str__(self): | 651 def __str__(self): |
615 result = [] | 652 result = [] |
616 result.append('mmap' if self._mmap else 'malloc') | 653 result.append(self._allocator_type) |
617 if self._symbolized_typeinfo == 'no typeinfo': | 654 if self._symbolized_typeinfo == 'no typeinfo': |
618 result.append('tno_typeinfo') | 655 result.append('tno_typeinfo') |
619 else: | 656 else: |
620 result.append('t' + self._symbolized_typeinfo) | 657 result.append('t' + self._symbolized_typeinfo) |
621 result.append('n' + self._typeinfo_name) | 658 result.append('n' + self._typeinfo_name) |
622 result.extend(['%s(@%s)' % (function, sourcefile) | 659 result.extend(['%s(@%s)' % (function, sourcefile) |
623 for function, sourcefile | 660 for function, sourcefile |
624 in zip(self._symbolized_stackfunction, | 661 in zip(self._symbolized_stackfunction, |
625 self._symbolized_stacksourcefile)]) | 662 self._symbolized_stacksourcefile)]) |
626 return ' '.join(result) | 663 return ' '.join(result) |
(...skipping 24 matching lines...)
651 self._symbolized_typeinfo = 'no typeinfo' | 688 self._symbolized_typeinfo = 'no typeinfo' |
652 | 689 |
653 def clear_component_cache(self): | 690 def clear_component_cache(self): |
654 self.component_cache = '' | 691 self.component_cache = '' |
655 | 692 |
656 @property | 693 @property |
657 def stacktrace(self): | 694 def stacktrace(self): |
658 return self._stacktrace | 695 return self._stacktrace |
659 | 696 |
660 @property | 697 @property |
661 def mmap(self): | 698 def allocator_type(self): |
662 return self._mmap | 699 return self._allocator_type |
663 | 700 |
664 @property | 701 @property |
665 def typeinfo(self): | 702 def typeinfo(self): |
666 return self._typeinfo | 703 return self._typeinfo |
667 | 704 |
668 @property | 705 @property |
669 def typeinfo_name(self): | 706 def typeinfo_name(self): |
670 return self._typeinfo_name | 707 return self._typeinfo_name |
671 | 708 |
672 @property | 709 @property |
(...skipping 59 matching lines...)
732 self._typeinfo_addresses.add(typeinfo) | 769 self._typeinfo_addresses.add(typeinfo) |
733 elif word[0] == 'n': | 770 elif word[0] == 'n': |
734 typeinfo_name = word[1:] | 771 typeinfo_name = word[1:] |
735 else: | 772 else: |
736 stacktrace_begin = index | 773 stacktrace_begin = index |
737 break | 774 break |
738 stacktrace = [int(address, 16) for address in words[stacktrace_begin:]] | 775 stacktrace = [int(address, 16) for address in words[stacktrace_begin:]] |
739 for frame in stacktrace: | 776 for frame in stacktrace: |
740 self._code_addresses.add(frame) | 777 self._code_addresses.add(frame) |
741 self._buckets[int(words[0])] = Bucket( | 778 self._buckets[int(words[0])] = Bucket( |
742 stacktrace, words[1] == 'mmap', typeinfo, typeinfo_name) | 779 stacktrace, words[1], typeinfo, typeinfo_name) |
743 | 780 |
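With this change the second word of a bucket line is stored verbatim as the allocator type instead of being collapsed into an mmap boolean. A hypothetical bucket line and how its words are consumed (the line itself is invented; the 't'/'n' prefixes and hex parsing are from the code above):

```python
line = '123 malloc t0x7f00deadbeef nstd::string 400123 400456'
words = line.split()
# words[0]: bucket id (123)      words[1]: allocator_type ('malloc')
# 't...':   typeinfo address     'n...':   typeinfo name
# the rest: stack frames, parsed with int(address, 16)
```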
744 def __iter__(self): | 781 def __iter__(self): |
745 for bucket_id, bucket_content in self._buckets.iteritems(): | 782 for bucket_id, bucket_content in self._buckets.iteritems(): |
746 yield bucket_id, bucket_content | 783 yield bucket_id, bucket_content |
747 | 784 |
748 def __getitem__(self, bucket_id): | 785 def __getitem__(self, bucket_id): |
749 return self._buckets[bucket_id] | 786 return self._buckets[bucket_id] |
750 | 787 |
751 def get(self, bucket_id): | 788 def get(self, bucket_id): |
752 return self._buckets.get(bucket_id) | 789 return self._buckets.get(bucket_id) |
(...skipping 32 matching lines...)
785 | 822 |
786 class Dump(object): | 823 class Dump(object): |
787 """Represents a heap profile dump.""" | 824 """Represents a heap profile dump.""" |
788 | 825 |
789 _PATH_PATTERN = re.compile(r'^(.*)\.([0-9]+)\.([0-9]+)\.heap$') | 826 _PATH_PATTERN = re.compile(r'^(.*)\.([0-9]+)\.([0-9]+)\.heap$') |
790 | 827 |
791 _HOOK_PATTERN = re.compile( | 828 _HOOK_PATTERN = re.compile( |
792 r'^ ([ \(])([a-f0-9]+)([ \)])-([ \(])([a-f0-9]+)([ \)])\s+' | 829 r'^ ([ \(])([a-f0-9]+)([ \)])-([ \(])([a-f0-9]+)([ \)])\s+' |
793 r'(hooked|unhooked)\s+(.+)$', re.IGNORECASE) | 830 r'(hooked|unhooked)\s+(.+)$', re.IGNORECASE) |
794 | 831 |
 | 832 _HOOKED_PATTERN = re.compile(r'(?P<TYPE>.+ )?(?P<COMMITTED>[0-9]+) / ' |
 | 833 r'(?P<RESERVED>[0-9]+) @ (?P<BUCKETID>[0-9]+)') |
 | 834 _UNHOOKED_PATTERN = re.compile(r'(?P<TYPE>.+ )?(?P<COMMITTED>[0-9]+) / ' |
 | 835 r'(?P<RESERVED>[0-9]+)') |
| 836 |
| 837 _OLD_HOOKED_PATTERN = re.compile(r'(?P<TYPE>.+) @ (?P<BUCKETID>[0-9]+)') |
| 838 _OLD_UNHOOKED_PATTERN = re.compile(r'(?P<TYPE>.+) (?P<COMMITTED>[0-9]+)') |
| 839 |
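The new _HOOKED_PATTERN/_UNHOOKED_PATTERN split the free-form info column into named groups, while the _OLD_* patterns keep dumps from older builds parseable. A quick check of what the hooked pattern accepts (sample lines invented):

```python
import re

_HOOKED_PATTERN = re.compile(r'(?P<TYPE>.+ )?(?P<COMMITTED>[0-9]+) / '
                             r'(?P<RESERVED>[0-9]+) @ (?P<BUCKETID>[0-9]+)')

m = _HOOKED_PATTERN.match('tc-used-all 1024 / 4096 @ 37')
assert m.group('TYPE', 'COMMITTED', 'RESERVED', 'BUCKETID') == \
    ('tc-used-all ', '1024', '4096', '37')
m = _HOOKED_PATTERN.match('1024 / 4096 @ 37')  # TYPE is optional...
assert m.group('TYPE') is None                 # ...and comes back as None
```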
795 _TIME_PATTERN_FORMAT = re.compile( | 840 _TIME_PATTERN_FORMAT = re.compile( |
796 r'^Time: ([0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+:[0-9]+)(\.[0-9]+)?') | 841 r'^Time: ([0-9]+/[0-9]+/[0-9]+ [0-9]+:[0-9]+:[0-9]+)(\.[0-9]+)?') |
797 _TIME_PATTERN_SECONDS = re.compile(r'^Time: ([0-9]+)$') | 842 _TIME_PATTERN_SECONDS = re.compile(r'^Time: ([0-9]+)$') |
798 | 843 |
799 def __init__(self, path, modified_time): | 844 def __init__(self, path, modified_time): |
800 self._path = path | 845 self._path = path |
801 matched = self._PATH_PATTERN.match(path) | 846 matched = self._PATH_PATTERN.match(path) |
802 self._pid = int(matched.group(2)) | 847 self._pid = int(matched.group(2)) |
803 self._count = int(matched.group(3)) | 848 self._count = int(matched.group(3)) |
804 self._time = modified_time | 849 self._time = modified_time |
(...skipping 161 matching lines...)
966 def _parse_mmap_list(self): | 1011 def _parse_mmap_list(self): |
967 """Parses lines in self._lines as a mmap list.""" | 1012 """Parses lines in self._lines as a mmap list.""" |
968 (ln, found) = skip_while( | 1013 (ln, found) = skip_while( |
969 0, len(self._lines), | 1014 0, len(self._lines), |
970 lambda n: self._lines[n] != 'MMAP_LIST:\n') | 1015 lambda n: self._lines[n] != 'MMAP_LIST:\n') |
971 if not found: | 1016 if not found: |
972 return {} | 1017 return {} |
973 | 1018 |
974 ln += 1 | 1019 ln += 1 |
975 self._map = {} | 1020 self._map = {} |
| 1021 current_vma = dict() |
976 while True: | 1022 while True: |
977 entry = proc_maps.ProcMaps.parse_line(self._lines[ln]) | 1023 entry = proc_maps.ProcMaps.parse_line(self._lines[ln]) |
978 if entry: | 1024 if entry: |
| 1025 current_vma = dict() |
979 for _, _, attr in self._procmaps.iter_range(entry.begin, entry.end): | 1026 for _, _, attr in self._procmaps.iter_range(entry.begin, entry.end): |
980 for key, value in entry.as_dict().iteritems(): | 1027 for key, value in entry.as_dict().iteritems(): |
981 attr[key] = value | 1028 attr[key] = value |
| 1029 current_vma[key] = value |
982 ln += 1 | 1030 ln += 1 |
983 continue | 1031 continue |
984 matched = self._HOOK_PATTERN.match(self._lines[ln]) | 1032 matched = self._HOOK_PATTERN.match(self._lines[ln]) |
985 if not matched: | 1033 if not matched: |
986 break | 1034 break |
987 # 2: starting address | 1035 # 2: starting address |
988 # 5: end address | 1036 # 5: end address |
989 # 7: hooked or unhooked | 1037 # 7: hooked or unhooked |
990 # 8: additional information | 1038 # 8: additional information |
| 1039 if matched.group(7) == 'hooked': |
| 1040 submatched = self._HOOKED_PATTERN.match(matched.group(8)) |
| 1041 if not submatched: |
| 1042 submatched = self._OLD_HOOKED_PATTERN.match(matched.group(8)) |
| 1043 elif matched.group(7) == 'unhooked': |
| 1044 submatched = self._UNHOOKED_PATTERN.match(matched.group(8)) |
| 1045 if not submatched: |
| 1046 submatched = self._OLD_UNHOOKED_PATTERN.match(matched.group(8)) |
| 1047 else: |
| 1048 assert matched.group(7) in ['hooked', 'unhooked'] |
| 1049 |
 | 1050 submatched_dict = submatched.groupdict() |
 | 1051 region_info = {'vma': current_vma} |
 | 1052 if submatched_dict.get('TYPE'):  # TYPE is optional and may be None |
 | 1053 region_info['type'] = submatched_dict['TYPE'].strip() |
| 1054 if 'COMMITTED' in submatched_dict: |
| 1055 region_info['committed'] = int(submatched_dict['COMMITTED']) |
| 1056 if 'RESERVED' in submatched_dict: |
| 1057 region_info['reserved'] = int(submatched_dict['RESERVED']) |
| 1058 if 'BUCKETID' in submatched_dict: |
| 1059 region_info['bucket_id'] = int(submatched_dict['BUCKETID']) |
| 1060 |
991 self._map[(int(matched.group(2), 16), | 1061 self._map[(int(matched.group(2), 16), |
992 int(matched.group(5), 16))] = (matched.group(7), | 1062 int(matched.group(5), 16))] = (matched.group(7), region_info) |
993 matched.group(8)) | |
994 ln += 1 | 1063 ln += 1 |
995 | 1064 |
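After _parse_mmap_list finishes, self._map keys are (begin, end) address pairs and values are (state, region_info) tuples; the structured region_info replaces the raw string that was stored before. One hypothetical entry (addresses and sizes invented):

```python
parsed_map = {
    (0x7f0000000000, 0x7f0000001000): ('hooked', {
        'vma': {'name': '/usr/lib/libexample.so', 'readable': 'r',
                'writable': '-', 'executable': 'x', 'private': 'p'},
        'type': 'tc-used-all',  # present only when the dump line had a type
        'committed': 1024,
        'reserved': 4096,
        'bucket_id': 37,
    }),
}
```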
996 def _extract_stacktrace_lines(self, line_number): | 1065 def _extract_stacktrace_lines(self, line_number): |
997 """Extracts the position of stacktrace lines. | 1066 """Extracts the position of stacktrace lines. |
998 | 1067 |
999 Valid stacktrace lines are stored into self._stacktrace_lines. | 1068 Valid stacktrace lines are stored into self._stacktrace_lines. |
1000 | 1069 |
1001 Args: | 1070 Args: |
1002 line_number: A line number to start parsing in lines. | 1071 line_number: A line number to start parsing in lines. |
1003 | 1072 |
(...skipping 292 matching lines...)
1296 first_dump_time: An integer representing time when the first dump is | 1365 first_dump_time: An integer representing time when the first dump is |
1297 dumped. | 1366 dumped. |
1298 | 1367 |
1299 Returns: | 1368 Returns: |
1300 A dict mapping components and their corresponding sizes. | 1369 A dict mapping components and their corresponding sizes. |
1301 """ | 1370 """ |
1302 LOGGER.info(' %s' % dump.path) | 1371 LOGGER.info(' %s' % dump.path) |
1303 sizes = dict((c, 0) for c in policy.components) | 1372 sizes = dict((c, 0) for c in policy.components) |
1304 | 1373 |
1305 PolicyCommands._accumulate(dump, policy, bucket_set, sizes) | 1374 PolicyCommands._accumulate(dump, policy, bucket_set, sizes) |
| 1375 PolicyCommands._accumulate_maps(dump, policy, sizes) |
1306 | 1376 |
1307 sizes['mmap-no-log'] = ( | 1377 sizes['mmap-no-log'] = ( |
1308 dump.global_stat('profiled-mmap_committed') - | 1378 dump.global_stat('profiled-mmap_committed') - |
1309 sizes['mmap-total-log']) | 1379 sizes['mmap-total-log']) |
1310 sizes['mmap-total-record'] = dump.global_stat('profiled-mmap_committed') | 1380 sizes['mmap-total-record'] = dump.global_stat('profiled-mmap_committed') |
1311 sizes['mmap-total-record-vm'] = dump.global_stat('profiled-mmap_virtual') | 1381 sizes['mmap-total-record-vm'] = dump.global_stat('profiled-mmap_virtual') |
1312 | 1382 |
1313 sizes['tc-no-log'] = ( | 1383 sizes['tc-no-log'] = ( |
1314 dump.global_stat('profiled-malloc_committed') - | 1384 dump.global_stat('profiled-malloc_committed') - |
1315 sizes['tc-total-log']) | 1385 sizes['tc-total-log']) |
1316 sizes['tc-total-record'] = dump.global_stat('profiled-malloc_committed') | 1386 sizes['tc-total-record'] = dump.global_stat('profiled-malloc_committed') |
1317 sizes['tc-unused'] = ( | 1387 sizes['tc-unused'] = ( |
1318 sizes['mmap-tcmalloc'] - | 1388 sizes['mmap-tcmalloc'] - |
1319 dump.global_stat('profiled-malloc_committed')) | 1389 dump.global_stat('profiled-malloc_committed')) |
| 1390 if sizes['tc-unused'] < 0: |
| 1391 LOGGER.warn(' Assuming tc-unused=0 as it is negative: %d (bytes)' % |
| 1392 sizes['tc-unused']) |
| 1393 sizes['tc-unused'] = 0 |
1320 sizes['tc-total'] = sizes['mmap-tcmalloc'] | 1394 sizes['tc-total'] = sizes['mmap-tcmalloc'] |
1321 | 1395 |
1322 for key, value in { | 1396 for key, value in { |
1323 'total': 'total_committed', | 1397 'total': 'total_committed', |
1324 'filemapped': 'file_committed', | 1398 'filemapped': 'file_committed', |
1325 'absent': 'absent_committed', | 1399 'absent': 'absent_committed', |
1326 'file-exec': 'file-exec_committed', | 1400 'file-exec': 'file-exec_committed', |
1327 'file-nonexec': 'file-nonexec_committed', | 1401 'file-nonexec': 'file-nonexec_committed', |
1328 'anonymous': 'anonymous_committed', | 1402 'anonymous': 'anonymous_committed', |
1329 'stack': 'stack_committed', | 1403 'stack': 'stack_committed', |
1330 'other': 'other_committed', | 1404 'other': 'other_committed', |
1331 'unhooked-absent': 'nonprofiled-absent_committed', | 1405 'unhooked-absent': 'nonprofiled-absent_committed', |
1332 'unhooked-anonymous': 'nonprofiled-anonymous_committed', | |
1333 'unhooked-file-exec': 'nonprofiled-file-exec_committed', | |
1334 'unhooked-file-nonexec': 'nonprofiled-file-nonexec_committed', | |
1335 'unhooked-stack': 'nonprofiled-stack_committed', | |
1336 'unhooked-other': 'nonprofiled-other_committed', | |
1337 'total-vm': 'total_virtual', | 1406 'total-vm': 'total_virtual', |
1338 'filemapped-vm': 'file_virtual', | 1407 'filemapped-vm': 'file_virtual', |
1339 'anonymous-vm': 'anonymous_virtual', | 1408 'anonymous-vm': 'anonymous_virtual', |
1340 'other-vm': 'other_virtual' }.iteritems(): | 1409 'other-vm': 'other_virtual' }.iteritems(): |
1341 if key in sizes: | 1410 if key in sizes: |
1342 sizes[key] = dump.global_stat(value) | 1411 sizes[key] = dump.global_stat(value) |
1343 | 1412 |
1344 if 'mustbezero' in sizes: | 1413 if 'mustbezero' in sizes: |
1345 removed_list = ( | 1414 removed_list = ( |
1346 'profiled-mmap_committed', | 1415 'profiled-mmap_committed', |
(...skipping 27 matching lines...)
1374 component_match = policy.find(bucket) | 1443 component_match = policy.find(bucket) |
1375 sizes[component_match] += int(words[COMMITTED]) | 1444 sizes[component_match] += int(words[COMMITTED]) |
1376 | 1445 |
1377 if component_match.startswith('tc-'): | 1446 if component_match.startswith('tc-'): |
1378 sizes['tc-total-log'] += int(words[COMMITTED]) | 1447 sizes['tc-total-log'] += int(words[COMMITTED]) |
1379 elif component_match.startswith('mmap-'): | 1448 elif component_match.startswith('mmap-'): |
1380 sizes['mmap-total-log'] += int(words[COMMITTED]) | 1449 sizes['mmap-total-log'] += int(words[COMMITTED]) |
1381 else: | 1450 else: |
1382 sizes['other-total-log'] += int(words[COMMITTED]) | 1451 sizes['other-total-log'] += int(words[COMMITTED]) |
1383 | 1452 |
| 1453 @staticmethod |
| 1454 def _accumulate_maps(dump, policy, sizes): |
| 1455 for _, value in dump.iter_map: |
| 1456 if value[0] == 'unhooked': |
| 1457 component_match = policy.find_unhooked(value) |
| 1458 sizes[component_match] += int(value[1]['committed']) |
| 1459 |
1384 | 1460 |
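_accumulate_maps walks the parsed map once and charges each unhooked region's committed bytes to whatever component find_unhooked resolves, which is what lets the hard-coded unhooked-* global_stat entries above be dropped. A hedged usage sketch, assuming dump, policy and bucket_set are set up as in _apply_policy:

```python
sizes = dict((c, 0) for c in policy.components)
PolicyCommands._accumulate(dump, policy, bucket_set, sizes)
PolicyCommands._accumulate_maps(dump, policy, sizes)
# e.g. sizes['unhooked-file-exec'] (component name invented) now holds the
# committed bytes of every unhooked region matched by that rule.
```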
1385 class CSVCommand(PolicyCommands): | 1461 class CSVCommand(PolicyCommands): |
1386 def __init__(self): | 1462 def __init__(self): |
1387 super(CSVCommand, self).__init__('csv') | 1463 super(CSVCommand, self).__init__('csv') |
1388 | 1464 |
1389 def do(self, sys_argv): | 1465 def do(self, sys_argv): |
1390 policy_set, dumps, bucket_set = self._set_up(sys_argv) | 1466 policy_set, dumps, bucket_set = self._set_up(sys_argv) |
1391 return CSVCommand._output(policy_set, dumps, bucket_set, sys.stdout) | 1467 return CSVCommand._output(policy_set, dumps, bucket_set, sys.stdout) |
1392 | 1468 |
1393 @staticmethod | 1469 @staticmethod |
(...skipping 126 matching lines...)
1520 max_dump_count_digit = len(str(max_dump_count)) | 1596 max_dump_count_digit = len(str(max_dump_count)) |
1521 for begin, end, attr in range_dict.iter_range(): | 1597 for begin, end, attr in range_dict.iter_range(): |
1522 out.write('%x-%x\n' % (begin, end)) | 1598 out.write('%x-%x\n' % (begin, end)) |
1523 if len(attr) < max_dump_count: | 1599 if len(attr) < max_dump_count: |
1524 attr[max_dump_count] = None | 1600 attr[max_dump_count] = None |
1525 for index, x in enumerate(attr[1:]): | 1601 for index, x in enumerate(attr[1:]): |
1526 out.write(' #%0*d: ' % (max_dump_count_digit, index + 1)) | 1602 out.write(' #%0*d: ' % (max_dump_count_digit, index + 1)) |
1527 if not x: | 1603 if not x: |
1528 out.write('None\n') | 1604 out.write('None\n') |
1529 elif x[0] == 'hooked': | 1605 elif x[0] == 'hooked': |
1530 attrs = x[1].split() | 1606 region_info = x[1] |
1531 assert len(attrs) == 3 | 1607 bucket_id = region_info['bucket_id'] |
1532 bucket_id = int(attrs[2]) | |
1533 bucket = bucket_set.get(bucket_id) | 1608 bucket = bucket_set.get(bucket_id) |
1534 component = policy.find(bucket) | 1609 component = policy.find(bucket) |
1535 out.write('hooked %s: %s @ %d\n' % (attrs[0], component, bucket_id)) | 1610 out.write('hooked %s: %s @ %d\n' % ( |
| 1611 region_info['type'] if 'type' in region_info else 'None', |
| 1612 component, bucket_id)) |
1536 else: | 1613 else: |
1537 attrs = x[1].split() | 1614 region_info = x[1] |
1538 size = int(attrs[1]) | 1615 size = region_info['committed'] |
1539 out.write('unhooked %s: %d bytes committed\n' % (attrs[0], size)) | 1616 out.write('unhooked %s: %d bytes committed\n' % ( |
| 1617 region_info['type'] if 'type' in region_info else 'None', size)) |
1540 | 1618 |
1541 | 1619 |
1542 class ExpandCommand(Command): | 1620 class ExpandCommand(Command): |
1543 def __init__(self): | 1621 def __init__(self): |
1544 super(ExpandCommand, self).__init__( | 1622 super(ExpandCommand, self).__init__( |
1545 'Usage: %prog expand <dump> <policy> <component> <depth>') | 1623 'Usage: %prog expand <dump> <policy> <component> <depth>') |
1546 | 1624 |
1547 def do(self, sys_argv): | 1625 def do(self, sys_argv): |
1548 _, args = self._parse_args(sys_argv, 4) | 1626 _, args = self._parse_args(sys_argv, 4) |
1549 dump_path = args[1] | 1627 dump_path = args[1] |
(...skipping 279 matching lines...)
1829 errorcode = COMMANDS[action]().do(sys.argv) | 1907 errorcode = COMMANDS[action]().do(sys.argv) |
1830 except ParsingException, e: | 1908 except ParsingException, e: |
1831 errorcode = 1 | 1909 errorcode = 1 |
1832 sys.stderr.write('Exit by parsing error: %s\n' % e) | 1910 sys.stderr.write('Exit by parsing error: %s\n' % e) |
1833 | 1911 |
1834 return errorcode | 1912 return errorcode |
1835 | 1913 |
1836 | 1914 |
1837 if __name__ == '__main__': | 1915 if __name__ == '__main__': |
1838 sys.exit(main()) | 1916 sys.exit(main()) |