OLD | NEW |
---|---|
(Empty) | |
1 # Copyright 2013 The Chromium Authors. All rights reserved. | |
2 # Use of this source code is governed by a BSD-style license that can be | |
3 # found in the LICENSE file. | |
4 | |
5 import json | |
6 import logging | |
7 import sys | |
8 | |
9 from lib.ordered_dict import OrderedDict | |
10 from lib.subcommand import SubCommand | |
11 from lib.sorter import MallocUnit, MMapUnit, SorterSet, UnhookedUnit, UnitSet | |
12 | |
13 | |
# Column index of the bucket id in a whitespace-split stacktrace line of a
# dump (see _iterate_malloc_unit below, which indexes line.split() with it).
BUCKET_ID = 5
# Review thread:
#   peria (2013/07/16 10:30:11): Can't those constants be imported from
#     Bucket, like the others?
#   Dai Mikurube (NOT FULLTIME) (2013/07/16 10:42:39): Done.
# Column indices into a whitespace-split stacktrace line of a dump.
# COMMITTED, ALLOC_COUNT and FREE_COUNT are used by _iterate_malloc_unit;
# VIRTUAL is unused in this file — presumably the virtual-size column of the
# same record format (TODO: confirm against the dump writer).
VIRTUAL = 0
COMMITTED = 1
ALLOC_COUNT = 2
FREE_COUNT = 3

# Module-level logger shared by the dmprof tools.
LOGGER = logging.getLogger('dmprof')
21 | |
22 | |
class CatCommand(SubCommand):
  """Subcommand 'cat': dumps a series of heap profile snapshots to stdout
  as a single JSON document.

  The document has the shape:
    { 'version': 1, 'run_id': <str>, 'snapshots': [ <snapshot>, ... ] }
  where each snapshot holds a 'vm' world and a 'malloc' world (see
  _fill_snapshot / _fill_world).
  """

  def __init__(self):
    super(CatCommand, self).__init__('Usage: %prog cat <first-dump>')
    self._parser.add_option('--alternative-dirs', dest='alternative_dirs',
                            metavar='/path/on/target@/path/on/host[:...]',
                            help='Read files in /path/on/host/ instead of '
                            'files in /path/on/target/.')
    self._parser.add_option('--indent', dest='indent', action='store_true',
                            help='Indent the output.')

  def do(self, sys_argv):
    """Loads dump files and writes the combined JSON document to stdout.

    Args:
        sys_argv: Command-line arguments passed to the subcommand.  The
            first positional argument (after the subcommand name) is the
            path of the first dump file.
    """
    options, args = self._parse_args(sys_argv, 1)
    dump_path = args[1]
    # TODO(dmikurube): Support shared memory.
    alternative_dirs_dict = {}
    if options.alternative_dirs:
      # --alternative-dirs is a ':'-joined list of 'target@host' pairs;
      # split on the first '@' only so host paths may contain '@'.
      for alternative_dir_pair in options.alternative_dirs.split(':'):
        target_path, host_path = alternative_dir_pair.split('@', 1)
        alternative_dirs_dict[target_path] = host_path
    (bucket_set, dumps) = SubCommand.load_basic_files(
        dump_path, True, alternative_dirs=alternative_dirs_dict)

    json_root = OrderedDict()
    json_root['version'] = 1
    json_root['run_id'] = None
    for dump in dumps:
      # All dumps of one run must share a run_id; an empty string marks an
      # inconsistent series.
      if json_root['run_id'] and json_root['run_id'] != dump.run_id:
        LOGGER.error('Inconsistent heap profile dumps.')
        json_root['run_id'] = ''
        break
      json_root['run_id'] = dump.run_id
    json_root['snapshots'] = []

    # Load all sorters.
    sorters = SorterSet()

    for dump in dumps:
      json_root['snapshots'].append(
          self._fill_snapshot(dump, bucket_set, sorters))

    if options.indent:
      json.dump(json_root, sys.stdout, indent=2)
    else:
      json.dump(json_root, sys.stdout)
    # Function-call form prints the same trailing newline on Python 2 and
    # also stays valid syntax on Python 3 (was: print '').
    print('')

  @staticmethod
  def _fill_snapshot(dump, bucket_set, sorters):
    """Builds the JSON object for one snapshot (one dump).

    Args:
        dump: A dump object providing |time|, |iter_map| and
            |iter_stacktrace|.
        bucket_set: The set of buckets resolved for the dumps.
        sorters: A SorterSet used to break down units into categories.

    Returns:
        An OrderedDict with 'time' and the 'vm'/'malloc' 'worlds'.
    """
    root = OrderedDict()
    root['time'] = dump.time
    root['worlds'] = OrderedDict()
    root['worlds']['vm'] = CatCommand._fill_world(
        dump, bucket_set, sorters, 'vm')
    root['worlds']['malloc'] = CatCommand._fill_world(
        dump, bucket_set, sorters, 'malloc')
    return root

  @staticmethod
  def _fill_world(dump, bucket_set, sorters, world):
    """Builds the JSON object for one world ('vm' or 'malloc') of a dump.

    Args:
        dump: A dump object.
        bucket_set: The set of buckets resolved for the dumps.
        sorters: A SorterSet; only sorters matching |world| are applied.
        world: Either 'vm' or 'malloc'.

    Returns:
        An OrderedDict with 'name', 'unit_fields', 'units' (unit_id ->
        list of sizes/counts, ordered as 'unit_fields') and 'breakdown'
        (sorter name -> category name -> category object).
    """
    root = OrderedDict()

    # NOTE(review): this stores the literal string 'world', not the |world|
    # argument ('vm'/'malloc') — confirm whether that is intended.
    root['name'] = 'world'
    if world == 'vm':
      root['unit_fields'] = ['committed', 'reserved']
    elif world == 'malloc':
      root['unit_fields'] = ['size', 'alloc_count', 'free_count']

    # Make { vm | malloc } units with their sizes.
    root['units'] = OrderedDict()
    unit_set = UnitSet(world)
    if world == 'vm':
      for unit in CatCommand._iterate_vm_unit(dump, None, bucket_set):
        unit_set.append(unit)
      for unit in unit_set:
        root['units'][unit.unit_id] = [unit.committed, unit.reserved]
    elif world == 'malloc':
      for unit in CatCommand._iterate_malloc_unit(dump, bucket_set):
        unit_set.append(unit)
      for unit in unit_set:
        root['units'][unit.unit_id] = [
            unit.size, unit.alloc_count, unit.free_count]

    # Iterate for { vm | malloc } sorters.
    root['breakdown'] = OrderedDict()
    for sorter in sorters.iter_world(world):
      breakdown = OrderedDict()
      for unit in unit_set:
        found = sorter.find(unit)
        if found.name not in breakdown:
          # First unit in this category: create the category object.
          category = OrderedDict()
          category['name'] = found.name
          category['color'] = 'random'
          subworlds = {}
          for subworld in found.iter_subworld():
            subworlds[subworld] = False
          if subworlds:
            category['subworlds'] = subworlds
          if found.hidden:
            category['hidden'] = True
          category['units'] = []
          breakdown[found.name] = category
        breakdown[found.name]['units'].append(unit.unit_id)
      root['breakdown'][sorter.name] = breakdown

    return root

  @staticmethod
  def _iterate_vm_unit(dump, pfn_dict, bucket_set):
    """Yields a unit for every VM mapping region (or page frame) in |dump|.

    Args:
        dump: A dump object providing |iter_map| and |pageframe_length|.
        pfn_dict: A page-frame-number dict, or None to use region-level
            committed/reserved sizes instead of per-pageframe sizes.
        bucket_set: The set of buckets, forwarded to hooked (MMap) units.

    Yields:
        UnhookedUnit or MMapUnit objects, one per region, or one per page
        frame when |pfn_dict| and page-frame data are available.
    """
    unit_id = 0
    for _, region in dump.iter_map:
      unit_id += 1
      if region[0] == 'unhooked':
        if pfn_dict and dump.pageframe_length:
          for pageframe in region[1]['pageframe']:
            yield UnhookedUnit(unit_id, pageframe.size, pageframe.size,
                               region, pageframe, pfn_dict)
        else:
          yield UnhookedUnit(unit_id,
                             int(region[1]['committed']),
                             int(region[1]['reserved']),
                             region)
      elif region[0] == 'hooked':
        if pfn_dict and dump.pageframe_length:
          for pageframe in region[1]['pageframe']:
            yield MMapUnit(unit_id,
                           pageframe.size,
                           pageframe.size,
                           region, bucket_set, pageframe, pfn_dict)
        else:
          yield MMapUnit(unit_id,
                         int(region[1]['committed']),
                         int(region[1]['reserved']),
                         region,
                         bucket_set)
      else:
        # Lazy %-args: the logging module formats only if the record is
        # actually emitted (was an eager '%' interpolation).
        LOGGER.error('Unrecognized mapping status: %s', region[0])

  @staticmethod
  def _iterate_malloc_unit(dump, bucket_set):
    """Yields a MallocUnit for every malloc stacktrace line in |dump|.

    Args:
        dump: A dump object providing |iter_stacktrace| (whitespace-
            separated records indexed by BUCKET_ID, COMMITTED, ALLOC_COUNT
            and FREE_COUNT).
        bucket_set: The set of buckets; lines whose bucket is not found are
            assumed to be malloc buckets.

    Yields:
        MallocUnit objects keyed by their bucket id.
    """
    for line in dump.iter_stacktrace:
      words = line.split()
      bucket = bucket_set.get(int(words[BUCKET_ID]))
      if bucket and bucket.allocator_type == 'malloc':
        yield MallocUnit(int(words[BUCKET_ID]),
                         int(words[COMMITTED]),
                         int(words[ALLOC_COUNT]),
                         int(words[FREE_COUNT]),
                         bucket)
      elif not bucket:
        # 'Not-found' buckets are all assumed as malloc buckets.
        yield MallocUnit(int(words[BUCKET_ID]),
                         int(words[COMMITTED]),
                         int(words[ALLOC_COUNT]),
                         int(words[FREE_COUNT]),
                         None)
OLD | NEW |