Chromium Code Reviews

Unified Diff: tools/telemetry/telemetry/timeline/memory_dump_event.py

Issue 1553183002: [telemetry] Add support for composable process dumps in memory-infra (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fixes. Created 4 years, 11 months ago
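For context on what a "composable" process dump looks like: a single process dump is now assembled from several 'v' trace events that share a pid and dump id, each contributing part of the dump (allocator dumps, process mmaps, ...). The sketch below is illustrative only; the field values and allocator names are invented, not taken from a real trace, but the nesting matches what the constructor in the diff reads (args.dumps.allocators and args.dumps.process_mmaps.vm_regions, with 'ts' divided by 1000 to get milliseconds).

# Hypothetical input for ProcessMemoryDumpEvent: two 'v' events for pid 1234,
# both belonging to global dump id '0x1f'. Allocator 'value' attributes are
# hexadecimal byte counts.
dump_events = [
    {'ph': 'v', 'pid': 1234, 'id': '0x1f', 'ts': 10000,
     'args': {'dumps': {'allocators': {
         'malloc': {'attrs': {'size': {'units': 'bytes', 'value': '400'}}},
     }}}},
    {'ph': 'v', 'pid': 1234, 'id': '0x1f', 'ts': 10500,
     'args': {'dumps': {'process_mmaps': {'vm_regions': []}}}},
]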
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 import posixpath
 import re

 from telemetry.timeline import event as timeline_event


(...skipping 122 matching lines...)
     return self._bucket[name]


 class ProcessMemoryDumpEvent(timeline_event.TimelineEvent):
   """A memory dump event belonging to a single timeline.Process object.

   It's a subclass of telemetry's TimelineEvent so it can be included in
   the stream of events contained in timeline.model objects, and have its
   timing correlated with that of other events in the model.

+  Args:
+    process: The Process object associated with the memory dump.
+    dump_events: A list of dump events of the process with the same dump id.
+
   Properties:
     dump_id: A string to identify events belonging to the same global dump.
     process: The timeline.Process object that owns this memory dump event.
     has_mmaps: True if the memory dump has mmaps information. If False then
       GetMemoryUsage will report all zeros.
   """
-  def __init__(self, process, event):
-    assert event['ph'] == 'v' and process.pid == event['pid']
+  def __init__(self, process, dump_events):
+    assert dump_events

-    super(ProcessMemoryDumpEvent, self).__init__(
-        'memory', 'memory_dump', event['ts'] / 1000.0, 0.0)
+    start_time = min(event['ts'] for event in dump_events) / 1000.0
+    duration = max(event['ts'] for event in dump_events) / 1000.0 - start_time
+    super(ProcessMemoryDumpEvent, self).__init__('memory', 'memory_dump',
+                                                 start_time, duration)

     self.process = process
-    self.dump_id = event['id']
+    self.dump_id = dump_events[0]['id']

-    try:
-      allocators_dict = event['args']['dumps']['allocators']
-    except KeyError:
-      allocators_dict = {}
+    allocator_dumps = {}
+    vm_regions = []
+    for event in dump_events:
+      assert (event['ph'] == 'v' and self.process.pid == event['pid'] and
+              self.dump_id == event['id'])
+      try:
+        allocator_dumps.update(event['args']['dumps']['allocators'])
+      except KeyError:
+        pass  # It's ok if any of those keys are not present.
+      try:
+        value = event['args']['dumps']['process_mmaps']['vm_regions']
+        assert not vm_regions
+        vm_regions = value
+      except KeyError:
+        pass  # It's ok if any of those keys are not present.
+
     self._allocators = {}
     parent_path = ''
     parent_has_size = False
-    for allocator_name, size_values in sorted(allocators_dict.iteritems()):
-      if ((allocator_name.startswith(parent_path) and parent_has_size)
-          or allocator_name.startswith('global/')):
+    for allocator_name, size_values in sorted(allocator_dumps.iteritems()):
+      if ((allocator_name.startswith(parent_path) and parent_has_size) or
+          allocator_name.startswith('global/')):
         continue
       parent_path = allocator_name + '/'
       parent_has_size = 'size' in size_values['attrs']
       name_parts = allocator_name.split('/')
       allocator_name = name_parts[0]
       # For 'gpu/android_memtrack/*' we want to keep track of individual
       # components. E.g. 'gpu/android_memtrack/gl' will be stored as
       # 'android_memtrack_gl' in the allocators dict.
-      if (len(name_parts) == 3 and allocator_name == 'gpu'
-          and name_parts[1] == 'android_memtrack'):
+      if (len(name_parts) == 3 and allocator_name == 'gpu' and
+          name_parts[1] == 'android_memtrack'):
         allocator_name = '_'.join(name_parts[1:3])
       allocator = self._allocators.setdefault(allocator_name, {})
       for size_key, size_value in size_values['attrs'].iteritems():
         if size_value['units'] == 'bytes':
           allocator[size_key] = (allocator.get(size_key, 0)
                                  + int(size_value['value'], 16))
     # we need to discount tracing from malloc size.
     try:
       self._allocators['malloc']['size'] -= self._allocators['tracing']['size']
     except KeyError:
-      pass # it's ok if any of those keys are not present
+      pass  # It's ok if any of those keys are not present.

+    self.has_mmaps = bool(vm_regions)
     self._buckets = {}
-    try:
-      vm_regions = event['args']['dumps']['process_mmaps']['vm_regions']
-    except KeyError:
-      vm_regions = []
-    self.has_mmaps = bool(vm_regions)
     for vm_region in vm_regions:
       self._AddRegion(vm_region)

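As a standalone sketch of what the new constructor does with several events (not the CL's code; it assumes the invented dump_events list shown above the diff): the start and duration come from the earliest and latest timestamps, and the per-event allocator dicts are folded into one mapping.

# Sketch only: reduce a list of same-id 'v' events to timing plus merged data.
def merge_dump_events(dump_events):
  start_time = min(event['ts'] for event in dump_events) / 1000.0  # ms
  duration = max(event['ts'] for event in dump_events) / 1000.0 - start_time
  allocator_dumps = {}
  vm_regions = []
  for event in dump_events:
    dumps = event['args']['dumps']
    allocator_dumps.update(dumps.get('allocators', {}))
    # Keep the vm_regions from whichever event carries them.
    vm_regions = dumps.get('process_mmaps', {}).get('vm_regions', vm_regions)
  return start_time, duration, allocator_dumps, vm_regions

# With the invented events above: start_time=10.0, duration=0.5,
# allocator_dumps has a single 'malloc' entry, and vm_regions == [].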
   @property
   def process_name(self):
     return self.process.name

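The allocator loop in __init__ above groups nested dump names under their top-level allocator, keeps 'gpu/android_memtrack/<component>' entries separate, skips children whose parent already reports a size, and parses 'value' attributes as hexadecimal byte counts. A small illustrative sketch of just the naming and hex parsing (the helper name and inputs are invented):

# Illustrative helper, not part of the CL: collapse a dump name the same way
# the allocator loop above does.
def collapse_name(allocator_name):
  name_parts = allocator_name.split('/')
  if (len(name_parts) == 3 and name_parts[0] == 'gpu' and
      name_parts[1] == 'android_memtrack'):
    return '_'.join(name_parts[1:3])  # keep per-component memtrack entries
  return name_parts[0]                # otherwise group under the top level

print(collapse_name('gpu/android_memtrack/gl'))   # android_memtrack_gl
print(collapse_name('malloc/allocated_objects'))  # malloc
print(int('1f00', 16))  # 'size' values are hex strings of bytes -> 7936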
   def _AddRegion(self, vm_region):
     path = ''
     category = ROOT_CATEGORY
(...skipping 84 matching lines...)
     have_mmaps = set(dump.has_mmaps for dump in self._process_dumps)
     assert len(have_mmaps) == 1
     self.has_mmaps = have_mmaps.pop()

   @property
   def start(self):
     return self._process_dumps[0].start

   @property
   def end(self):
-    return self._process_dumps[-1].start
+    return max(dump.end for dump in self._process_dumps)

   @property
   def duration(self):
     return self.end - self.start

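With the change to end above, a global dump now spans from the start of its first process dump to the latest end among its process dumps, instead of ending at the start of the last one. A quick sketch with invented timings (milliseconds, chosen to be exact floats):

# Hypothetical (start, duration) pairs for three process dumps of one global dump.
process_dump_timings = [(10.0, 0.5), (10.25, 0.25), (10.5, 0.75)]
ends = [start + duration for start, duration in process_dump_timings]
global_start = process_dump_timings[0][0]       # 10.0, start of the first dump
global_end = max(ends)                          # 11.25
print('duration=%r' % (global_end - global_start))  # duration=1.25
# The previous code took the start of the last process dump (10.5) as the end,
# which would have reported a duration of just 0.5.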
   def IterProcessMemoryDumps(self):
     return iter(self._process_dumps)

   def __repr__(self):
     values = ['id=%s' % self.dump_id]
     for key, value in sorted(self.GetMemoryUsage().iteritems()):
       values.append('%s=%d' % (key, value))
     values = ', '.join(values)
     return '%s[%s]' % (type(self).__name__, values)

   def GetMemoryUsage(self):
     """Get the aggregated memory usage over all processes in this dump."""
     result = {}
     for dump in self._process_dumps:
       for key, value in dump.GetMemoryUsage().iteritems():
         result[key] = result.get(key, 0) + value
     return result
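GetMemoryUsage sums the per-process usage dicts key by key. A standalone sketch of that aggregation with invented per-process numbers (the metric names here are placeholders, not necessarily the keys the real per-process GetMemoryUsage emits):

# Hypothetical per-process usage dicts for one global dump.
per_process_usage = [
    {'mmap_total': 2048, 'malloc': 1024},
    {'mmap_total': 4096, 'malloc': 512, 'gpu': 256},
]
result = {}
for usage in per_process_usage:
  for key, value in usage.items():
    result[key] = result.get(key, 0) + value
print(sorted(result.items()))
# [('gpu', 256), ('malloc', 1536), ('mmap_total', 6144)]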