| Index: tools/telemetry/telemetry/timeline/memory_dump_event.py
|
| diff --git a/tools/telemetry/telemetry/timeline/memory_dump_event.py b/tools/telemetry/telemetry/timeline/memory_dump_event.py
|
| index 26787ed8ebafe7a8e3fee2867d44b3edbbfd3858..0e99e9520fa05785056d9dbb64f1aa84f7d49a94 100644
|
| --- a/tools/telemetry/telemetry/timeline/memory_dump_event.py
|
| +++ b/tools/telemetry/telemetry/timeline/memory_dump_event.py
|
| @@ -152,6 +152,7 @@ class ProcessMemoryDumpEvent(timeline_event.TimelineEvent):
|
| super(ProcessMemoryDumpEvent, self).__init__(
|
| 'memory', 'memory_dump', event['ts'] / 1000.0, 0.0)
|
|
|
| + self._event = event
|
| self.process = process
|
| self.dump_id = event['id']
|
|
|
| @@ -161,6 +162,7 @@ class ProcessMemoryDumpEvent(timeline_event.TimelineEvent):
|
| allocators_dict = {}
|
| # populate keys that should always be present
|
| self._allocators = {}
|
| + blink_overhead = 0
|
| for allocator_name, size_values in allocators_dict.iteritems():
|
| name_parts = allocator_name.split('/')
|
| # we want to skip allocated_objects, since they are already counted by
|
| @@ -168,6 +170,10 @@ class ProcessMemoryDumpEvent(timeline_event.TimelineEvent):
|
| # allocators is only inherited from its allocated_objects.
|
| if name_parts[-1] == 'allocated_objects' and name_parts[0] != 'malloc':
|
| continue
|
| + # TODO(bashi): Quick hack to subtract PartitionAlloc overhead
|
| + if name_parts[-1] == 'BlinkTracedValueOverhead':
|
| + blink_overhead += int(size_values['attrs']['size']['value'], 16)
|
| + continue
|
| allocator_name = name_parts[0]
|
| allocator = self._allocators.setdefault(allocator_name, {})
|
| for size_key, size_value in size_values['attrs'].iteritems():
|
| @@ -176,6 +182,7 @@ class ProcessMemoryDumpEvent(timeline_event.TimelineEvent):
|
| # we need to discount tracing from malloc size.
|
| try:
|
| self._allocators['malloc']['size'] -= self._allocators['tracing']['size']
|
| + self._allocators['partition_alloc']['size'] -= blink_overhead
|
| except KeyError:
|
| pass # it's ok if any of those keys are not present
|
|
|
| @@ -202,7 +210,7 @@ class ProcessMemoryDumpEvent(timeline_event.TimelineEvent):
|
| category = category.GetMatchingChild(mapped_file)
|
|
|
| def __repr__(self):
|
| - values = ['pid=%d' % self.pid]
|
| + values = ['pid=%d' % self.process.pid]
|
| for key, value in sorted(self.GetStatsSummary().iteritems()):
|
| values.append('%s=%d' % (key, value))
|
| values = ', '.join(values)
|
| @@ -269,9 +277,7 @@ class GlobalMemoryDump(object):
|
| self.dump_id = dump_ids.pop()
|
|
|
| - # Either all processes have mmaps or none of them do.
|
| - have_mmaps = set(dump.has_mmaps for dump in self._process_dumps)
|
| - assert len(have_mmaps) == 1
|
| - self.has_mmaps = have_mmaps.pop()
|
| + # True when at least one process dump provides mmaps.
|
| + self.has_mmaps = any(dump.has_mmaps for dump in self._process_dumps)
|
|
|
| @property
|
| def start(self):
|
|
|