// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_event_memory_overhead.h"

#include <algorithm>

#include "base/logging.h"
#include "base/memory/ref_counted_memory.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/values.h"

namespace {
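// Rounds |size| up to the next multiple of |alignment|, which must be a
// power of two.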
size_t RoundUp(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}
}  // namespace

namespace base {
namespace trace_event {

TraceEventMemoryOverhead::TraceEventMemoryOverhead() {}

TraceEventMemoryOverhead::~TraceEventMemoryOverhead() {}

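// Adds |count| objects of |object_type| to the running totals, creating a
// new entry in |allocated_objects_| the first time a type is seen.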
void TraceEventMemoryOverhead::AddOrCreateInternal(
    const char* object_type,
    size_t count,
    size_t allocated_size_in_bytes,
    size_t resident_size_in_bytes) {
  auto it = allocated_objects_.find(object_type);
  if (it == allocated_objects_.end()) {
    allocated_objects_.insert(std::make_pair(
        object_type, ObjectCountAndSize({count, allocated_size_in_bytes,
                                         resident_size_in_bytes})));
    return;
  }
  it->second.count += count;
  it->second.allocated_size_in_bytes += allocated_size_in_bytes;
  it->second.resident_size_in_bytes += resident_size_in_bytes;
}

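// Shorthand for allocations assumed to be fully resident, i.e. with
// resident size equal to allocated size.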
void TraceEventMemoryOverhead::Add(const char* object_type,
                                   size_t allocated_size_in_bytes) {
  Add(object_type, allocated_size_in_bytes, allocated_size_in_bytes);
}

void TraceEventMemoryOverhead::Add(const char* object_type,
                                   size_t allocated_size_in_bytes,
                                   size_t resident_size_in_bytes) {
  AddOrCreateInternal(object_type, 1, allocated_size_in_bytes,
                      resident_size_in_bytes);
}

void TraceEventMemoryOverhead::AddString(const std::string& str) {
  // The numbers below are empirical and mainly based on profiling of
  // real-world std::string implementations:
  // - even short strings end up malloc()-ing at least 32 bytes;
  // - longer strings seem to malloc() multiples of 16 bytes.
  // For example, a string with capacity() == 20 is accounted as
  // sizeof(std::string) + 32 bytes.
  Add("std::string",
      sizeof(std::string) + std::max<size_t>(RoundUp(str.capacity(), 16), 32u));
}

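// Accounts for both the RefCountedString wrapper itself and the std::string
// it owns.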
void TraceEventMemoryOverhead::AddRefCountedString(
    const RefCountedString& str) {
  Add("RefCountedString", sizeof(RefCountedString));
  AddString(str.data());
}

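// Recursively accounts for |value|, walking nested dictionaries and lists
// and accounting for every key and element along the way.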
void TraceEventMemoryOverhead::AddValue(const Value& value) {
  switch (value.GetType()) {
    case Value::TYPE_NULL:
    case Value::TYPE_BOOLEAN:
    case Value::TYPE_INTEGER:
    case Value::TYPE_DOUBLE:
      Add("FundamentalValue", sizeof(Value));
      break;

    case Value::TYPE_STRING: {
      const StringValue* string_value = nullptr;
      value.GetAsString(&string_value);
      Add("StringValue", sizeof(StringValue));
      AddString(string_value->GetString());
    } break;

    case Value::TYPE_BINARY: {
      const BinaryValue* binary_value = nullptr;
      value.GetAsBinary(&binary_value);
      Add("BinaryValue", sizeof(BinaryValue) + binary_value->GetSize());
    } break;

    case Value::TYPE_DICTIONARY: {
      const DictionaryValue* dictionary_value = nullptr;
      value.GetAsDictionary(&dictionary_value);
      Add("DictionaryValue", sizeof(DictionaryValue));
      for (DictionaryValue::Iterator it(*dictionary_value); !it.IsAtEnd();
           it.Advance()) {
        AddString(it.key());
        AddValue(it.value());
      }
    } break;

    case Value::TYPE_LIST: {
      const ListValue* list_value = nullptr;
      value.GetAsList(&list_value);
      Add("ListValue", sizeof(ListValue));
      for (const Value* v : *list_value)
        AddValue(*v);
    } break;

    default:
      NOTREACHED();
  }
}

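// Accounts for the footprint of this TraceEventMemoryOverhead instance
// itself.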
void TraceEventMemoryOverhead::AddSelf() {
  size_t estimated_size = sizeof(*this);
  // If the SmallMap did overflow its static capacity, its elements will be
  // allocated on the heap and have to be accounted separately.
  if (allocated_objects_.UsingFullMap())
    estimated_size += sizeof(map_type::value_type) * allocated_objects_.size();
  Add("TraceEventMemoryOverhead", estimated_size);
}

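// Merges the per-type counts and sizes accumulated by |other| into this
// instance.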
void TraceEventMemoryOverhead::Update(const TraceEventMemoryOverhead& other) {
  for (const auto& it : other.allocated_objects_) {
    AddOrCreateInternal(it.first, it.second.count,
                        it.second.allocated_size_in_bytes,
                        it.second.resident_size_in_bytes);
  }
}

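// Reports the accumulated overhead into |pmd|, creating one allocator dump
// named "<base_name>/<object_type>" per tracked object type.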
void TraceEventMemoryOverhead::DumpInto(const char* base_name,
                                        ProcessMemoryDump* pmd) const {
  for (const auto& it : allocated_objects_) {
    std::string dump_name = StringPrintf("%s/%s", base_name, it.first);
    MemoryAllocatorDump* mad = pmd->CreateAllocatorDump(dump_name);
    mad->AddScalar(MemoryAllocatorDump::kNameSize,
                   MemoryAllocatorDump::kUnitsBytes,
                   it.second.allocated_size_in_bytes);
    mad->AddScalar("resident_size", MemoryAllocatorDump::kUnitsBytes,
                   it.second.resident_size_in_bytes);
    mad->AddScalar(MemoryAllocatorDump::kNameObjectsCount,
                   MemoryAllocatorDump::kUnitsObjects, it.second.count);
  }
}

}  // namespace trace_event
}  // namespace base