| OLD | NEW |
| 1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <windows.h> | 5 #include <windows.h> |
| 6 #include <tlhelp32.h> // for CreateToolhelp32Snapshot() | 6 #include <tlhelp32.h> // for CreateToolhelp32Snapshot() |
| 7 #include <map> | 7 #include <map> |
| #include <algorithm>  // for std::sort (used in DumpLeaks below) |
| #include <vector>  // for std::vector (used in DumpLeaks below) |
| 8 | 8 |
| 9 #include "tools/memory_watcher/memory_watcher.h" | 9 #include "tools/memory_watcher/memory_watcher.h" |
| 10 #include "base/file_util.h" | 10 #include "base/file_util.h" |
| 11 #include "base/lock.h" | 11 #include "base/lock.h" |
| 12 #include "base/logging.h" | 12 #include "base/logging.h" |
| 13 #include "base/stats_counters.h" | 13 #include "base/stats_counters.h" |
| 14 #include "base/string_util.h" | 14 #include "base/string_util.h" |
| 15 #include "tools/memory_watcher/call_stack.h" | 15 #include "tools/memory_watcher/call_stack.h" |
| 16 #include "tools/memory_watcher/preamble_patcher.h" | 16 #include "tools/memory_watcher/preamble_patcher.h" |
| 17 | 17 |
| 18 static StatsCounter mem_in_use("MemoryInUse.Bytes"); | 18 static StatsCounter mem_in_use("MemoryInUse.Bytes"); |
| 19 static StatsCounter mem_in_use_blocks("MemoryInUse.Blocks"); | 19 static StatsCounter mem_in_use_blocks("MemoryInUse.Blocks"); |
| 20 static StatsCounter mem_in_use_allocs("MemoryInUse.Allocs"); | 20 static StatsCounter mem_in_use_allocs("MemoryInUse.Allocs"); |
| 21 static StatsCounter mem_in_use_frees("MemoryInUse.Frees"); | 21 static StatsCounter mem_in_use_frees("MemoryInUse.Frees"); |
| 22 | 22 |
| 23 // --------------------------------------------------------------------- | 23 // --------------------------------------------------------------------- |
| 24 | 24 |
| 25 MemoryWatcher::MemoryWatcher() | 25 MemoryWatcher::MemoryWatcher() |
| 26 : file_(NULL), | 26 : file_(NULL), |
| 27 hooked_(false), | 27 hooked_(false), |
| 28 in_track_(false), | 28 active_thread_id_(0) { |
| 29 block_map_size_(0) { | |
| 30 MemoryHook::Initialize(); | 29 MemoryHook::Initialize(); |
| 31 CallStack::Initialize(); | 30 CallStack::Initialize(); |
| 32 | 31 |
| 33 block_map_ = new CallStackMap(); | 32 block_map_ = new CallStackMap(); |
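| // block_map_ owns one AllocationStack per outstanding allocation, |
| // keyed by allocation id; entries are deleted again in OnUntrack(). |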
| 34 stack_map_ = new CallStackIdMap(); | |
| 35 | 33 |
| 36 // Register last - only after we're ready for notifications! | 34 // Register last - only after we're ready for notifications! |
| 37 Hook(); | 35 Hook(); |
| 38 } | 36 } |
| 39 | 37 |
| 40 MemoryWatcher::~MemoryWatcher() { | 38 MemoryWatcher::~MemoryWatcher() { |
| 41 Unhook(); | 39 Unhook(); |
| 42 | 40 |
| 43 CloseLogFile(); | 41 CloseLogFile(); |
| 44 | 42 |
| (...skipping 35 matching lines...) |
| 80 if (file_ != NULL) { | 78 if (file_ != NULL) { |
| 81 fclose(file_); | 79 fclose(file_); |
| 82 file_ = NULL; | 80 file_ = NULL; |
| 83 std::wstring tmp_name = ASCIIToWide(file_name_); | 81 std::wstring tmp_name = ASCIIToWide(file_name_); |
| 84 tmp_name += L".tmp"; | 82 tmp_name += L".tmp"; |
| 85 file_util::Move(FilePath::FromWStringHack(tmp_name), | 83 file_util::Move(FilePath::FromWStringHack(tmp_name), |
| 86 FilePath::FromWStringHack(ASCIIToWide(file_name_))); | 84 FilePath::FromWStringHack(ASCIIToWide(file_name_))); |
| 87 } | 85 } |
| 88 } | 86 } |
| 89 | 87 |
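| // Returns true when the current thread re-enters the watcher while it |
| // already holds block_map_lock_, letting callers bail out rather than |
| // self-deadlock on our own tracking allocations. |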
| 88 bool MemoryWatcher::LockedRecursionDetected() const { |
| 89 if (!active_thread_id_) return false; |
| 90 DWORD thread_id = GetCurrentThreadId(); |
| 91 // TODO(jar): Perchance we should use atomic access to member. |
| 92 return thread_id == active_thread_id_; |
| 93 } |
| 94 |
| 90 void MemoryWatcher::OnTrack(HANDLE heap, int32 id, int32 size) { | 95 void MemoryWatcher::OnTrack(HANDLE heap, int32 id, int32 size) { |
| 91 // Don't track zeroes. It's a waste of time. | 96 // Don't track zeroes. It's a waste of time. |
| 92 if (size == 0) | 97 if (size == 0) |
| 93 return; | 98 return; |
| 94 | 99 |
| 100 if (LockedRecursionDetected()) |
| 101 return; |
| 102 |
| 95 // AllocationStack overrides new/delete to not allocate | 103 // AllocationStack overrides new/delete to not allocate |
| 96 // from the main heap. | 104 // from the main heap. |
| 97 AllocationStack* stack = new AllocationStack(); | 105 AllocationStack* stack = new AllocationStack(size); |
| 106 if (!stack->Valid()) return; // Recursion blocked generation of stack. |
| 107 |
| 98 { | 108 { |
| 99 AutoLock lock(block_map_lock_); | 109 AutoLock lock(block_map_lock_); |
| 100 | 110 |
| 101 // Ideally, we'd like to verify that the block being added | 111 // Check whether the block being added is already in our list |
| 102 // here is not already in our list of tracked blocks. However, | 112 // of tracked blocks. The hash table lookup is expensive, but |
| 103 // the lookup in our hash table is expensive and slows us too | 113 // we need it to handle duplicate ids below. |
| 104 // much. Uncomment this line if you think you need it. | 114 |
| 105 //DCHECK(block_map_->find(id) == block_map_->end()); | 115 CallStackMap::iterator block_it = block_map_->find(id); |
| 116 if (block_it != block_map_->end()) { |
| 117 #if 0 // Don't do this until stack->ToString() uses ONLY our heap. |
| 118 active_thread_id_ = GetCurrentThreadId(); |
| 119 PrivateAllocatorString output; |
| 120 block_it->second->ToString(&output); |
| 121 // LOG(INFO) << "First Stack size " << stack->size() << " was\n" << output; |
| 122 stack->ToString(&output); |
| 123 // LOG(INFO) << "Second Stack size " << stack->size() << " was\n" << output; |
| 124 #endif // 0 |
| 125 |
| 126 // TODO(jar): We should delete one stack, and keep the other, perhaps |
| 127 // based on size. |
| 128 // For now, just delete the first, and keep the second? |
| 129 delete block_it->second; |
| 130 } |
| 131 // TODO(jar): Perchance we should use atomic access to member. |
| 132 active_thread_id_ = 0; // Note: Only do this AFTER exiting above scope! |
| 106 | 133 |
| 107 (*block_map_)[id] = stack; | 134 (*block_map_)[id] = stack; |
| 108 | |
| 109 CallStackIdMap::iterator it = stack_map_->find(stack->hash()); | |
| 110 if (it != stack_map_->end()) { | |
| 111 it->second.size += size; | |
| 112 it->second.count++; | |
| 113 } else { | |
| 114 StackTrack tracker; | |
| 115 tracker.count = 1; | |
| 116 tracker.size = size; | |
| 117 tracker.stack = stack; | |
| 118 (*stack_map_)[stack->hash()] = tracker; | |
| 119 } | |
| 120 | |
| 121 block_map_size_ += size; | |
| 122 } | 135 } |
| 123 | 136 |
| 124 mem_in_use.Set(block_map_size_); | 137 mem_in_use.Add(size); |
| 125 mem_in_use_blocks.Increment(); | 138 mem_in_use_blocks.Increment(); |
| 126 mem_in_use_allocs.Increment(); | 139 mem_in_use_allocs.Increment(); |
| 127 } | 140 } |
| 128 | 141 |
| 129 void MemoryWatcher::OnUntrack(HANDLE heap, int32 id, int32 size) { | 142 void MemoryWatcher::OnUntrack(HANDLE heap, int32 id, int32 size) { |
| 130 DCHECK_GE(size, 0); | 143 DCHECK_GE(size, 0); |
| 131 | 144 |
| 132 // Don't bother with these. | 145 // Don't bother with these. |
| 133 if (size == 0) | 146 if (size == 0) |
| 134 return; | 147 return; |
| 135 | 148 |
| 149 if (LockedRecursionDetected()) |
| 150 return; |
| 151 |
| 136 { | 152 { |
| 137 AutoLock lock(block_map_lock_); | 153 AutoLock lock(block_map_lock_); |
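| // Flag this thread as inside the watcher so that any allocation or |
| // free we perform below is seen as recursion and ignored. |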
| 154 active_thread_id_ = GetCurrentThreadId(); |
| 138 | 155 |
| 139 // First, find the block in our block_map. | 156 // First, find the block in our block_map. |
| 140 CallStackMap::iterator it = block_map_->find(id); | 157 CallStackMap::iterator it = block_map_->find(id); |
| 141 if (it != block_map_->end()) { | 158 if (it != block_map_->end()) { |
| 142 AllocationStack* stack = it->second; | 159 AllocationStack* stack = it->second; |
| 143 CallStackIdMap::iterator id_it = stack_map_->find(stack->hash()); | 160 DCHECK_EQ(stack->size(), size); |
| 144 DCHECK(id_it != stack_map_->end()); | |
| 145 id_it->second.size -= size; | |
| 146 id_it->second.count--; | |
| 147 DCHECK_GE(id_it->second.count, 0); | |
| 148 | |
| 149 // If there are no more callstacks with this stack, then we | |
| 150 // have cleaned up all instances, and can safely delete the | |
| 151 // StackTracker in the stack_map. | |
| 152 bool safe_to_delete = true; | |
| 153 if (id_it->second.count != 0) { | |
| 154 // See if our |StackTracker| is also using |stack|. | |
| 155 if (id_it->second.stack == stack) | |
| 156 safe_to_delete = false; // We're still using |stack|. | |
| 157 } else { | |
| 158 // See if we skipped deleting our |StackTracker|'s |stack| earlier. | |
| 159 if (id_it->second.stack != stack) | |
| 160 delete id_it->second.stack; // We skipped it earlier. | |
| 161 stack_map_->erase(id_it); // Discard our StackTracker. | |
| 162 } | |
| 163 | |
| 164 block_map_size_ -= size; | |
| 165 block_map_->erase(id); | 161 block_map_->erase(id); |
| 166 if (safe_to_delete) | 162 delete stack; |
| 167 delete stack; | |
| 168 } else { | 163 } else { |
| 169 // Untracked item. This happens a fair amount, and it is | 164 // Untracked item. This happens a fair amount, and it is |
| 170 // normal. A lot of time elapses during process startup | 165 // normal. A lot of time elapses during process startup |
| 171 // before the allocation routines are hooked. | 166 // before the allocation routines are hooked. |
| 167 size = 0; // Ignore size in tallies. |
| 172 } | 168 } |
| 169 // TODO(jar): Perchance we should use atomic access to member. |
| 170 active_thread_id_ = 0; |
| 173 } | 171 } |
| 174 | 172 |
| 175 mem_in_use.Set(block_map_size_); | 173 mem_in_use.Add(-size); |
| 176 mem_in_use_blocks.Decrement(); | 174 mem_in_use_blocks.Decrement(); |
| 177 mem_in_use_frees.Increment(); | 175 mem_in_use_frees.Increment(); |
| 178 } | 176 } |
| 179 | 177 |
| 180 void MemoryWatcher::SetLogName(char* log_name) { | 178 void MemoryWatcher::SetLogName(char* log_name) { |
| 181 if (!log_name) | 179 if (!log_name) |
| 182 return; | 180 return; |
| 183 | 181 |
| 184 log_name_ = log_name; | 182 log_name_ = log_name; |
| 185 } | 183 } |
| 186 | 184 |
| 185 // Helper to sort lists of stacks by allocation cost. |
| 186 // Note: Sorting by allocation count would be interesting too! |
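| // Used with std::sort in DumpLeaks() so the costliest stacks print first. |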
| 187 static bool CompareCallStackIdItems(MemoryWatcher::StackTrack* left, |
| 188 MemoryWatcher::StackTrack* right) { |
| 189 return left->size > right->size; |
| 190 } |
| 191 |
| 192 |
| 187 void MemoryWatcher::DumpLeaks() { | 193 void MemoryWatcher::DumpLeaks() { |
| 188 // We can only dump the leaks once. We'll cleanup the hooks here. | 194 // We can only dump the leaks once. We'll cleanup the hooks here. |
| 189 DCHECK(hooked_); | 195 if (!hooked_) |
| 196 return; |
| 190 Unhook(); | 197 Unhook(); |
| 191 | 198 |
| 192 AutoLock lock(block_map_lock_); | 199 AutoLock lock(block_map_lock_); |
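| // As in OnUntrack(), flag this thread so allocations made while |
| // dumping are ignored by the hooks. |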
| 200 active_thread_id_ = GetCurrentThreadId(); |
| 193 | 201 |
| 194 OpenLogFile(); | 202 OpenLogFile(); |
| 195 | 203 |
| 196 // Dump the stack map. | 204 // Aggregate contributions from each allocated block on a per-stack basis. |
| 197 CallStackIdMap::iterator it = stack_map_->begin(); | 205 CallStackIdMap stack_map; |
| 198 while (it != stack_map_->end()) { | 206 for (CallStackMap::iterator block_it = block_map_->begin(); |
| 199 fwprintf(file_, L"%d bytes, %d items (0x%x)\n", | 207 block_it != block_map_->end(); ++block_it) { |
| 200 it->second.size, it->second.count, it->first); | 208 AllocationStack* stack = block_it->second; |
| 201 CallStack* stack = it->second.stack; | 209 int32 stack_hash = stack->hash(); |
| 202 std::string output; | 210 int32 alloc_block_size = stack->size(); |
| 211 CallStackIdMap::iterator it = stack_map.find(stack_hash); |
| 212 if (it == stack_map.end()) { |
| 213 StackTrack tracker; |
| 214 tracker.count = 1; |
| 215 tracker.size = alloc_block_size; |
| 216 tracker.stack = stack; // Temporary pointer into block_map_. |
| 217 stack_map[stack_hash] = tracker; |
| 218 } else { |
| 219 it->second.count++; |
| 220 it->second.size += alloc_block_size; |
| 221 } |
| 222 } |
| 223 // Don't release the lock yet: stack_map holds pointers into block_map_. |
| 224 |
| 225 // Put references to StackTracks into an array for sorting. |
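| // The private hook allocator presumably keeps this temporary vector |
| // itself off the instrumented heap. |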
| 226 std::vector<StackTrack*, PrivateHookAllocator<StackTrack*> > |
| 227 stack_tracks(stack_map.size()); |
| 228 CallStackIdMap::iterator it = stack_map.begin(); |
| 229 for (size_t i = 0; i < stack_tracks.size(); ++i) { |
| 230 stack_tracks[i] = &(it->second); |
| 231 ++it; |
| 232 } |
| 233 std::sort(stack_tracks.begin(), stack_tracks.end(), CompareCallStackIdItems); |
| 234 |
| 235 int32 total_bytes = 0; |
| 236 int32 total_blocks = 0; |
| 237 for (size_t i = 0; i < stack_tracks.size(); ++i) { |
| 238 StackTrack* stack_track = stack_tracks[i]; |
| 199 fwprintf(file_, L"%d bytes, %d items (0x%x)\n", | 239 fwprintf(file_, L"%d bytes, %d allocs, #%d\n", |
| 200 it->second.size, it->second.count, it->first); | 240 stack_track->size, stack_track->count, static_cast<int>(i)); |
| 241 total_bytes += stack_track->size; |
| 242 total_blocks += stack_track->count; |
| 243 |
| 244 CallStack* stack = stack_track->stack; |
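| // PrivateAllocatorString presumably allocates from the private hook |
| // heap, so formatting the stack does not re-enter the tracker. |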
| 245 PrivateAllocatorString output; |
| 203 stack->ToString(&output); | 246 stack->ToString(&output); |
| 204 fprintf(file_, "%s", output.c_str()); | 247 fprintf(file_, "%s", output.c_str()); |
| 205 it++; | |
| 206 } | 248 } |
| 207 fprintf(file_, "Total Leaks: %d\n", block_map_->size()); | 249 fprintf(file_, "Total Leaks: %d\n", total_blocks); |
| 208 fprintf(file_, "Total Stacks: %d\n", stack_map_->size()); | 250 fprintf(file_, "Total Stacks: %d\n", static_cast<int>(stack_tracks.size())); |
| 209 fprintf(file_, "Total Bytes: %d\n", block_map_size_); | 251 fprintf(file_, "Total Bytes: %d\n", total_bytes); |
| 210 CloseLogFile(); | 252 CloseLogFile(); |
| 211 } | 253 } |