| OLD | NEW |
| (Empty) |
| 1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include <algorithm> | |
| 6 #include <windows.h> | |
| 7 #include <tlhelp32.h> // for CreateToolhelp32Snapshot() | |
| 8 #include <map> | |
| 9 | |
| 10 #include "tools/memory_watcher/memory_watcher.h" | |
| 11 #include "base/file_util.h" | |
| 12 #include "base/logging.h" | |
| 13 #include "base/metrics/stats_counters.h" | |
| 14 #include "base/strings/string_util.h" | |
| 15 #include "base/strings/utf_string_conversions.h" | |
| 16 #include "base/synchronization/lock.h" | |
| 17 #include "tools/memory_watcher/call_stack.h" | |
| 18 #include "tools/memory_watcher/preamble_patcher.h" | |
| 19 | |
// Global tallies of tracked memory, updated on every hooked allocation
// and free, exported through the base::StatsCounter mechanism.
static base::StatsCounter mem_in_use("MemoryInUse.Bytes");          // Net bytes currently live.
static base::StatsCounter mem_in_use_blocks("MemoryInUse.Blocks");  // Net count of live blocks.
static base::StatsCounter mem_in_use_allocs("MemoryInUse.Allocs");  // Cumulative allocations seen.
static base::StatsCounter mem_in_use_frees("MemoryInUse.Frees");    // Cumulative frees seen.
| 24 | |
| 25 // --------------------------------------------------------------------- | |
| 26 | |
// Sets up the private-heap and call-stack machinery, then installs the
// allocation hooks.  Hook() must run last: once the watcher is
// registered, OnTrack()/OnUntrack() may fire immediately on any thread.
MemoryWatcher::MemoryWatcher()
    : file_(NULL),
      hooked_(false),
      active_thread_id_(0) {
  MemoryHook::Initialize();
  CallStack::Initialize();

  // Allocated before Hook() registers us, so this allocation itself is
  // never reported back into OnTrack().
  block_map_ = new CallStackMap();

  // Register last - only after we're ready for notifications!
  Hook();
}
| 39 | |
// Tears down in reverse order of construction: stop notifications
// first, finalize the log, then free the map.
MemoryWatcher::~MemoryWatcher() {
  Unhook();

  CloseLogFile();

  // Pointers in the block_map are part of the MemoryHook heap. Be sure
  // to delete the map before closing the heap.
  delete block_map_;
}
| 49 | |
// Starts routing allocation/free notifications to this watcher.
// Must not be called while already hooked (DCHECK-enforced).
void MemoryWatcher::Hook() {
  DCHECK(!hooked_);
  MemoryHook::RegisterWatcher(this);
  hooked_ = true;
}
| 55 | |
| 56 void MemoryWatcher::Unhook() { | |
| 57 if (hooked_) { | |
| 58 MemoryHook::UnregisterWatcher(this); | |
| 59 hooked_ = false; | |
| 60 } | |
| 61 } | |
| 62 | |
| 63 void MemoryWatcher::OpenLogFile() { | |
| 64 DCHECK(file_ == NULL); | |
| 65 file_name_ = "memwatcher"; | |
| 66 if (!log_name_.empty()) { | |
| 67 file_name_ += "."; | |
| 68 file_name_ += log_name_; | |
| 69 } | |
| 70 file_name_ += ".log"; | |
| 71 char buf[16]; | |
| 72 file_name_ += _itoa(GetCurrentProcessId(), buf, 10); | |
| 73 | |
| 74 std::string tmp_name(file_name_); | |
| 75 tmp_name += ".tmp"; | |
| 76 file_ = fopen(tmp_name.c_str(), "w+"); | |
| 77 } | |
| 78 | |
// Closes the log (if open) and renames the working ".tmp" file, written
// by OpenLogFile(), onto its final name so readers never observe a
// partially written log.
void MemoryWatcher::CloseLogFile() {
  if (file_ != NULL) {
    fclose(file_);
    file_ = NULL;
    // Reconstruct the temp path used by OpenLogFile() (wide, since
    // base::FilePath on Windows is wide-character based).
    std::wstring tmp_name = base::ASCIIToWide(file_name_);
    tmp_name += L".tmp";
    base::Move(base::FilePath(tmp_name),
               base::FilePath(base::ASCIIToWide(file_name_)));
  }
}
| 89 | |
| 90 bool MemoryWatcher::LockedRecursionDetected() const { | |
| 91 if (!active_thread_id_) return false; | |
| 92 DWORD thread_id = GetCurrentThreadId(); | |
| 93 // TODO(jar): Perchance we should use atomic access to member. | |
| 94 return thread_id == active_thread_id_; | |
| 95 } | |
| 96 | |
| 97 void MemoryWatcher::OnTrack(HANDLE heap, int32 id, int32 size) { | |
| 98 // Don't track zeroes. It's a waste of time. | |
| 99 if (size == 0) | |
| 100 return; | |
| 101 | |
| 102 if (LockedRecursionDetected()) | |
| 103 return; | |
| 104 | |
| 105 // AllocationStack overrides new/delete to not allocate | |
| 106 // from the main heap. | |
| 107 AllocationStack* stack = new AllocationStack(size); | |
| 108 if (!stack->Valid()) return; // Recursion blocked generation of stack. | |
| 109 | |
| 110 { | |
| 111 base::AutoLock lock(block_map_lock_); | |
| 112 | |
| 113 // Ideally, we'd like to verify that the block being added | |
| 114 // here is not already in our list of tracked blocks. However, | |
| 115 // the lookup in our hash table is expensive and slows us too | |
| 116 // much. | |
| 117 CallStackMap::iterator block_it = block_map_->find(id); | |
| 118 if (block_it != block_map_->end()) { | |
| 119 #if 0 // Don't do this until stack->ToString() uses ONLY our heap. | |
| 120 active_thread_id_ = GetCurrentThreadId(); | |
| 121 PrivateAllocatorString output; | |
| 122 block_it->second->ToString(&output); | |
| 123 // VLOG(1) << "First Stack size " << stack->size() << "was\n" << output; | |
| 124 stack->ToString(&output); | |
| 125 // VLOG(1) << "Second Stack size " << stack->size() << "was\n" << output; | |
| 126 #endif // 0 | |
| 127 | |
| 128 // TODO(jar): We should delete one stack, and keep the other, perhaps | |
| 129 // based on size. | |
| 130 // For now, just delete the first, and keep the second? | |
| 131 delete block_it->second; | |
| 132 } | |
| 133 // TODO(jar): Perchance we should use atomic access to member. | |
| 134 active_thread_id_ = 0; // Note: Only do this AFTER exiting above scope! | |
| 135 | |
| 136 (*block_map_)[id] = stack; | |
| 137 } | |
| 138 | |
| 139 mem_in_use.Add(size); | |
| 140 mem_in_use_blocks.Increment(); | |
| 141 mem_in_use_allocs.Increment(); | |
| 142 } | |
| 143 | |
// Records the free of block |id| (previously seen via OnTrack): removes
// its stored call stack and updates the global tallies.  Frees of
// blocks we never tracked (common for allocations made before hooking)
// still bump the free counters but contribute 0 bytes.  |heap| is
// currently unused.
void MemoryWatcher::OnUntrack(HANDLE heap, int32 id, int32 size) {
  DCHECK_GE(size, 0);

  // Don't bother with these.
  if (size == 0)
    return;

  if (LockedRecursionDetected())
    return;

  {
    base::AutoLock lock(block_map_lock_);
    // Publish our thread id so LockedRecursionDetected() drops any
    // re-entrant notifications triggered while we hold the lock.
    active_thread_id_ = GetCurrentThreadId();

    // First, find the block in our block_map.
    CallStackMap::iterator it = block_map_->find(id);
    if (it != block_map_->end()) {
      AllocationStack* stack = it->second;
      DCHECK(stack->size() == size);
      block_map_->erase(id);
      delete stack;
    } else {
      // Untracked item. This happens a fair amount, and it is
      // normal. A lot of time elapses during process startup
      // before the allocation routines are hooked.
      size = 0;  // Ignore size in tallies.
    }
    // TODO(jar): Perchance we should use atomic access to member.
    active_thread_id_ = 0;
  }

  mem_in_use.Add(-size);
  mem_in_use_blocks.Decrement();
  mem_in_use_frees.Increment();
}
| 179 | |
| 180 void MemoryWatcher::SetLogName(char* log_name) { | |
| 181 if (!log_name) | |
| 182 return; | |
| 183 | |
| 184 log_name_ = log_name; | |
| 185 } | |
| 186 | |
| 187 // Help sort lists of stacks based on allocation cost. | |
| 188 // Note: Sort based on allocation count is interesting too! | |
| 189 static bool CompareCallStackIdItems(MemoryWatcher::StackTrack* left, | |
| 190 MemoryWatcher::StackTrack* right) { | |
| 191 return left->size > right->size; | |
| 192 } | |
| 193 | |
| 194 | |
| 195 void MemoryWatcher::DumpLeaks() { | |
| 196 // We can only dump the leaks once. We'll cleanup the hooks here. | |
| 197 if (!hooked_) | |
| 198 return; | |
| 199 Unhook(); | |
| 200 | |
| 201 base::AutoLock lock(block_map_lock_); | |
| 202 active_thread_id_ = GetCurrentThreadId(); | |
| 203 | |
| 204 OpenLogFile(); | |
| 205 | |
| 206 // Aggregate contributions from each allocated block on per-stack basis. | |
| 207 CallStackIdMap stack_map; | |
| 208 for (CallStackMap::iterator block_it = block_map_->begin(); | |
| 209 block_it != block_map_->end(); ++block_it) { | |
| 210 AllocationStack* stack = block_it->second; | |
| 211 int32 stack_hash = stack->hash(); | |
| 212 int32 alloc_block_size = stack->size(); | |
| 213 CallStackIdMap::iterator it = stack_map.find(stack_hash); | |
| 214 if (it == stack_map.end()) { | |
| 215 StackTrack tracker; | |
| 216 tracker.count = 1; | |
| 217 tracker.size = alloc_block_size; | |
| 218 tracker.stack = stack; // Temporary pointer into block_map_. | |
| 219 stack_map[stack_hash] = tracker; | |
| 220 } else { | |
| 221 it->second.count++; | |
| 222 it->second.size += alloc_block_size; | |
| 223 } | |
| 224 } | |
| 225 // Don't release lock yet, as block_map_ is still pointed into. | |
| 226 | |
| 227 // Put references to StrackTracks into array for sorting. | |
| 228 std::vector<StackTrack*, PrivateHookAllocator<int32> > | |
| 229 stack_tracks(stack_map.size()); | |
| 230 CallStackIdMap::iterator it = stack_map.begin(); | |
| 231 for (size_t i = 0; i < stack_tracks.size(); ++i) { | |
| 232 stack_tracks[i] = &(it->second); | |
| 233 ++it; | |
| 234 } | |
| 235 sort(stack_tracks.begin(), stack_tracks.end(), CompareCallStackIdItems); | |
| 236 | |
| 237 int32 total_bytes = 0; | |
| 238 int32 total_blocks = 0; | |
| 239 for (size_t i = 0; i < stack_tracks.size(); ++i) { | |
| 240 StackTrack* stack_track = stack_tracks[i]; | |
| 241 fwprintf(file_, L"%d bytes, %d allocs, #%d\n", | |
| 242 stack_track->size, stack_track->count, i); | |
| 243 total_bytes += stack_track->size; | |
| 244 total_blocks += stack_track->count; | |
| 245 | |
| 246 CallStack* stack = stack_track->stack; | |
| 247 PrivateAllocatorString output; | |
| 248 stack->ToString(&output); | |
| 249 fprintf(file_, "%s", output.c_str()); | |
| 250 } | |
| 251 fprintf(file_, "Total Leaks: %d\n", total_blocks); | |
| 252 fprintf(file_, "Total Stacks: %d\n", stack_tracks.size()); | |
| 253 fprintf(file_, "Total Bytes: %d\n", total_bytes); | |
| 254 CloseLogFile(); | |
| 255 } | |
| OLD | NEW |