OLD | NEW |
1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <windows.h> | 5 #include <windows.h> |
6 #include <tlhelp32.h> // for CreateToolhelp32Snapshot() | 6 #include <tlhelp32.h> // for CreateToolhelp32Snapshot() |
7 #include <map> | 7 #include <map> |
8 | 8 |
9 #include "tools/memory_watcher/memory_watcher.h" | 9 #include "tools/memory_watcher/memory_watcher.h" |
10 #include "base/file_util.h" | 10 #include "base/file_util.h" |
(...skipping 70 matching lines...)
81 fclose(file_); | 81 fclose(file_); |
82 file_ = NULL; | 82 file_ = NULL; |
83 std::wstring tmp_name = ASCIIToWide(file_name_); | 83 std::wstring tmp_name = ASCIIToWide(file_name_); |
84 tmp_name += L".tmp"; | 84 tmp_name += L".tmp"; |
85 file_util::Move(FilePath::FromWStringHack(tmp_name), | 85 file_util::Move(FilePath::FromWStringHack(tmp_name), |
86 FilePath::FromWStringHack(ASCIIToWide(file_name_))); | 86 FilePath::FromWStringHack(ASCIIToWide(file_name_))); |
87 } | 87 } |
88 } | 88 } |
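The close path above finishes the leak report with a write-to-temp-then-rename step: the report is written to "<name>.tmp" and only moved over the final name once the file is closed, so a half-written report never sits under the real name. A minimal standalone sketch of the same pattern, with hypothetical names (the real code uses file_util::Move with FilePath::FromWStringHack, as shown above):

  // Sketch only (hypothetical names): the report has already been written
  // and flushed to "<final_name>.tmp"; rename it into place on close so
  // readers never observe a partially written file.
  #include <cstdio>
  #include <string>

  void FinalizeReport(const std::string& final_name) {
    const std::string tmp_name = final_name + ".tmp";
    std::rename(tmp_name.c_str(), final_name.c_str());
  }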
89 | 89 |
90 void MemoryWatcher::OnTrack(HANDLE heap, int32 id, int32 size) { | 90 void MemoryWatcher::OnTrack(HANDLE heap, int32 id, int32 size) { |
| 91 // Don't track zeroes. It's a waste of time. |
| 92 if (size == 0) |
| 93 return; |
| 94 |
91 // AllocationStack overrides new/delete to not allocate | 95 // AllocationStack overrides new/delete to not allocate |
92 // from the main heap. | 96 // from the main heap. |
93 AllocationStack* stack = new AllocationStack(size); | 97 AllocationStack* stack = new AllocationStack(); |
94 { | 98 { |
95 // Don't track zeroes. It's a waste of time. | |
96 if (size == 0) { | |
97 delete stack; | |
98 return; | |
99 } | |
100 | |
101 AutoLock lock(block_map_lock_); | 99 AutoLock lock(block_map_lock_); |
102 | 100 |
103 // Ideally, we'd like to verify that the block being added | 101 // Ideally, we'd like to verify that the block being added |
104 // here is not already in our list of tracked blocks. However, | 102 // here is not already in our list of tracked blocks. However, |
105 // the lookup in our hash table is expensive and slows us too | 103 // the lookup in our hash table is expensive and slows us too |
106 // much. Uncomment this line if you think you need it. | 104 // much. Uncomment this line if you think you need it. |
107 //DCHECK(block_map_->find(id) == block_map_->end()); | 105 //DCHECK(block_map_->find(id) == block_map_->end()); |
108 | 106 |
109 (*block_map_)[id] = stack; | 107 (*block_map_)[id] = stack; |
110 | 108 |
(...skipping 11 matching lines...)
122 | 120 |
123 block_map_size_ += size; | 121 block_map_size_ += size; |
124 } | 122 } |
125 | 123 |
126 mem_in_use.Set(block_map_size_); | 124 mem_in_use.Set(block_map_size_); |
127 mem_in_use_blocks.Increment(); | 125 mem_in_use_blocks.Increment(); |
128 mem_in_use_allocs.Increment(); | 126 mem_in_use_allocs.Increment(); |
129 } | 127 } |
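For readers without the header handy, a rough sketch of the bookkeeping the tracking path above relies on. The names CallStackMap, CallStackIdMap, and the StackTracker fields stack/count/size all appear in this diff; the container choices and key types below are assumptions for illustration only, since the real definitions live in memory_watcher.h:

  // Sketch with assumed containers and key types, not the real declarations.
  struct StackTracker {
    AllocationStack* stack;  // representative call stack for this hash; may
                             // alias a stack also held by block_map_
    int32 count;             // live allocations sharing this call stack
    int32 size;              // bytes currently attributed to this call stack
  };
  // One entry per live allocation: allocation id -> the AllocationStack
  // captured in OnTrack (deleted again in OnUntrack).
  typedef std::map<int32, AllocationStack*> CallStackMap;
  // One entry per unique call-stack hash: hash -> aggregate tracker.
  typedef std::map<int32, StackTracker> CallStackIdMap;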
130 | 128 |
131 void MemoryWatcher::OnUntrack(HANDLE heap, int32 id, int32 size) { | 129 void MemoryWatcher::OnUntrack(HANDLE heap, int32 id, int32 size) { |
132 DCHECK(size >= 0); | 130 DCHECK_GE(size, 0); |
133 | 131 |
134 // Don't bother with these. | 132 // Don't bother with these. |
135 if (size == 0) | 133 if (size == 0) |
136 return; | 134 return; |
137 | 135 |
138 { | 136 { |
139 AutoLock lock(block_map_lock_); | 137 AutoLock lock(block_map_lock_); |
140 | 138 |
141 // First, find the block in our block_map. | 139 // First, find the block in our block_map. |
142 CallStackMap::iterator it = block_map_->find(id); | 140 CallStackMap::iterator it = block_map_->find(id); |
143 if (it != block_map_->end()) { | 141 if (it != block_map_->end()) { |
144 AllocationStack* stack = it->second; | 142 AllocationStack* stack = it->second; |
145 CallStackIdMap::iterator id_it = stack_map_->find(stack->hash()); | 143 CallStackIdMap::iterator id_it = stack_map_->find(stack->hash()); |
146 DCHECK(id_it != stack_map_->end()); | 144 DCHECK(id_it != stack_map_->end()); |
147 id_it->second.size -= size; | 145 id_it->second.size -= size; |
148 id_it->second.count--; | 146 id_it->second.count--; |
149 DCHECK(id_it->second.count >= 0); | 147 DCHECK_GE(id_it->second.count, 0); |
150 | 148 |
151 // If there are no more callstacks with this stack, then we | 149 // If there are no more callstacks with this stack, then we |
152 // have cleaned up all instances, and can safely delete the | 150 // have cleaned up all instances, and can safely delete the |
153 // stack pointer in the stack_map. | 151 // StackTracker in the stack_map. |
154 bool safe_to_delete = true; | 152 bool safe_to_delete = true; |
155 if (id_it->second.count == 0) | 153 if (id_it->second.count != 0) { |
156 stack_map_->erase(id_it); | 154 // See if our |StackTracker| is also using |stack|. |
157 else if (id_it->second.stack == stack) | 155 if (id_it->second.stack == stack) |
158 safe_to_delete = false; // we're still using the stack | 156 safe_to_delete = false; // We're still using |stack|. |
| 157 } else { |
| 158 // See if we skipped deleting our |StackTracker|'s |stack| earlier. |
| 159 if (id_it->second.stack != stack) |
| 160 delete id_it->second.stack; // We skipped it earlier. |
| 161 stack_map_->erase(id_it); // Discard our StackTracker. |
| 162 } |
159 | 163 |
160 block_map_size_ -= size; | 164 block_map_size_ -= size; |
161 block_map_->erase(id); | 165 block_map_->erase(id); |
162 if (safe_to_delete) | 166 if (safe_to_delete) |
163 delete stack; | 167 delete stack; |
164 } else { | 168 } else { |
165 // Untracked item. This happens a fair amount, and it is | 169 // Untracked item. This happens a fair amount, and it is |
166 // normal. A lot of time elapses during process startup | 170 // normal. A lot of time elapses during process startup |
167 // before the allocation routines are hooked. | 171 // before the allocation routines are hooked. |
168 } | 172 } |
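The reworked branch above is the core of this change. A StackTracker's stack pointer may alias the very AllocationStack that block_map_ holds for the block being untracked, so the cleanup has to avoid both a double delete (when they alias) and a leak of the tracker's stack (when they do not). A condensed, comment-annotated restatement of that rule, using the stand-in types from the earlier sketch:

  // |stack| is the AllocationStack that block_map_ held for this block.
  bool safe_to_delete = true;
  if (id_it->second.count != 0) {
    // Other blocks still share this hash; keep |stack| alive only if the
    // surviving tracker is the one using it.
    if (id_it->second.stack == stack)
      safe_to_delete = false;
  } else {
    // Last block for this hash. If the tracker kept a different stack,
    // release it now, then drop the tracker itself.
    if (id_it->second.stack != stack)
      delete id_it->second.stack;
    stack_map_->erase(id_it);
  }
  // ...
  if (safe_to_delete)
    delete stack;  // Otherwise the surviving tracker still owns |stack|.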
(...skipping 29 matching lines...)
198 std::string output; | 202 std::string output; |
199 stack->ToString(&output); | 203 stack->ToString(&output); |
200 fprintf(file_, "%s", output.c_str()); | 204 fprintf(file_, "%s", output.c_str()); |
201 it++; | 205 it++; |
202 } | 206 } |
203 fprintf(file_, "Total Leaks: %d\n", block_map_->size()); | 207 fprintf(file_, "Total Leaks: %d\n", block_map_->size()); |
204 fprintf(file_, "Total Stacks: %d\n", stack_map_->size()); | 208 fprintf(file_, "Total Stacks: %d\n", stack_map_->size()); |
205 fprintf(file_, "Total Bytes: %d\n", block_map_size_); | 209 fprintf(file_, "Total Bytes: %d\n", block_map_size_); |
206 CloseLogFile(); | 210 CloseLogFile(); |
207 } | 211 } |
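One incidental observation about the totals above, outside the scope of this change: size() on the maps returns an unsigned size_t while the format strings use %d. That happens to print correctly for the values this tool sees on 32-bit Windows builds, but a size_t-safe variant would look roughly like this (illustrative only):

  fprintf(file_, "Total Leaks: %u\n",
          static_cast<unsigned int>(block_map_->size()));
  fprintf(file_, "Total Stacks: %u\n",
          static_cast<unsigned int>(stack_map_->size()));
  fprintf(file_, "Total Bytes: %d\n", block_map_size_);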