OLD | NEW |
---|---|
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 84 matching lines...) | |
95 #if defined(TYPE_PROFILING) | 95 #if defined(TYPE_PROFILING) |
96 static const char kTypeProfileStatsHeader[] = "type statistics:\n"; | 96 static const char kTypeProfileStatsHeader[] = "type statistics:\n"; |
97 #endif // defined(TYPE_PROFILING) | 97 #endif // defined(TYPE_PROFILING) |
98 | 98 |
99 //---------------------------------------------------------------------- | 99 //---------------------------------------------------------------------- |
100 | 100 |
101 const char HeapProfileTable::kFileExt[] = ".heap"; | 101 const char HeapProfileTable::kFileExt[] = ".heap"; |
102 | 102 |
103 //---------------------------------------------------------------------- | 103 //---------------------------------------------------------------------- |
104 | 104 |
105 // Size for alloc_table_ and mmap_table_. | 105 static const int kHashTableSize = 179999; // Size for bucket_table_. |
106 static const int kHashTableSize = 179999; | |
107 /*static*/ const int HeapProfileTable::kMaxStackDepth; | 106 /*static*/ const int HeapProfileTable::kMaxStackDepth; |
108 | 107 |
109 //---------------------------------------------------------------------- | 108 //---------------------------------------------------------------------- |
110 | 109 |
111 // We strip out a different number of stack frames in debug mode | 110 // We strip out a different number of stack frames in debug mode |
112 // because less inlining happens in that case. | 111 // because less inlining happens in that case. |
113 #ifdef NDEBUG | 112 #ifdef NDEBUG |
114 static const int kStripFrames = 2; | 113 static const int kStripFrames = 2; |
115 #else | 114 #else |
116 static const int kStripFrames = 3; | 115 static const int kStripFrames = 3; |
117 #endif | 116 #endif |
118 | 117 |
119 // For sorting Stats or Buckets by in-use space | 118 // For sorting Stats or Buckets by in-use space |
120 static bool ByAllocatedSpace(HeapProfileTable::Stats* a, | 119 static bool ByAllocatedSpace(HeapProfileTable::Stats* a, |
121 HeapProfileTable::Stats* b) { | 120 HeapProfileTable::Stats* b) { |
122 // Return true iff "a" has more allocated space than "b" | 121 // Return true iff "a" has more allocated space than "b" |
123 return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size); | 122 return (a->alloc_size - a->free_size) > (b->alloc_size - b->free_size); |
124 } | 123 } |
125 | 124 |
126 //---------------------------------------------------------------------- | 125 //---------------------------------------------------------------------- |
127 | 126 |
128 HeapProfileTable::HeapProfileTable(Allocator alloc, DeAllocator dealloc) | 127 HeapProfileTable::HeapProfileTable(Allocator alloc, |
128 DeAllocator dealloc, | |
129 bool profile_mmap) | |
129 : alloc_(alloc), | 130 : alloc_(alloc), |
130 dealloc_(dealloc), | 131 dealloc_(dealloc), |
131 num_alloc_buckets_(0), | 132 bucket_table_(NULL), |
132 mmap_table_(NULL), | 133 profile_mmap_(profile_mmap), |
133 num_available_mmap_buckets_(0), | 134 num_buckets_(0), |
134 mmap_address_map_(NULL) { | 135 address_map_(NULL) { |
135 // Initialize the overall profile stats. | 136 // Make the table |
Alexander Potapenko (2013/03/07 06:48:38): Please mind the periods at the end of one-line comments.
Dai Mikurube (NOT FULLTIME) (2013/03/07 12:32:16): These comments are actually from the original TCMalloc…
| |
137 const int table_bytes = kHashTableSize * sizeof(*bucket_table_); | |
138 bucket_table_ = reinterpret_cast<Bucket**>(alloc_(table_bytes)); | |
139 memset(bucket_table_, 0, table_bytes); | |
140 // Make allocation map | |
141 address_map_ = | |
142 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); | |
Alexander Potapenko (2013/03/07 06:48:38): 4-space indentation here.
Dai Mikurube (NOT FULLTIME) (2013/03/07 12:32:16): Ditto, but done.
| |
143 // init the rest: | |
136 memset(&total_, 0, sizeof(total_)); | 144 memset(&total_, 0, sizeof(total_)); |
137 | 145 num_buckets_ = 0; |
138 // Make the malloc table. | |
139 const int alloc_table_bytes = kHashTableSize * sizeof(*alloc_table_); | |
140 alloc_table_ = reinterpret_cast<Bucket**>(alloc_(alloc_table_bytes)); | |
141 memset(alloc_table_, 0, alloc_table_bytes); | |
142 | |
143 // Make malloc and mmap allocation maps. | |
144 alloc_address_map_ = | |
145 new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_); | |
146 } | 146 } |
147 | 147 |
148 HeapProfileTable::~HeapProfileTable() { | 148 HeapProfileTable::~HeapProfileTable() { |
149 DeallocateBucketTable(alloc_table_); | 149 // free allocation map |
Alexander Potapenko (2013/03/07 06:08:25): Uppercase for the first letter, period at the end.
Dai Mikurube (NOT FULLTIME) (2013/03/07 12:32:16): Ditto, but done.
| |
150 alloc_table_ = NULL; | 150 address_map_->~AllocationMap(); |
151 DeallocateBucketTable(mmap_table_); | 151 dealloc_(address_map_); |
152 mmap_table_ = NULL; | 152 address_map_ = NULL; |
153 DeallocateAllocationMap(alloc_address_map_); | 153 // free hash table |
154 alloc_address_map_ = NULL; | 154 for (int b = 0; b < kHashTableSize; b++) { |
155 DeallocateAllocationMap(mmap_address_map_); | 155 for (Bucket* x = bucket_table_[b]; x != 0; /**/) { |
Alexander Potapenko (2013/03/07 06:48:38): Why not put x = x->next into the loop statement? Y…
Dai Mikurube (NOT FULLTIME) (2013/03/07 12:32:16): It's also from the original TCMalloc. Simply movi…
| |
156 mmap_address_map_ = NULL; | 156 Bucket* b = x; |
157 x = x->next; | |
158 dealloc_(b->stack); | |
159 dealloc_(b); | |
160 } | |
161 } | |
162 dealloc_(bucket_table_); | |
163 bucket_table_ = NULL; | |
157 } | 164 } |
158 | 165 |
159 void HeapProfileTable::DeallocateAllocationMap(AllocationMap* allocation) { | 166 HeapProfileTable::Bucket* HeapProfileTable::GetBucket(int depth, |
160 if (allocation != NULL) { | 167 const void* const key[]) { |
161 alloc_address_map_->~AllocationMap(); | |
162 dealloc_(allocation); | |
163 } | |
164 } | |
165 | |
166 void HeapProfileTable::DeallocateBucketTable(Bucket** table) { | |
167 if (table != NULL) { | |
168 for (int b = 0; b < kHashTableSize; b++) { | |
169 for (Bucket* x = table[b]; x != 0; /**/) { | |
170 Bucket* b = x; | |
171 x = x->next; | |
172 dealloc_(b->stack); | |
173 dealloc_(b); | |
174 } | |
175 } | |
176 dealloc_(table); | |
177 } | |
178 } | |
179 | |
180 HeapProfileTable::Bucket* HeapProfileTable::GetBucket( | |
181 int depth, const void* const key[], Bucket** table, | |
182 int* bucket_count) { | |
183 // Make hash-value | 168 // Make hash-value |
184 uintptr_t h = 0; | 169 uintptr_t h = 0; |
185 for (int i = 0; i < depth; i++) { | 170 for (int i = 0; i < depth; i++) { |
186 h += reinterpret_cast<uintptr_t>(key[i]); | 171 h += reinterpret_cast<uintptr_t>(key[i]); |
187 h += h << 10; | 172 h += h << 10; |
188 h ^= h >> 6; | 173 h ^= h >> 6; |
189 } | 174 } |
190 h += h << 3; | 175 h += h << 3; |
191 h ^= h >> 11; | 176 h ^= h >> 11; |
192 | 177 |
193 // Lookup stack trace in table | 178 // Lookup stack trace in table |
194 unsigned int buck = ((unsigned int) h) % kHashTableSize; | 179 unsigned int buck = ((unsigned int) h) % kHashTableSize; |
195 for (Bucket* b = table[buck]; b != 0; b = b->next) { | 180 for (Bucket* b = bucket_table_[buck]; b != 0; b = b->next) { |
196 if ((b->hash == h) && | 181 if ((b->hash == h) && |
197 (b->depth == depth) && | 182 (b->depth == depth) && |
198 equal(key, key + depth, b->stack)) { | 183 equal(key, key + depth, b->stack)) { |
199 return b; | 184 return b; |
200 } | 185 } |
201 } | 186 } |
202 | 187 |
203 // Create new bucket | 188 // Create new bucket |
204 const size_t key_size = sizeof(key[0]) * depth; | 189 const size_t key_size = sizeof(key[0]) * depth; |
205 const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size)); | 190 const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size)); |
206 copy(key, key + depth, kcopy); | 191 copy(key, key + depth, kcopy); |
207 Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket))); | 192 Bucket* b = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket))); |
208 memset(b, 0, sizeof(*b)); | 193 memset(b, 0, sizeof(*b)); |
209 b->hash = h; | 194 b->hash = h; |
210 b->depth = depth; | 195 b->depth = depth; |
211 b->stack = kcopy; | 196 b->stack = kcopy; |
212 b->next = table[buck]; | 197 b->next = bucket_table_[buck]; |
213 table[buck] = b; | 198 bucket_table_[buck] = b; |
214 if (bucket_count != NULL) { | 199 num_buckets_++; |
215 ++(*bucket_count); | |
216 } | |
217 return b; | 200 return b; |
218 } | 201 } |
219 | 202 |
220 int HeapProfileTable::GetCallerStackTrace( | 203 int HeapProfileTable::GetCallerStackTrace( |
221 int skip_count, void* stack[kMaxStackDepth]) { | 204 int skip_count, void* stack[kMaxStackDepth]) { |
222 return MallocHook::GetCallerStackTrace( | 205 return MallocHook::GetCallerStackTrace( |
223 stack, kMaxStackDepth, kStripFrames + skip_count + 1); | 206 stack, kMaxStackDepth, kStripFrames + skip_count + 1); |
224 } | 207 } |
225 | 208 |
226 void HeapProfileTable::RecordAlloc( | 209 void HeapProfileTable::RecordAlloc( |
227 const void* ptr, size_t bytes, int stack_depth, | 210 const void* ptr, size_t bytes, int stack_depth, |
228 const void* const call_stack[]) { | 211 const void* const call_stack[]) { |
229 Bucket* b = GetBucket(stack_depth, call_stack, alloc_table_, | 212 Bucket* b = GetBucket(stack_depth, call_stack); |
230 &num_alloc_buckets_); | |
231 b->allocs++; | 213 b->allocs++; |
232 b->alloc_size += bytes; | 214 b->alloc_size += bytes; |
233 total_.allocs++; | 215 total_.allocs++; |
234 total_.alloc_size += bytes; | 216 total_.alloc_size += bytes; |
235 | 217 |
236 AllocValue v; | 218 AllocValue v; |
237 v.set_bucket(b); // also did set_live(false); set_ignore(false) | 219 v.set_bucket(b); // also did set_live(false); set_ignore(false) |
238 v.bytes = bytes; | 220 v.bytes = bytes; |
239 alloc_address_map_->Insert(ptr, v); | 221 address_map_->Insert(ptr, v); |
240 } | 222 } |
241 | 223 |
242 void HeapProfileTable::RecordFree(const void* ptr) { | 224 void HeapProfileTable::RecordFree(const void* ptr) { |
243 AllocValue v; | 225 AllocValue v; |
244 if (alloc_address_map_->FindAndRemove(ptr, &v)) { | 226 if (address_map_->FindAndRemove(ptr, &v)) { |
245 Bucket* b = v.bucket(); | 227 Bucket* b = v.bucket(); |
246 b->frees++; | 228 b->frees++; |
247 b->free_size += v.bytes; | 229 b->free_size += v.bytes; |
248 total_.frees++; | 230 total_.frees++; |
249 total_.free_size += v.bytes; | 231 total_.free_size += v.bytes; |
250 } | 232 } |
251 } | 233 } |
252 | 234 |
253 bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const { | 235 bool HeapProfileTable::FindAlloc(const void* ptr, size_t* object_size) const { |
254 const AllocValue* alloc_value = alloc_address_map_->Find(ptr); | 236 const AllocValue* alloc_value = address_map_->Find(ptr); |
255 if (alloc_value != NULL) *object_size = alloc_value->bytes; | 237 if (alloc_value != NULL) *object_size = alloc_value->bytes; |
256 return alloc_value != NULL; | 238 return alloc_value != NULL; |
257 } | 239 } |
258 | 240 |
259 bool HeapProfileTable::FindAllocDetails(const void* ptr, | 241 bool HeapProfileTable::FindAllocDetails(const void* ptr, |
260 AllocInfo* info) const { | 242 AllocInfo* info) const { |
261 const AllocValue* alloc_value = alloc_address_map_->Find(ptr); | 243 const AllocValue* alloc_value = address_map_->Find(ptr); |
262 if (alloc_value != NULL) { | 244 if (alloc_value != NULL) { |
263 info->object_size = alloc_value->bytes; | 245 info->object_size = alloc_value->bytes; |
264 info->call_stack = alloc_value->bucket()->stack; | 246 info->call_stack = alloc_value->bucket()->stack; |
265 info->stack_depth = alloc_value->bucket()->depth; | 247 info->stack_depth = alloc_value->bucket()->depth; |
266 } | 248 } |
267 return alloc_value != NULL; | 249 return alloc_value != NULL; |
268 } | 250 } |
269 | 251 |
270 bool HeapProfileTable::FindInsideAlloc(const void* ptr, | 252 bool HeapProfileTable::FindInsideAlloc(const void* ptr, |
271 size_t max_size, | 253 size_t max_size, |
272 const void** object_ptr, | 254 const void** object_ptr, |
273 size_t* object_size) const { | 255 size_t* object_size) const { |
274 const AllocValue* alloc_value = | 256 const AllocValue* alloc_value = |
275 alloc_address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr); | 257 address_map_->FindInside(&AllocValueSize, max_size, ptr, object_ptr); |
276 if (alloc_value != NULL) *object_size = alloc_value->bytes; | 258 if (alloc_value != NULL) *object_size = alloc_value->bytes; |
277 return alloc_value != NULL; | 259 return alloc_value != NULL; |
278 } | 260 } |
279 | 261 |
280 bool HeapProfileTable::MarkAsLive(const void* ptr) { | 262 bool HeapProfileTable::MarkAsLive(const void* ptr) { |
281 AllocValue* alloc = alloc_address_map_->FindMutable(ptr); | 263 AllocValue* alloc = address_map_->FindMutable(ptr); |
282 if (alloc && !alloc->live()) { | 264 if (alloc && !alloc->live()) { |
283 alloc->set_live(true); | 265 alloc->set_live(true); |
284 return true; | 266 return true; |
285 } | 267 } |
286 return false; | 268 return false; |
287 } | 269 } |
288 | 270 |
289 void HeapProfileTable::MarkAsIgnored(const void* ptr) { | 271 void HeapProfileTable::MarkAsIgnored(const void* ptr) { |
290 AllocValue* alloc = alloc_address_map_->FindMutable(ptr); | 272 AllocValue* alloc = address_map_->FindMutable(ptr); |
291 if (alloc) { | 273 if (alloc) { |
292 alloc->set_ignore(true); | 274 alloc->set_ignore(true); |
293 } | 275 } |
294 } | 276 } |
295 | 277 |
296 void HeapProfileTable::IterateAllocationAddresses(AddressIterator f, | 278 void HeapProfileTable::IterateAllocationAddresses(AddressIterator f, |
297 void* data) { | 279 void* data) { |
298 const AllocationAddressIteratorArgs args(f, data); | 280 const AllocationAddressIteratorArgs args(f, data); |
299 alloc_address_map_->Iterate<const AllocationAddressIteratorArgs&>( | 281 address_map_->Iterate<const AllocationAddressIteratorArgs&>( |
300 AllocationAddressesIterator, args); | 282 AllocationAddressesIterator, args); |
301 } | 283 } |
302 | 284 |
303 void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) { | 285 void HeapProfileTable::MarkCurrentAllocations(AllocationMark mark) { |
304 const MarkArgs args(mark, true); | 286 const MarkArgs args(mark, true); |
305 alloc_address_map_->Iterate<const MarkArgs&>(MarkIterator, args); | 287 address_map_->Iterate<const MarkArgs&>(MarkIterator, args); |
306 } | 288 } |
307 | 289 |
308 void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) { | 290 void HeapProfileTable::MarkUnmarkedAllocations(AllocationMark mark) { |
309 const MarkArgs args(mark, true); | 291 const MarkArgs args(mark, true); |
310 alloc_address_map_->Iterate<const MarkArgs&>(MarkIterator, args); | 292 address_map_->Iterate<const MarkArgs&>(MarkIterator, args); |
311 } | 293 } |
312 | 294 |
313 // We'd be happier using snprintfer, but we don't, to reduce dependencies. | 295 // We'd be happier using snprintfer, but we don't, to reduce dependencies. |
314 int HeapProfileTable::UnparseBucket(const Bucket& b, | 296 int HeapProfileTable::UnparseBucket(const Bucket& b, |
315 char* buf, int buflen, int bufsize, | 297 char* buf, int buflen, int bufsize, |
316 const char* extra, | 298 const char* extra, |
317 Stats* profile_stats) { | 299 Stats* profile_stats) { |
318 if (profile_stats != NULL) { | 300 if (profile_stats != NULL) { |
319 profile_stats->allocs += b.allocs; | 301 profile_stats->allocs += b.allocs; |
320 profile_stats->alloc_size += b.alloc_size; | 302 profile_stats->alloc_size += b.alloc_size; |
(...skipping 17 matching lines...) | |
338 buflen += printed; | 320 buflen += printed; |
339 } | 321 } |
340 printed = snprintf(buf + buflen, bufsize - buflen, "\n"); | 322 printed = snprintf(buf + buflen, bufsize - buflen, "\n"); |
341 if (printed < 0 || printed >= bufsize - buflen) return buflen; | 323 if (printed < 0 || printed >= bufsize - buflen) return buflen; |
342 buflen += printed; | 324 buflen += printed; |
343 return buflen; | 325 return buflen; |
344 } | 326 } |
345 | 327 |
346 HeapProfileTable::Bucket** | 328 HeapProfileTable::Bucket** |
347 HeapProfileTable::MakeSortedBucketList() const { | 329 HeapProfileTable::MakeSortedBucketList() const { |
348 Bucket** list = reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket) * | 330 Bucket** list = |
349 (num_alloc_buckets_ + num_available_mmap_buckets_))); | 331 reinterpret_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_)); |
Alexander Potapenko (2013/03/07 06:48:38): 4-space indentation.
Dai Mikurube (NOT FULLTIME) (2013/03/07 12:32:16): It's also from the original TCMalloc, but done.
| |
350 | |
351 RAW_DCHECK(mmap_table_ != NULL || num_available_mmap_buckets_ == 0, ""); | |
352 | 332 |
353 int n = 0; | 333 int n = 0; |
354 | |
355 for (int b = 0; b < kHashTableSize; b++) { | 334 for (int b = 0; b < kHashTableSize; b++) { |
356 for (Bucket* x = alloc_table_[b]; x != 0; x = x->next) { | 335 for (Bucket* x = bucket_table_[b]; x != 0; x = x->next) { |
357 list[n++] = x; | 336 list[n++] = x; |
358 } | 337 } |
359 } | 338 } |
360 RAW_DCHECK(n == num_alloc_buckets_, ""); | 339 RAW_DCHECK(n == num_buckets_, ""); |
361 | 340 |
362 if (mmap_table_ != NULL) { | 341 sort(list, list + num_buckets_, ByAllocatedSpace); |
363 for (int b = 0; b < kHashTableSize; b++) { | |
364 for (Bucket* x = mmap_table_[b]; x != 0; x = x->next) { | |
365 list[n++] = x; | |
366 } | |
367 } | |
368 } | |
369 RAW_DCHECK(n == num_alloc_buckets_ + num_available_mmap_buckets_, ""); | |
370 | |
371 sort(list, list + num_alloc_buckets_ + num_available_mmap_buckets_, | |
372 ByAllocatedSpace); | |
373 | 342 |
374 return list; | 343 return list; |
375 } | 344 } |
376 | 345 |
377 void HeapProfileTable::RefreshMMapData(Allocator mmap_alloc, | |
378 DeAllocator mmap_dealloc) { | |
379 // Make the table | |
380 static const int mmap_table_bytes = kHashTableSize * sizeof(*mmap_table_); | |
381 if (mmap_table_ == NULL) { | |
382 mmap_table_ = reinterpret_cast<Bucket**>(alloc_(mmap_table_bytes)); | |
383 memset(mmap_table_, 0, mmap_table_bytes); | |
384 } | |
385 num_available_mmap_buckets_ = 0; | |
386 | |
387 ClearMMapData(); | |
388 mmap_address_map_ = new(alloc_(sizeof(AllocationMap))) | |
389 AllocationMap(mmap_alloc, mmap_dealloc); | |
390 | |
391 MemoryRegionMap::LockHolder l; | |
392 for (MemoryRegionMap::RegionIterator r = | |
393 MemoryRegionMap::BeginRegionLocked(); | |
394 r != MemoryRegionMap::EndRegionLocked(); ++r) { | |
395 Bucket* b = | |
396 GetBucket(r->call_stack_depth, r->call_stack, mmap_table_, NULL); | |
397 if (b->alloc_size == 0) { | |
398 num_available_mmap_buckets_ += 1; | |
399 } | |
400 b->allocs += 1; | |
401 b->alloc_size += r->end_addr - r->start_addr; | |
402 | |
403 AllocValue v; | |
404 v.set_bucket(b); | |
405 v.bytes = r->end_addr - r->start_addr; | |
406 mmap_address_map_->Insert(reinterpret_cast<const void*>(r->start_addr), v); | |
407 } | |
408 } | |
409 | |
410 void HeapProfileTable::ClearMMapData() { | |
411 if (mmap_address_map_ == NULL) return; | |
412 | |
413 mmap_address_map_->Iterate(ZeroBucketCountsIterator, this); | |
414 mmap_address_map_->~AllocationMap(); | |
415 dealloc_(mmap_address_map_); | |
416 mmap_address_map_ = NULL; | |
417 } | |
418 | |
419 void HeapProfileTable::DumpMarkedObjects(AllocationMark mark, | 346 void HeapProfileTable::DumpMarkedObjects(AllocationMark mark, |
420 const char* file_name) { | 347 const char* file_name) { |
421 RawFD fd = RawOpenForWriting(file_name); | 348 RawFD fd = RawOpenForWriting(file_name); |
422 if (fd == kIllegalRawFD) { | 349 if (fd == kIllegalRawFD) { |
423 RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name); | 350 RAW_LOG(ERROR, "Failed dumping live objects to %s", file_name); |
424 return; | 351 return; |
425 } | 352 } |
426 const DumpMarkedArgs args(fd, mark); | 353 const DumpMarkedArgs args(fd, mark); |
427 alloc_address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args); | 354 address_map_->Iterate<const DumpMarkedArgs&>(DumpMarkedIterator, args); |
428 RawClose(fd); | 355 RawClose(fd); |
429 } | 356 } |
430 | 357 |
431 #if defined(TYPE_PROFILING) | 358 #if defined(TYPE_PROFILING) |
432 void HeapProfileTable::DumpTypeStatistics(const char* file_name) const { | 359 void HeapProfileTable::DumpTypeStatistics(const char* file_name) const { |
433 RawFD fd = RawOpenForWriting(file_name); | 360 RawFD fd = RawOpenForWriting(file_name); |
434 if (fd == kIllegalRawFD) { | 361 if (fd == kIllegalRawFD) { |
435 RAW_LOG(ERROR, "Failed dumping type statistics to %s", file_name); | 362 RAW_LOG(ERROR, "Failed dumping type statistics to %s", file_name); |
436 return; | 363 return; |
437 } | 364 } |
438 | 365 |
439 AddressMap<TypeCount>* type_size_map; | 366 AddressMap<TypeCount>* type_size_map; |
440 type_size_map = new(alloc_(sizeof(AddressMap<TypeCount>))) | 367 type_size_map = new(alloc_(sizeof(AddressMap<TypeCount>))) |
441 AddressMap<TypeCount>(alloc_, dealloc_); | 368 AddressMap<TypeCount>(alloc_, dealloc_); |
442 alloc_address_map_->Iterate(TallyTypesItererator, type_size_map); | 369 address_map_->Iterate(TallyTypesItererator, type_size_map); |
443 | 370 |
444 RawWrite(fd, kTypeProfileStatsHeader, strlen(kTypeProfileStatsHeader)); | 371 RawWrite(fd, kTypeProfileStatsHeader, strlen(kTypeProfileStatsHeader)); |
445 const DumpArgs args(fd, NULL); | 372 const DumpArgs args(fd, NULL); |
446 type_size_map->Iterate<const DumpArgs&>(DumpTypesIterator, args); | 373 type_size_map->Iterate<const DumpArgs&>(DumpTypesIterator, args); |
447 RawClose(fd); | 374 RawClose(fd); |
448 | 375 |
449 type_size_map->~AddressMap<TypeCount>(); | 376 type_size_map->~AddressMap<TypeCount>(); |
450 dealloc_(type_size_map); | 377 dealloc_(type_size_map); |
451 } | 378 } |
452 #endif // defined(TYPE_PROFILING) | 379 #endif // defined(TYPE_PROFILING) |
453 | 380 |
454 void HeapProfileTable::IterateOrderedAllocContexts( | 381 void HeapProfileTable::IterateOrderedAllocContexts( |
455 AllocContextIterator callback) const { | 382 AllocContextIterator callback) const { |
456 Bucket** list = MakeSortedBucketList(); | 383 Bucket** list = MakeSortedBucketList(); |
457 AllocContextInfo info; | 384 AllocContextInfo info; |
458 for (int i = 0; i < num_alloc_buckets_; ++i) { | 385 for (int i = 0; i < num_buckets_; ++i) { |
459 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); | 386 *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]); |
460 info.stack_depth = list[i]->depth; | 387 info.stack_depth = list[i]->depth; |
461 info.call_stack = list[i]->stack; | 388 info.call_stack = list[i]->stack; |
462 callback(info); | 389 callback(info); |
463 } | 390 } |
464 dealloc_(list); | 391 dealloc_(list); |
465 } | 392 } |
466 | 393 |
467 int HeapProfileTable::FillOrderedProfile(char buf[], int size) const { | 394 int HeapProfileTable::FillOrderedProfile(char buf[], int size) const { |
468 Bucket** list = MakeSortedBucketList(); | 395 Bucket** list = MakeSortedBucketList(); |
(...skipping 11 matching lines...) | |
480 map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy); | 407 map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy); |
481 RAW_DCHECK(map_length <= size, ""); | 408 RAW_DCHECK(map_length <= size, ""); |
482 char* const map_start = buf + size - map_length; // move to end | 409 char* const map_start = buf + size - map_length; // move to end |
483 memmove(map_start, buf, map_length); | 410 memmove(map_start, buf, map_length); |
484 size -= map_length; | 411 size -= map_length; |
485 | 412 |
486 Stats stats; | 413 Stats stats; |
487 memset(&stats, 0, sizeof(stats)); | 414 memset(&stats, 0, sizeof(stats)); |
488 int bucket_length = snprintf(buf, size, "%s", kProfileHeader); | 415 int bucket_length = snprintf(buf, size, "%s", kProfileHeader); |
489 if (bucket_length < 0 || bucket_length >= size) return 0; | 416 if (bucket_length < 0 || bucket_length >= size) return 0; |
490 Bucket total_with_mmap(total_); | 417 bucket_length = UnparseBucket(total_, buf, bucket_length, size, |
491 if (mmap_table_ != NULL) { | 418 " heapprofile", &stats); |
492 total_with_mmap.alloc_size += MemoryRegionMap::MapSize(); | 419 |
493 total_with_mmap.free_size += MemoryRegionMap::UnmapSize(); | 420 // Dump the mmap list first. |
421 if (profile_mmap_) { | |
422 BufferArgs buffer(buf, bucket_length, size); | |
423 MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer); | |
424 bucket_length = buffer.buflen; | |
494 } | 425 } |
495 bucket_length = UnparseBucket(total_with_mmap, buf, bucket_length, size, | 426 |
496 " heapprofile", &stats); | 427 for (int i = 0; i < num_buckets_; i++) { |
497 for (int i = 0; i < num_alloc_buckets_; i++) { | |
498 bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "", | 428 bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "", |
499 &stats); | 429 &stats); |
500 } | 430 } |
501 RAW_DCHECK(bucket_length < size, ""); | 431 RAW_DCHECK(bucket_length < size, ""); |
502 | 432 |
503 dealloc_(list); | 433 dealloc_(list); |
504 | 434 |
505 RAW_DCHECK(buf + bucket_length <= map_start, ""); | 435 RAW_DCHECK(buf + bucket_length <= map_start, ""); |
506 memmove(buf + bucket_length, map_start, map_length); // close the gap | 436 memmove(buf + bucket_length, map_start, map_length); // close the gap |
507 | 437 |
508 return bucket_length + map_length; | 438 return bucket_length + map_length; |
509 } | 439 } |
510 | 440 |
441 // static | |
442 void HeapProfileTable::DumpBucketIterator(const Bucket* bucket, | |
443 BufferArgs* args) { | |
444 args->buflen = | |
445 UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize, "mmap", NULL); | |
Alexander Potapenko (2013/03/07 06:15:13): This line seems to be longer than 80 chars. Is it?
Dai Mikurube (NOT FULLTIME) (2013/03/07 12:32:16): Done.
| |
446 } | |
447 | |
511 #if defined(TYPE_PROFILING) | 448 #if defined(TYPE_PROFILING) |
512 // static | 449 // static |
513 void HeapProfileTable::TallyTypesItererator( | 450 void HeapProfileTable::TallyTypesItererator( |
514 const void* ptr, | 451 const void* ptr, |
515 AllocValue* value, | 452 AllocValue* value, |
516 AddressMap<TypeCount>* type_size_map) { | 453 AddressMap<TypeCount>* type_size_map) { |
517 const std::type_info* type = LookupType(ptr); | 454 const std::type_info* type = LookupType(ptr); |
518 | 455 |
519 const void* key = NULL; | 456 const void* key = NULL; |
520 if (type) | 457 if (type) |
(...skipping 70 matching lines...) | |
591 } | 528 } |
592 | 529 |
593 inline | 530 inline |
594 void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v, | 531 void HeapProfileTable::MarkIterator(const void* ptr, AllocValue* v, |
595 const MarkArgs& args) { | 532 const MarkArgs& args) { |
596 if (!args.mark_all && v->mark() != UNMARKED) | 533 if (!args.mark_all && v->mark() != UNMARKED) |
597 return; | 534 return; |
598 v->set_mark(args.mark); | 535 v->set_mark(args.mark); |
599 } | 536 } |
600 | 537 |
601 inline void HeapProfileTable::ZeroBucketCountsIterator( | |
602 const void* ptr, AllocValue* v, HeapProfileTable* heap_profile) { | |
603 Bucket* b = v->bucket(); | |
604 if (b != NULL) { | |
605 b->allocs = 0; | |
606 b->alloc_size = 0; | |
607 b->free_size = 0; | |
608 b->frees = 0; | |
609 } | |
610 } | |
611 | |
612 // Callback from NonLiveSnapshot; adds the entry to arg->dest | 538 // Callback from NonLiveSnapshot; adds the entry to arg->dest |
613 // if the entry is not live and is not present in arg->base. | 539 // if the entry is not live and is not present in arg->base. |
614 void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v, | 540 void HeapProfileTable::AddIfNonLive(const void* ptr, AllocValue* v, |
615 AddNonLiveArgs* arg) { | 541 AddNonLiveArgs* arg) { |
616 if (v->live()) { | 542 if (v->live()) { |
617 v->set_live(false); | 543 v->set_live(false); |
618 } else { | 544 } else { |
619 if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) { | 545 if (arg->base != NULL && arg->base->map_.Find(ptr) != NULL) { |
620 // Present in arg->base, so do not save | 546 // Present in arg->base, so do not save |
621 } else { | 547 } else { |
(...skipping 46 matching lines...) | |
668 } | 594 } |
669 } | 595 } |
670 globfree(&g); | 596 globfree(&g); |
671 #else /* HAVE_GLOB_H */ | 597 #else /* HAVE_GLOB_H */ |
672 RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())"); | 598 RAW_LOG(WARNING, "Unable to remove old heap profiles (can't run glob())"); |
673 #endif | 599 #endif |
674 } | 600 } |
675 | 601 |
676 HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() { | 602 HeapProfileTable::Snapshot* HeapProfileTable::TakeSnapshot() { |
677 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); | 603 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); |
678 alloc_address_map_->Iterate(AddToSnapshot, s); | 604 address_map_->Iterate(AddToSnapshot, s); |
679 return s; | 605 return s; |
680 } | 606 } |
681 | 607 |
682 void HeapProfileTable::ReleaseSnapshot(Snapshot* s) { | 608 void HeapProfileTable::ReleaseSnapshot(Snapshot* s) { |
683 s->~Snapshot(); | 609 s->~Snapshot(); |
684 dealloc_(s); | 610 dealloc_(s); |
685 } | 611 } |
686 | 612 |
687 // Callback from TakeSnapshot; adds a single entry to snapshot | 613 // Callback from TakeSnapshot; adds a single entry to snapshot |
688 void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v, | 614 void HeapProfileTable::AddToSnapshot(const void* ptr, AllocValue* v, |
689 Snapshot* snapshot) { | 615 Snapshot* snapshot) { |
690 snapshot->Add(ptr, *v); | 616 snapshot->Add(ptr, *v); |
691 } | 617 } |
692 | 618 |
693 HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot( | 619 HeapProfileTable::Snapshot* HeapProfileTable::NonLiveSnapshot( |
694 Snapshot* base) { | 620 Snapshot* base) { |
695 RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n", | 621 RAW_VLOG(2, "NonLiveSnapshot input: %d %d\n", |
696 int(total_.allocs - total_.frees), | 622 int(total_.allocs - total_.frees), |
697 int(total_.alloc_size - total_.free_size)); | 623 int(total_.alloc_size - total_.free_size)); |
698 | 624 |
699 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); | 625 Snapshot* s = new (alloc_(sizeof(Snapshot))) Snapshot(alloc_, dealloc_); |
700 AddNonLiveArgs args; | 626 AddNonLiveArgs args; |
701 args.dest = s; | 627 args.dest = s; |
702 args.base = base; | 628 args.base = base; |
703 alloc_address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args); | 629 address_map_->Iterate<AddNonLiveArgs*>(AddIfNonLive, &args); |
704 RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n", | 630 RAW_VLOG(2, "NonLiveSnapshot output: %d %d\n", |
705 int(s->total_.allocs - s->total_.frees), | 631 int(s->total_.allocs - s->total_.frees), |
706 int(s->total_.alloc_size - s->total_.free_size)); | 632 int(s->total_.alloc_size - s->total_.free_size)); |
707 return s; | 633 return s; |
708 } | 634 } |
709 | 635 |
710 // Information kept per unique bucket seen | 636 // Information kept per unique bucket seen |
711 struct HeapProfileTable::Snapshot::Entry { | 637 struct HeapProfileTable::Snapshot::Entry { |
712 int count; | 638 int count; |
713 int bytes; | 639 int bytes; |
(...skipping 99 matching lines...) | |
813 char* unused) { | 739 char* unused) { |
814 // Perhaps also log the allocation stack trace (unsymbolized) | 740 // Perhaps also log the allocation stack trace (unsymbolized) |
815 // on this line in case somebody finds it useful. | 741 // on this line in case somebody finds it useful. |
816 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); | 742 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); |
817 } | 743 } |
818 | 744 |
819 void HeapProfileTable::Snapshot::ReportIndividualObjects() { | 745 void HeapProfileTable::Snapshot::ReportIndividualObjects() { |
820 char unused; | 746 char unused; |
821 map_.Iterate(ReportObject, &unused); | 747 map_.Iterate(ReportObject, &unused); |
822 } | 748 } |