OLD | NEW |
1 // Copyright (c) 2006, Google Inc. | 1 // Copyright (c) 2006, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 310 matching lines...)
321 reinterpret_cast<uintptr_t>(b.stack[d])); | 321 reinterpret_cast<uintptr_t>(b.stack[d])); |
322 if (printed < 0 || printed >= bufsize - buflen) return buflen; | 322 if (printed < 0 || printed >= bufsize - buflen) return buflen; |
323 buflen += printed; | 323 buflen += printed; |
324 } | 324 } |
325 printed = snprintf(buf + buflen, bufsize - buflen, "\n"); | 325 printed = snprintf(buf + buflen, bufsize - buflen, "\n"); |
326 if (printed < 0 || printed >= bufsize - buflen) return buflen; | 326 if (printed < 0 || printed >= bufsize - buflen) return buflen; |
327 buflen += printed; | 327 buflen += printed; |
328 return buflen; | 328 return buflen; |
329 } | 329 } |
330 | 330 |
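The tail of UnparseBucket above shows the append convention both unparse routines rely on: each snprintf writes at buf + buflen into whatever space remains, and a negative return, or one that is >= bufsize - buflen, means the write failed or was truncated, so the routine stops and returns the length accumulated so far. A minimal standalone sketch of that pattern, with made-up names and values (illustration only, not part of this change):

#include <stdio.h>

// Append one formatted field to buf; return the new length, or the old
// length unchanged if the field did not fit (snprintf reports the length
// it would have needed, so printed >= space means truncation).
static int Append(char* buf, int buflen, int bufsize, int value) {
  int printed = snprintf(buf + buflen, bufsize - buflen, "%d ", value);
  if (printed < 0 || printed >= bufsize - buflen) return buflen;
  return buflen + printed;
}

int main() {
  char buf[8];
  int len = 0;
  len = Append(buf, len, sizeof(buf), 42);       // fits: "42 ", len = 3
  len = Append(buf, len, sizeof(buf), 1234567);  // needs 8 chars, only 5 left: len stays 3
  printf("%.*s\n", len, buf);                    // prints "42 "
  return 0;
}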
| 331 int HeapProfileTable::UnparseBucket2(const Bucket& b, |
| 332 char* buf, int buflen, int bufsize, |
| 333 Stats* profile_stats) { |
| 334 if (profile_stats != NULL) { |
| 335 profile_stats->allocs += b.allocs; |
| 336 profile_stats->alloc_size += b.alloc_size; |
| 337 profile_stats->frees += b.frees; |
| 338 profile_stats->free_size += b.free_size; |
| 339 } |
| 340 int printed = snprintf(buf + buflen, bufsize - buflen, |
| 341 "{" |
| 342 "\"trace\": \""); |
| 343 if (printed < 0 || printed >= bufsize - buflen) return buflen; |
| 344 buflen += printed; |
| 345 for (int d = 0; d < b.depth; d++) { |
| 346 // HACK: treat each stack entry as a pointer to a NUL-terminated string, not a raw PC. |
| 347 printed = snprintf(buf + buflen, bufsize - buflen, "%s ", |
| 348 reinterpret_cast<const char*>(b.stack[d])); |
| 349 if (printed < 0 || printed >= bufsize - buflen) return buflen; |
| 350 buflen += printed; |
| 351 } |
| 352 printed = snprintf(buf + buflen, |
| 353 bufsize - buflen, |
| 354 "\", " |
| 355 "\"current_allocs\": %d, " |
| 356 "\"current_bytes\": %" PRId64 ", " |
| 357 "\"total_allocs\": %d, " |
| 358 "\"total_bytes\": %" PRId64 |
| 359 "}", |
| 360 b.allocs - b.frees, |
| 361 b.alloc_size - b.free_size, |
| 362 b.allocs, |
| 363 b.alloc_size); |
| 364 // If it looks like the snprintf failed, ignore the fact we printed anything |
| 365 if (printed < 0 || printed >= bufsize - buflen) return buflen; |
| 366 buflen += printed; |
| 367 return buflen; |
| 368 } |
| 369 |
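Each bucket thus serializes to a single JSON object: the frame strings joined under "trace", then the current counters (allocs minus frees, alloc_size minus free_size) and the lifetime totals. A standalone sketch of the record this produces, mirroring the format strings above with made-up bucket values (illustration only, not part of this change):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main() {
  // Pretend bucket: 10 allocs / 4 frees, 4096 bytes allocated / 1024 freed,
  // with a two-frame trace whose entries already point at readable strings.
  int allocs = 10, frees = 4;
  int64_t alloc_size = 4096, free_size = 1024;
  printf("{\"trace\": \"%s%s\", "
         "\"current_allocs\": %d, "
         "\"current_bytes\": %" PRId64 ", "
         "\"total_allocs\": %d, "
         "\"total_bytes\": %" PRId64 "}\n",
         "frame0 ", "frame1 ",
         allocs - frees, alloc_size - free_size, allocs, alloc_size);
  // {"trace": "frame0 frame1 ", "current_allocs": 6, "current_bytes": 3072,
  //  "total_allocs": 10, "total_bytes": 4096}
  return 0;
}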
331 HeapProfileTable::Bucket** | 370 HeapProfileTable::Bucket** |
332 HeapProfileTable::MakeSortedBucketList() const { | 371 HeapProfileTable::MakeSortedBucketList() const { |
333 Bucket** list = static_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_)); | 372 Bucket** list = static_cast<Bucket**>(alloc_(sizeof(Bucket) * num_buckets_)); |
334 | 373 |
335 int bucket_count = 0; | 374 int bucket_count = 0; |
336 for (int i = 0; i < kHashTableSize; i++) { | 375 for (int i = 0; i < kHashTableSize; i++) { |
337 for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) { | 376 for (Bucket* curr = bucket_table_[i]; curr != 0; curr = curr->next) { |
338 list[bucket_count++] = curr; | 377 list[bucket_count++] = curr; |
339 } | 378 } |
340 } | 379 } |
(...skipping 92 matching lines...)
433 RAW_DCHECK(bucket_length < size, ""); | 472 RAW_DCHECK(bucket_length < size, ""); |
434 | 473 |
435 dealloc_(list); | 474 dealloc_(list); |
436 | 475 |
437 RAW_DCHECK(buf + bucket_length <= map_start, ""); | 476 RAW_DCHECK(buf + bucket_length <= map_start, ""); |
438 memmove(buf + bucket_length, map_start, map_length); // close the gap | 477 memmove(buf + bucket_length, map_start, map_length); // close the gap |
439 | 478 |
440 return bucket_length + map_length; | 479 return bucket_length + map_length; |
441 } | 480 } |
442 | 481 |
| 482 // TODO(jamescook): Make a subclass and override this method. |
| 483 int HeapProfileTable::FillOrderedProfile2(char buffer[], |
| 484 int buffer_size) const { |
| 485 Bucket** list = MakeSortedBucketList(); |
| 486 |
| 487 Stats stats; |
| 488 memset(&stats, 0, sizeof(stats)); |
| 489 int written = snprintf(buffer, buffer_size, "[\n"); |
| 490 written = UnparseBucket2(total_, buffer, written, buffer_size, &stats); |
| 491 |
| 492 for (int i = 0; i < num_buckets_; i++) { |
| 493 written += snprintf(buffer + written, buffer_size - written, ",\n"); |
| 494 written = UnparseBucket2(*list[i], buffer, written, buffer_size, &stats); |
| 495 } |
| 496 RAW_DCHECK(written < buffer_size, ""); |
| 497 |
| 498 written += snprintf(buffer + written, buffer_size - written, "\n]\n"); |
| 499 |
| 500 dealloc_(list); |
| 501 |
| 502 return written; |
| 503 } |
| 504 |
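FillOrderedProfile2 wraps those records in a JSON array: it writes "[\n", unparses the total_ summary bucket first, prefixes every following bucket with ",\n", and closes with "\n]\n". A self-contained sketch of that framing, with two canned strings standing in for UnparseBucket2 output (illustration only, not part of this change):

#include <stdio.h>

int main() {
  // Canned stand-ins for what UnparseBucket2 would emit.
  const char* records[] = {
    "{\"trace\": \"total\", \"current_allocs\": 8}",
    "{\"trace\": \"frame0 \", \"current_allocs\": 2}",
  };
  const int kRecords = sizeof(records) / sizeof(records[0]);
  char buf[256];
  int written = snprintf(buf, sizeof(buf), "[\n%s", records[0]);
  for (int i = 1; i < kRecords; i++) {
    written += snprintf(buf + written, sizeof(buf) - written, ",\n%s",
                        records[i]);
  }
  written += snprintf(buf + written, sizeof(buf) - written, "\n]\n");
  fwrite(buf, 1, written, stdout);
  // Output:
  // [
  // {"trace": "total", "current_allocs": 8},
  // {"trace": "frame0 ", "current_allocs": 2}
  // ]
  return 0;
}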
443 // static | 505 // static |
444 void HeapProfileTable::DumpBucketIterator(const Bucket* bucket, | 506 void HeapProfileTable::DumpBucketIterator(const Bucket* bucket, |
445 BufferArgs* args) { | 507 BufferArgs* args) { |
446 args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize, | 508 args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize, |
447 "", NULL); | 509 "", NULL); |
448 } | 510 } |
449 | 511 |
450 #if defined(TYPE_PROFILING) | 512 #if defined(TYPE_PROFILING) |
451 // static | 513 // static |
452 void HeapProfileTable::TallyTypesItererator( | 514 void HeapProfileTable::TallyTypesItererator( |
(...skipping 288 matching lines...)
741 char* unused) { | 803 char* unused) { |
742 // Perhaps also log the allocation stack trace (unsymbolized) | 804 // Perhaps also log the allocation stack trace (unsymbolized) |
743 // on this line in case somebody finds it useful. | 805 // on this line in case somebody finds it useful. |
744 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); | 806 RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr); |
745 } | 807 } |
746 | 808 |
747 void HeapProfileTable::Snapshot::ReportIndividualObjects() { | 809 void HeapProfileTable::Snapshot::ReportIndividualObjects() { |
748 char unused; | 810 char unused; |
749 map_.Iterate(ReportObject, &unused); | 811 map_.Iterate(ReportObject, &unused); |
750 } | 812 } |