Chromium Code Reviews

Diff: third_party/tcmalloc/chromium/src/heap-profile-table.cc

Issue 15418002: Record Chrome trace events in tcmalloc heap profiles (Closed)
Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: review comments 2 | Created 7 years, 5 months ago
@@ -1,10 +1,10 @@
 // Copyright (c) 2006, Google Inc.
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 // notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
(...skipping 113 matching lines...)
@@ -124,20 +124,21 @@
 
 //----------------------------------------------------------------------
 
 HeapProfileTable::HeapProfileTable(Allocator alloc,
                                    DeAllocator dealloc,
                                    bool profile_mmap)
     : alloc_(alloc),
       dealloc_(dealloc),
       bucket_table_(NULL),
       profile_mmap_(profile_mmap),
+      profile_self_maps_(true),
       num_buckets_(0),
       address_map_(NULL) {
   // Make a hash table for buckets.
   const int table_bytes = kHashTableSize * sizeof(*bucket_table_);
   bucket_table_ = static_cast<Bucket**>(alloc_(table_bytes));
   memset(bucket_table_, 0, table_bytes);
 
   // Make an allocation map.
   address_map_ =
       new(alloc_(sizeof(AllocationMap))) AllocationMap(alloc_, dealloc_);
(...skipping 242 matching lines...)
@@ -386,61 +387,74 @@
   AllocContextInfo info;
   for (int i = 0; i < num_buckets_; ++i) {
     *static_cast<Stats*>(&info) = *static_cast<Stats*>(list[i]);
     info.stack_depth = list[i]->depth;
     info.call_stack = list[i]->stack;
     callback(info);
   }
   dealloc_(list);
 }
 
+void HeapProfileTable::DisableProfileSelfMaps() {
+  profile_self_maps_ = false;
+}
+
 int HeapProfileTable::FillOrderedProfile(char buf[], int size) const {
   Bucket** list = MakeSortedBucketList();
 
-  // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
-  // In the cases buf is too small, we'd rather leave out the last
-  // buckets than leave out the /proc/self/maps info. To ensure that,
-  // we actually print the /proc/self/maps info first, then move it to
-  // the end of the buffer, then write the bucket info into whatever
-  // is remaining, and then move the maps info one last time to close
-  // any gaps. Whew!
-  int map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
-  if (map_length < 0 || map_length >= size) return 0;
-  bool dummy;  // "wrote_all" -- did /proc/self/maps fit in its entirety?
-  map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
-  RAW_DCHECK(map_length <= size, "");
-  char* const map_start = buf + size - map_length;  // move to end
-  memmove(map_start, buf, map_length);
-  size -= map_length;
+  int map_length = 0;
+  char* map_start = NULL;
+  // The data from /proc/self/maps is not required for pseudo-stack profiles
+  // and increases the size of the profile dumps significantly.

    Dai Mikurube (NOT FULLTIME) 2013/07/01 05:47:42: Is maps so large? Is large output so serious? I…
    James Cook 2013/07/01 16:59:16: There's also a problem where attempting to read /p…
    James Cook 2013/07/01 22:37:16: The GPU process hang is due to the security sandbo…
    Dai Mikurube (NOT FULLTIME) 2013/07/02 01:39:06: Ah, makes sense. Sorry that I forgot it. In case o…

+  if (profile_self_maps_) {
+    // Our file format is "bucket, bucket, ..., bucket, proc_self_maps_info".
+    // In the cases buf is too small, we'd rather leave out the last
+    // buckets than leave out the /proc/self/maps info. To ensure that,
+    // we actually print the /proc/self/maps info first, then move it to
+    // the end of the buffer, then write the bucket info into whatever
+    // is remaining, and then move the maps info one last time to close
+    // any gaps. Whew!
+    map_length = snprintf(buf, size, "%s", kProcSelfMapsHeader);
+    if (map_length < 0 || map_length >= size) return 0;
+    bool dummy;  // "wrote_all" -- did /proc/self/maps fit in its entirety?
+    map_length += FillProcSelfMaps(buf + map_length, size - map_length, &dummy);
+    RAW_DCHECK(map_length <= size, "");
+    map_start = buf + size - map_length;  // move to end
+    memmove(map_start, buf, map_length);
+    size -= map_length;
+  }
 
   Stats stats;
   memset(&stats, 0, sizeof(stats));
   int bucket_length = snprintf(buf, size, "%s", kProfileHeader);
   if (bucket_length < 0 || bucket_length >= size) return 0;
   bucket_length = UnparseBucket(total_, buf, bucket_length, size,
                                 " heapprofile", &stats);
 
   // Dump the mmap list first.
   if (profile_mmap_) {
     BufferArgs buffer(buf, bucket_length, size);
     MemoryRegionMap::IterateBuckets<BufferArgs*>(DumpBucketIterator, &buffer);
     bucket_length = buffer.buflen;
   }
 
   for (int i = 0; i < num_buckets_; i++) {
     bucket_length = UnparseBucket(*list[i], buf, bucket_length, size, "",
                                   &stats);
   }
   RAW_DCHECK(bucket_length < size, "");
 
   dealloc_(list);
 
+  if (!profile_self_maps_)
+    return bucket_length;
+
   RAW_DCHECK(buf + bucket_length <= map_start, "");
   memmove(buf + bucket_length, map_start, map_length);  // close the gap
 
   return bucket_length + map_length;
 }
 
 // static
 void HeapProfileTable::DumpBucketIterator(const Bucket* bucket,
                                           BufferArgs* args) {
   args->buflen = UnparseBucket(*bucket, args->buf, args->buflen, args->bufsize,
(...skipping 294 matching lines...)
@@ -741,10 +755,10 @@
                                      char* unused) {
   // Perhaps also log the allocation stack trace (unsymbolized)
   // on this line in case somebody finds it useful.
   RAW_LOG(ERROR, "leaked %"PRIuS" byte object %p", v->bytes, ptr);
 }
 
 void HeapProfileTable::Snapshot::ReportIndividualObjects() {
   char unused;
   map_.Iterate(ReportObject, &unused);
 }
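For reference, a toy model of what the new profile_self_maps_ flag changes in the output. This is not the real HeapProfileTable (which needs the rest of tcmalloc to build); it only shows that once DisableProfileSelfMaps() has been called, FillOrderedProfile() emits the bucket data and skips the /proc/self/maps section entirely.

// profile_self_maps_flag_sketch.cc -- illustrative only, not part of the patch.
#include <stdio.h>
#include <string>

class ProfileTableModel {
 public:
  ProfileTableModel() : profile_self_maps_(true) {}

  // Mirrors HeapProfileTable::DisableProfileSelfMaps() added by this patch.
  void DisableProfileSelfMaps() { profile_self_maps_ = false; }

  std::string FillOrderedProfile() const {
    std::string out = "heap profile: bucket data ...\n";
    if (profile_self_maps_)
      out += "MAPPED_LIBRARIES:\n00400000-00452000 r-xp ...\n";
    return out;
  }

 private:
  bool profile_self_maps_;
};

int main() {
  ProfileTableModel pseudo_stack_profile;
  // A pseudo-stack (trace-event) profile does not need the maps section,
  // so the embedder would disable it before dumping.
  pseudo_stack_profile.DisableProfileSelfMaps();
  printf("%s", pseudo_stack_profile.FillOrderedProfile().c_str());

  ProfileTableModel regular_profile;  // maps section kept by default
  printf("%s", regular_profile.FillOrderedProfile().c_str());
  return 0;
}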
