OLD | NEW |
1 /* Copyright (c) 2006, Google Inc. | 1 /* Copyright (c) 2006, Google Inc. |
2 * All rights reserved. | 2 * All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 213 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
224 // regions_ on demand for us. | 224 // regions_ on demand for us. |
225 if (use_buckets) { | 225 if (use_buckets) { |
226 const int table_bytes = kHashTableSize * sizeof(*bucket_table_); | 226 const int table_bytes = kHashTableSize * sizeof(*bucket_table_); |
227 recursive_insert = true; | 227 recursive_insert = true; |
228 bucket_table_ = static_cast<HeapProfileBucket**>( | 228 bucket_table_ = static_cast<HeapProfileBucket**>( |
229 MyAllocator::Allocate(table_bytes)); | 229 MyAllocator::Allocate(table_bytes)); |
230 recursive_insert = false; | 230 recursive_insert = false; |
231 memset(bucket_table_, 0, table_bytes); | 231 memset(bucket_table_, 0, table_bytes); |
232 num_buckets_ = 0; | 232 num_buckets_ = 0; |
233 } | 233 } |
| 234 if (regions_ == NULL) // init regions_ |
| 235 InitRegionSetLocked(); |
234 Unlock(); | 236 Unlock(); |
235 RAW_VLOG(10, "MemoryRegionMap Init done"); | 237 RAW_VLOG(10, "MemoryRegionMap Init done"); |
236 } | 238 } |
237 | 239 |
238 bool MemoryRegionMap::Shutdown() { | 240 bool MemoryRegionMap::Shutdown() { |
239 RAW_VLOG(10, "MemoryRegionMap Shutdown"); | 241 RAW_VLOG(10, "MemoryRegionMap Shutdown"); |
240 Lock(); | 242 Lock(); |
241 RAW_CHECK(client_count_ > 0, ""); | 243 RAW_CHECK(client_count_ > 0, ""); |
242 client_count_ -= 1; | 244 client_count_ -= 1; |
243 if (client_count_ != 0) { // not last client; need not really shutdown | 245 if (client_count_ != 0) { // not last client; need not really shutdown |
(...skipping 282 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
526 memset(new_bucket, 0, sizeof(*new_bucket)); | 528 memset(new_bucket, 0, sizeof(*new_bucket)); |
527 new_bucket->hash = bucket.hash; | 529 new_bucket->hash = bucket.hash; |
528 new_bucket->depth = bucket.depth; | 530 new_bucket->depth = bucket.depth; |
529 new_bucket->stack = key_copy; | 531 new_bucket->stack = key_copy; |
530 new_bucket->next = bucket_table_[hash_index]; | 532 new_bucket->next = bucket_table_[hash_index]; |
531 bucket_table_[hash_index] = new_bucket; | 533 bucket_table_[hash_index] = new_bucket; |
532 ++num_buckets_; | 534 ++num_buckets_; |
533 } | 535 } |
534 } | 536 } |
535 | 537 |
// Lazily constructs the global region set (regions_) in place.
// Must be called with the MemoryRegionMap lock held (callers check this),
// and only when regions_ is still NULL.
//
// NOTE(review): regions_rep.region_set() presumably returns raw static
// storage sized/aligned for a RegionSet — confirm against its declaration,
// which is outside this view.
inline void MemoryRegionMap::InitRegionSetLocked() {
  RAW_VLOG(12, "Initializing region set");
  regions_ = regions_rep.region_set();
  // The RegionSet constructor can itself call the allocator (see the
  // comment in InsertRegionLocked), which would re-enter region insertion.
  // Setting recursive_insert makes any such re-entrant inserts get parked
  // in saved_regions[] instead of touching the half-built set.
  recursive_insert = true;
  new(regions_) RegionSet();
  // Flush any regions that were saved while recursive_insert was set
  // (including ones saved before this call) into the now-ready set.
  HandleSavedRegionsLocked(&DoInsertRegionLocked);
  recursive_insert = false;
}
| 546 |
// Inserts 'region' into regions_, handling re-entrant calls from the
// allocator. Caller must hold the MemoryRegionMap lock.
inline void MemoryRegionMap::InsertRegionLocked(const Region& region) {
  RAW_CHECK(LockIsHeld(), "should be held (by this thread)");
  // We can be called recursively, because RegionSet constructor
  // and DoInsertRegionLocked() (called below) can call the allocator.
  // recursive_insert tells us if that's the case. When this happens,
  // region insertion information is recorded in saved_regions[],
  // and taken into account when the recursion unwinds.
  // Do the insert:
  if (recursive_insert) {  // recursion: save in saved_regions
    RAW_VLOG(12, "Saving recursive insert of region %p..%p from %p",
             reinterpret_cast<void*>(region.start_addr),
             reinterpret_cast<void*>(region.end_addr),
             reinterpret_cast<void*>(region.caller()));
    // saved_regions is a fixed-size array; overflowing it would mean an
    // unexpectedly deep allocator recursion.
    RAW_CHECK(saved_regions_count < arraysize(saved_regions), "");
    // Copy 'region' to saved_regions[saved_regions_count]
    // together with the contents of its call_stack,
    // then increment saved_regions_count.
    saved_regions[saved_regions_count++] = region;
  } else {  // not a recursive call
    if (regions_ == NULL)  // init regions_
      InitRegionSetLocked();
    // Guard the real insertion: allocations it triggers will be parked in
    // saved_regions[] and drained by HandleSavedRegionsLocked below.
    recursive_insert = true;
    // Do the actual insertion work to put new regions into regions_:
    DoInsertRegionLocked(region);
    HandleSavedRegionsLocked(&DoInsertRegionLocked);
    recursive_insert = false;
  }
}
570 | 575 |
571 // We strip out different number of stack frames in debug mode | 576 // We strip out different number of stack frames in debug mode |
572 // because less inlining happens in that case | 577 // because less inlining happens in that case |
(...skipping 228 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
801 r != regions_->end(); ++r) { | 806 r != regions_->end(); ++r) { |
802 RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" " | 807 RAW_LOG(INFO, "Memory region 0x%"PRIxPTR"..0x%"PRIxPTR" " |
803 "from 0x%"PRIxPTR" stack=%d", | 808 "from 0x%"PRIxPTR" stack=%d", |
804 r->start_addr, r->end_addr, r->caller(), r->is_stack); | 809 r->start_addr, r->end_addr, r->caller(), r->is_stack); |
805 RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order"); | 810 RAW_CHECK(previous < r->end_addr, "wow, we messed up the set order"); |
806 // this must be caused by uncontrolled recursive operations on regions_ | 811 // this must be caused by uncontrolled recursive operations on regions_ |
807 previous = r->end_addr; | 812 previous = r->end_addr; |
808 } | 813 } |
809 RAW_LOG(INFO, "End of regions list"); | 814 RAW_LOG(INFO, "End of regions list"); |
810 } | 815 } |
OLD | NEW |