| OLD | NEW |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/debug/activity_tracker.h" | 5 #include "base/debug/activity_tracker.h" |
| 6 | 6 |
| 7 #include "base/debug/stack_trace.h" | 7 #include "base/debug/stack_trace.h" |
| 8 #include "base/files/file.h" | 8 #include "base/files/file.h" |
| 9 #include "base/files/file_path.h" | 9 #include "base/files/file_path.h" |
| 10 #include "base/files/memory_mapped_file.h" | 10 #include "base/files/memory_mapped_file.h" |
| (...skipping 482 matching lines...) |
| 493 StringPiece name, | 493 StringPiece name, |
| 494 int stack_depth) { | 494 int stack_depth) { |
| 495 CreateWithAllocator( | 495 CreateWithAllocator( |
| 496 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)), | 496 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)), |
| 497 stack_depth); | 497 stack_depth); |
| 498 } | 498 } |
| 499 | 499 |
| 500 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { | 500 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { |
| 501 DCHECK(!this_thread_tracker_.Get()); | 501 DCHECK(!this_thread_tracker_.Get()); |
| 502 | 502 |
| 503 PersistentMemoryAllocator::Reference mem_reference = 0; | 503 PersistentMemoryAllocator::Reference mem_reference = |
| 504 void* mem_base = nullptr; | 504 PersistentMemoryAllocator::kReferenceNull; |
| 505 DCHECK(!mem_reference); // kReferenceNull must test false with "!". |
| 505 | 506 |
| 506 // Get the current count of available memories, acquiring the array values. | 507 while (true) { |
| 507 int count = available_memories_count_.load(std::memory_order_acquire); | 508 // Get the next available memory from the top of the stack. |
| 508 while (count > 0) { | 509 if (!available_memories_.pop(&mem_reference)) |
| 509 // There is a memory block that was previously released (and zeroed) so | 510 break; |
| 510 // just re-use that rather than allocating a new one. Use "relaxed" because | |
| 511 // the value is guarded by the |count| "acquire". A zero reference replaces | |
| 512 // the existing value so that it can't be used by another thread that | |
| 513 // manages to interrupt this one before the count can be decremented. | |
| 514 // A zero reference is also required for the "push" operation to work | |
| 515 // once the count finally does get decremented. | |
| 516 mem_reference = | |
| 517 available_memories_[count - 1].exchange(0, std::memory_order_relaxed); | |
| 518 | 511 |
| 519 // If the reference is zero, it's already been taken but count hasn't yet | 512 // Turn the reference back into one of the activity-tracker type. This can |
| 520 // been decremented. Give that other thread a chance to finish then reload | 513 // fail if something else has already taken the block and changed its type. |
| 521 // the "count" value and try again. | 514 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker, |
| 522 if (!mem_reference) { | 515 kTypeIdActivityTrackerFree)) { |
| 523 PlatformThread::YieldCurrentThread(); | 516 break; |
| 524 count = available_memories_count_.load(std::memory_order_acquire); | |
| 525 continue; | |
| 526 } | 517 } |
| 527 | |
| 528 // Decrement the count indicating that the value has been taken. If this | |
| 529 // fails then another thread has pushed something new and incremented the | |
| 530 // count. | |
| 531 // NOTE: |oldcount| will be loaded with the existing value. | |
| 532 int oldcount = count; | |
| 533 if (!available_memories_count_.compare_exchange_strong( | |
| 534 oldcount, count - 1, std::memory_order_acquire, | |
| 535 std::memory_order_acquire)) { | |
| 536 DCHECK_LT(count, oldcount); | |
| 537 | |
| 538 // Restore the reference that was zeroed above and try again. | |
| 539 available_memories_[count - 1].store(mem_reference, | |
| 540 std::memory_order_relaxed); | |
| 541 count = oldcount; | |
| 542 continue; | |
| 543 } | |
| 544 | |
| 545 // Turn the reference back into one of the activity-tracker type. | |
| 546 mem_base = allocator_->GetAsObject<char>(mem_reference, | |
| 547 kTypeIdActivityTrackerFree); | |
| 548 DCHECK(mem_base); | |
| 549 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference)); | |
| 550 bool changed = allocator_->ChangeType(mem_reference, kTypeIdActivityTracker, | |
| 551 kTypeIdActivityTrackerFree); | |
| 552 DCHECK(changed); | |
| 553 | |
| 554 // Success. | |
| 555 break; | |
| 556 } | 518 } |
| 557 | 519 |
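Note on the new claim protocol: the pop/ChangeType loop above is safe under contention only because ChangeType() atomically swaps the block's type word, so of several threads racing for the same reference at most one can convert it from "free" to "in use". A minimal sketch of that idea follows; the atomic type field and the constant values are illustrative assumptions, not the PersistentMemoryAllocator internals:

    // Sketch only: ChangeType() as a claim operation.
    #include <atomic>
    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kTypeIdActivityTracker = 1;      // hypothetical value
    constexpr uint32_t kTypeIdActivityTrackerFree = 2;  // hypothetical value

    struct Block {
      std::atomic<uint32_t> type{kTypeIdActivityTrackerFree};
    };

    // Mirrors ChangeType(ref, to_type, from_type): succeeds only if the
    // block still holds |from_type|, so at most one racing claimer wins.
    bool ChangeType(Block& block, uint32_t to_type, uint32_t from_type) {
      return block.type.compare_exchange_strong(from_type, to_type,
                                                std::memory_order_acq_rel);
    }

    int main() {
      Block block;
      assert(ChangeType(block, kTypeIdActivityTracker,
                        kTypeIdActivityTrackerFree));
      // A second claim of the same block fails; that loser simply goes
      // back and pops another reference.
      assert(!ChangeType(block, kTypeIdActivityTracker,
                         kTypeIdActivityTrackerFree));
      return 0;
    }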
| 558 // Handle the case where no previously-used memories are available. | 520 // Handle the case where no known available memories were found. |
| 559 if (count == 0) { | 521 if (!mem_reference) { |
| 560 // Allocate a block of memory from the persistent segment. | 522 // Allocate a block of memory from the persistent segment. |
| 561 mem_reference = | 523 mem_reference = |
| 562 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker); | 524 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker); |
| 563 if (mem_reference) { | 525 if (mem_reference) { |
| 564 // Success. Convert the reference to an actual memory address. | 526 // Success. Make the allocation iterable so it can be found later. |
| 565 mem_base = | |
| 566 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker); | |
| 567 // Make the allocation iterable so it can be found by other processes. | |
| 568 allocator_->MakeIterable(mem_reference); | 527 allocator_->MakeIterable(mem_reference); |
| 569 } else { | 528 } else { |
| 570 // Failure. This shouldn't happen. | 529 // Failure. Look for a free block that wasn't held in the cache |
| 571 NOTREACHED(); | 530 // of available memories and try to claim it. This can happen if the |
| 572 // But if it does, probably because the allocator wasn't given enough | 531 // |available_memories_| stack isn't sufficiently large to hold all |
| 573 // memory to satisfy all possible requests, handle it gracefully by | 532 // released memories or if multiple independent processes are sharing |
| 574 // allocating the required memory from the heap. | 533 // the memory segment. |
| 575 mem_base = new char[stack_memory_size_]; | 534 PersistentMemoryAllocator::Iterator iter(allocator_.get()); |
| 576 memset(mem_base, 0, stack_memory_size_); | 535 while ((mem_reference = iter.GetNextOfType(kTypeIdActivityTrackerFree)) != |
| 577 // Report the thread-count at which the allocator was full so that the | 536 0) { |
| 578 // failure can be seen and underlying memory resized appropriately. | 537 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker, |
| 579 UMA_HISTOGRAM_COUNTS_1000( | 538 kTypeIdActivityTrackerFree)) { |
| 580 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount", | 539 break; |
| 581 thread_tracker_count_.load(std::memory_order_relaxed)); | 540 } |
| 541 mem_reference = 0; |
| 542 } |
| 543 if (!mem_reference) { |
| 544 // Double failure. This shouldn't happen. But be graceful if it does, |
| 545 // probably because the underlying allocator wasn't given enough memory |
| 546 // to satisfy all possible requests. |
| 547 NOTREACHED(); |
| 548 // Report the thread-count at which the allocator was full so that the |
| 549 // failure can be seen and underlying memory resized appropriately. |
| 550 UMA_HISTOGRAM_COUNTS_1000( |
| 551 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount", |
| 552 thread_tracker_count_.load(std::memory_order_relaxed)); |
| 553 // Return null, just as if tracking wasn't enabled. |
| 554 return nullptr; |
| 555 } |
| 582 } | 556 } |
| 583 } | 557 } |
| 584 | 558 |
| 559 // Convert the memory block found above into an actual memory address. |
| 560 DCHECK(mem_reference); |
| 561 void* mem_base = |
| 562 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker); |
| 563 DCHECK(mem_base); |
| 564 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference)); |
| 565 |
| 585 // Create a tracker with the acquired memory and set it as the tracker | 566 // Create a tracker with the acquired memory and set it as the tracker |
| 586 // for this particular thread in thread-local-storage. | 567 // for this particular thread in thread-local-storage. |
| 587 DCHECK(mem_base); | |
| 588 ManagedActivityTracker* tracker = | 568 ManagedActivityTracker* tracker = |
| 589 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_); | 569 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_); |
| 590 DCHECK(tracker->IsValid()); | 570 DCHECK(tracker->IsValid()); |
| 591 this_thread_tracker_.Set(tracker); | 571 this_thread_tracker_.Set(tracker); |
| 592 int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed); | 572 int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed); |
| 593 | 573 |
| 594 UMA_HISTOGRAM_ENUMERATION("ActivityTracker.ThreadTrackers.Count", | 574 UMA_HISTOGRAM_ENUMERATION("ActivityTracker.ThreadTrackers.Count", |
| 595 old_count + 1, kMaxThreadCount); | 575 old_count + 1, kMaxThreadCount); |
| 596 return tracker; | 576 return tracker; |
| 597 } | 577 } |
| 598 | 578 |
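For the call side, a usage sketch. This assumes the creation helper whose tail is visible at the top of this diff is CreateWithLocalMemory(size, id, name, stack_depth) and that GlobalActivityTracker::Get() returns the process-wide instance; the parameter values are made up for illustration:

    #include "base/debug/activity_tracker.h"

    void EnableTrackingForCurrentThread() {
      // One-time, process-wide setup: 1 MiB segment, arbitrary id/name,
      // stack depth of 4 -- all illustrative values, not CL defaults.
      base::debug::GlobalActivityTracker::CreateWithLocalMemory(
          1 << 20, 0x1234, "ActivityTrackerSketch", 4);

      // Per-thread setup. With this CL the result may be null when the
      // persistent segment is exhausted; callers should then proceed as
      // if tracking had never been enabled.
      base::debug::ThreadActivityTracker* tracker =
          base::debug::GlobalActivityTracker::Get()
              ->CreateTrackerForCurrentThread();
      if (!tracker)
        return;  // Tracking unavailable.
    }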
| 599 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { | 579 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { |
| 600 ThreadActivityTracker* tracker = | 580 ThreadActivityTracker* tracker = |
| 601 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); | 581 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); |
| 602 if (tracker) | 582 if (tracker) |
| 603 delete tracker; | 583 delete tracker; |
| 604 } | 584 } |
| 605 | 585 |
| 606 GlobalActivityTracker::GlobalActivityTracker( | 586 GlobalActivityTracker::GlobalActivityTracker( |
| 607 std::unique_ptr<PersistentMemoryAllocator> allocator, | 587 std::unique_ptr<PersistentMemoryAllocator> allocator, |
| 608 int stack_depth) | 588 int stack_depth) |
| 609 : allocator_(std::move(allocator)), | 589 : allocator_(std::move(allocator)), |
| 610 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), | 590 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), |
| 611 this_thread_tracker_(&OnTLSDestroy), | 591 this_thread_tracker_(&OnTLSDestroy), |
| 612 thread_tracker_count_(0), | 592 thread_tracker_count_(0), |
| 613 available_memories_count_(0) { | 593 available_memories_(kMaxThreadCount) { |
| 614 // Clear the available-memories array. | |
| 615 memset(available_memories_, 0, sizeof(available_memories_)); | |
| 616 | |
| 617 // Ensure the passed memory is valid and empty (iterator finds nothing). | 594 // Ensure the passed memory is valid and empty (iterator finds nothing). |
| 618 uint32_t type; | 595 uint32_t type; |
| 619 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); | 596 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); |
| 620 | 597 |
| 621 // Ensure that there is no other global object and then make this one such. | 598 // Ensure that there is no other global object and then make this one such. |
| 622 DCHECK(!g_tracker_); | 599 DCHECK(!g_tracker_); |
| 623 g_tracker_ = this; | 600 g_tracker_ = this; |
| 624 } | 601 } |
| 625 | 602 |
| 626 GlobalActivityTracker::~GlobalActivityTracker() { | 603 GlobalActivityTracker::~GlobalActivityTracker() { |
| 627 DCHECK_EQ(g_tracker_, this); | 604 DCHECK_EQ(g_tracker_, this); |
| 628 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); | 605 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); |
| 629 g_tracker_ = nullptr; | 606 g_tracker_ = nullptr; |
| 630 } | 607 } |
| 631 | 608 |
| 632 void GlobalActivityTracker::ReturnTrackerMemory( | 609 void GlobalActivityTracker::ReturnTrackerMemory( |
| 633 ManagedActivityTracker* tracker) { | 610 ManagedActivityTracker* tracker) { |
| 634 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; | 611 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; |
| 635 void* mem_base = tracker->mem_base_; | 612 void* mem_base = tracker->mem_base_; |
| 613 DCHECK(mem_reference); |
| 614 DCHECK(mem_base); |
| 636 | 615 |
| 637 // Zero the memory so that it is ready for use if needed again later. It's | 616 // Zero the memory so that it is ready for use if needed again later. It's |
| 638 // better to clear the memory now, when a thread is exiting, than to do it | 617 // better to clear the memory now, when a thread is exiting, than to do it |
| 639 // when it is first needed by a thread doing actual work. | 618 // when it is first needed by a thread doing actual work. |
| 640 memset(mem_base, 0, stack_memory_size_); | 619 memset(mem_base, 0, stack_memory_size_); |
| 641 | 620 |
| 642 // Remove the destructed tracker from the set of known ones. | 621 // Remove the destructed tracker from the set of known ones. |
| 643 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); | 622 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); |
| 644 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); | 623 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); |
| 645 | 624 |
| 646 // Deal with the memory that was used by the tracker. | 625 // The memory was within the persistent memory allocator. Change its type |
| 647 if (mem_reference) { | 626 // so it is effectively marked as "free". |
| 648 // The memory was within the persistent memory allocator. Change its type | 627 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree, |
| 649 // so that iteration won't find it. | 628 kTypeIdActivityTracker); |
| 650 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree, | |
| 651 kTypeIdActivityTracker); | |
| 652 // There is no way to free memory from a persistent allocator so instead | |
| 653 // push it on the internal list of available memory blocks. | |
| 654 while (true) { | |
| 655 // Get the existing count of available memories and ensure we won't | |
| 656 // burst the array. Acquire the values in the array. | |
| 657 int count = available_memories_count_.load(std::memory_order_acquire); | |
| 658 if (count >= kMaxThreadCount) { | |
| 659 NOTREACHED(); | |
| 660 // Storage is full. Just forget about this memory. It won't be re-used | |
| 661 // but there's no real loss. | |
| 662 break; | |
| 663 } | |
| 664 | 629 |
| 665 // Write the reference of the memory being returned to this slot in the | 630 // Push this on the internal cache of available memory blocks so it can |
| 666 // array. Empty slots have a value of zero so do an atomic compare-and- | 631 // be found and reused quickly. If the cache is full the push fails, |
| 667 // exchange to ensure that a race condition doesn't exist with another | 632 // but the fallback in CreateTrackerForCurrentThread will still find |
| 668 // thread doing the same. | 633 // the block by (slow) iteration. |
| 669 PersistentMemoryAllocator::Reference mem_expected = 0; | 634 available_memories_.push(mem_reference); |
| 670 if (!available_memories_[count].compare_exchange_strong( | |
| 671 mem_expected, mem_reference, std::memory_order_release, | |
| 672 std::memory_order_relaxed)) { | |
| 673 PlatformThread::YieldCurrentThread(); | |
| 674 continue; // Try again. | |
| 675 } | |
| 676 | |
| 677 // Increment the count, releasing the value written to the array. This | |
| 678 // could fail if a simultaneous "pop" operation decremented the counter. | |
| 679 // If that happens, clear the array slot and start over. Do a "strong" | |
| 680 // exchange to avoid spurious retries that can occur with a "weak" one. | |
| 681 int expected = count; // Updated by compare/exchange. | |
| 682 if (!available_memories_count_.compare_exchange_strong( | |
| 683 expected, count + 1, std::memory_order_release, | |
| 684 std::memory_order_relaxed)) { | |
| 685 available_memories_[count].store(0, std::memory_order_relaxed); | |
| 686 continue; | |
| 687 } | |
| 688 | |
| 689 // Count was successfully incremented to reflect the newly added value. | |
| 690 break; | |
| 691 } | |
| 692 } else { | |
| 693 // The memory was allocated from the process heap. This shouldn't happen | |
| 694 // because the persistent memory segment should be big enough for all | |
| 695 // thread stacks but it's better to support falling back to allocation | |
| 696 // from the heap rather than crash. Everything will work as normal but | |
| 697 // the data won't be persisted. | |
| 698 delete[] reinterpret_cast<char*>(mem_base); | |
| 699 } | |
| 700 } | 635 } |
| 701 | 636 |
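Note on the cache contract: the push/pop pair only needs loose semantics. pop() may fail when the cache is empty (CreateTrackerForCurrentThread falls back to Allocate() and then to the GetNextOfType() scan), and push() may silently drop a reference when the cache is full, since the block remains typed "free" in the persistent segment and the slow scan recovers it. A semantics-only sketch, using a mutex for clarity even though the real container is presumably lock-free and is not part of this diff:

    #include <cstddef>
    #include <cstdint>
    #include <mutex>
    #include <vector>

    // Models only the observable behavior assumed by this CL; Reference
    // stands in for PersistentMemoryAllocator::Reference.
    using Reference = uint32_t;

    class BoundedRefStack {
     public:
      explicit BoundedRefStack(size_t max_size) : max_size_(max_size) {}

      // Returns false when empty; the caller falls back to allocation or
      // to iterating the persistent segment.
      bool pop(Reference* out) {
        std::lock_guard<std::mutex> lock(lock_);
        if (refs_.empty())
          return false;
        *out = refs_.back();
        refs_.pop_back();
        return true;
      }

      // Overflow is tolerated: a dropped reference is still typed "free"
      // in the segment, so the GetNextOfType() scan can reclaim it later.
      void push(Reference ref) {
        std::lock_guard<std::mutex> lock(lock_);
        if (refs_.size() < max_size_)
          refs_.push_back(ref);
      }

     private:
      const size_t max_size_;
      std::mutex lock_;
      std::vector<Reference> refs_;
    };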
| 702 // static | 637 // static |
| 703 void GlobalActivityTracker::OnTLSDestroy(void* value) { | 638 void GlobalActivityTracker::OnTLSDestroy(void* value) { |
| 704 delete reinterpret_cast<ManagedActivityTracker*>(value); | 639 delete reinterpret_cast<ManagedActivityTracker*>(value); |
| 705 } | 640 } |
| 706 | 641 |
| 707 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, | 642 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, |
| 708 uint8_t action, | 643 uint8_t action, |
| 709 uint32_t id, | 644 uint32_t id, |
| (...skipping 60 matching lines...) |
| 770 const base::Process* process) | 705 const base::Process* process) |
| 771 : GlobalActivityTracker::ScopedThreadActivity( | 706 : GlobalActivityTracker::ScopedThreadActivity( |
| 772 nullptr, | 707 nullptr, |
| 773 Activity::ACT_PROCESS_WAIT, | 708 Activity::ACT_PROCESS_WAIT, |
| 774 ActivityData::ForProcess(process->Pid()), | 709 ActivityData::ForProcess(process->Pid()), |
| 775 /*lock_allowed=*/true) {} | 710 /*lock_allowed=*/true) {} |
| 776 #endif | 711 #endif |
| 777 | 712 |
| 778 } // namespace debug | 713 } // namespace debug |
| 779 } // namespace base | 714 } // namespace base |