OLD | NEW |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/debug/activity_tracker.h" | 5 #include "base/debug/activity_tracker.h" |
6 | 6 |
7 #include "base/debug/stack_trace.h" | 7 #include "base/debug/stack_trace.h" |
8 #include "base/files/file.h" | 8 #include "base/files/file.h" |
9 #include "base/files/file_path.h" | 9 #include "base/files/file_path.h" |
10 #include "base/files/memory_mapped_file.h" | 10 #include "base/files/memory_mapped_file.h" |
(...skipping 480 matching lines...)
491 StringPiece name, | 491 StringPiece name, |
492 int stack_depth) { | 492 int stack_depth) { |
493 CreateWithAllocator( | 493 CreateWithAllocator( |
494 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)), | 494 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)), |
495 stack_depth); | 495 stack_depth); |
496 } | 496 } |
497 | 497 |
498 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { | 498 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { |
499 DCHECK(!this_thread_tracker_.Get()); | 499 DCHECK(!this_thread_tracker_.Get()); |
500 | 500 |
501 PersistentMemoryAllocator::Reference mem_reference = 0; | 501 PersistentMemoryAllocator::Reference mem_reference = |
502 void* mem_base = nullptr; | 502 available_memories_.invalid_value(); |
| 503 DCHECK(!mem_reference); // invalid_value should be checkable with ! |
503 | 504 |
504 // Get the current count of available memories, acquiring the array values. | 505 while (true) { |
505 int count = available_memories_count_.load(std::memory_order_acquire); | 506 // Get the first available memory from the top of the stack. |
506 while (count > 0) { | 507 mem_reference = available_memories_.pop(); |
507 // There is a memory block that was previously released (and zeroed) so | 508 if (!mem_reference) |
508 // just re-use that rather than allocating a new one. Use "relaxed" because | 509 break; |
509 // the value is guarded by the |count| "acquire". A zero reference replaces | |
510 // the existing value so that it can't be used by another thread that | |
511 // manages to interrupt this one before the count can be decremented. | |
512 // A zero reference is also required for the "push" operation to work | |
513 // once the count finally does get decremented. | |
514 mem_reference = | |
515 available_memories_[count - 1].exchange(0, std::memory_order_relaxed); | |
516 | 510 |
517 // If the reference is zero, it's already been taken but count hasn't yet | 511 // Turn the reference back into one of the activity-tracker type. This can |
518 // been decremented. Give that other thread a chance to finish then reload | 512 // fail if something else has already taken the block and changed its type. |
519 // the "count" value and try again. | 513 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker, |
520 if (!mem_reference) { | 514 kTypeIdActivityTrackerFree)) { |
521 PlatformThread::YieldCurrentThread(); | 515 break; |
522 count = available_memories_count_.load(std::memory_order_acquire); | |
523 continue; | |
524 } | 516 } |
525 | |
526 // Decrement the count indicating that the value has been taken. If this | |
527 // fails then another thread has pushed something new and incremented the | |
528 // count. | |
529 // NOTE: |oldcount| will be loaded with the existing value. | |
530 int oldcount = count; | |
531 if (!available_memories_count_.compare_exchange_strong( | |
532 oldcount, count - 1, std::memory_order_acquire, | |
533 std::memory_order_acquire)) { | |
534 DCHECK_LT(count, oldcount); | |
535 | |
536 // Restore the reference that was zeroed above and try again. | |
537 available_memories_[count - 1].store(mem_reference, | |
538 std::memory_order_relaxed); | |
539 count = oldcount; | |
540 continue; | |
541 } | |
542 | |
543 // Turn the reference back into one of the activity-tracker type. | |
544 mem_base = allocator_->GetAsObject<char>(mem_reference, | |
545 kTypeIdActivityTrackerFree); | |
546 DCHECK(mem_base); | |
547 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference)); | |
548 bool changed = allocator_->ChangeType(mem_reference, kTypeIdActivityTracker, | |
549 kTypeIdActivityTrackerFree); | |
550 DCHECK(changed); | |
551 | |
552 // Success. | |
553 break; | |
554 } | 517 } |
555 | 518 |
556 // Handle the case where no previously-used memories are available. | 519 // Handle the case where the cache yielded no available memory. |
557 if (count == 0) { | 520 if (!mem_reference) { |
558 // Allocate a block of memory from the persistent segment. | 521 // Allocate a block of memory from the persistent segment. |
559 mem_reference = | 522 mem_reference = |
560 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker); | 523 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker); |
561 if (mem_reference) { | 524 if (mem_reference) { |
562 // Success. Convert the reference to an actual memory address. | 525 // Success. Make the allocation iterable so it can be found later. |
563 mem_base = | |
564 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker); | |
565 // Make the allocation iterable so it can be found by other processes. | |
566 allocator_->MakeIterable(mem_reference); | 526 allocator_->MakeIterable(mem_reference); |
567 } else { | 527 } else { |
568 // Failure. This shouldn't happen. | 528 // Failure. Look for any free blocks that weren't held in the cache |
569 NOTREACHED(); | 529 // of available memories and try to claim one. This can happen if the |
570 // But if it does, probably because the allocator wasn't given enough | 530 // |available_memories_| stack isn't sufficiently large to hold all |
571 // memory to satisfy all possible requests, handle it gracefully by | 531 // released memories or if multiple independent processes are sharing |
572 // allocating the required memory from the heap. | 532 // the memory segment. |
573 mem_base = new char[stack_memory_size_]; | 533 PersistentMemoryAllocator::Iterator iter(allocator_.get()); |
574 memset(mem_base, 0, stack_memory_size_); | 534 while ((mem_reference = iter.GetNextOfType(kTypeIdActivityTrackerFree)) != |
575 // Report the thread-count at which the allocator was full so that the | 535 0) { |
576 // failure can be seen and underlying memory resized appropriately. | 536 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker, |
577 UMA_HISTOGRAM_COUNTS_1000( | 537 kTypeIdActivityTrackerFree)) { |
578 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount", | 538 break; |
579 thread_tracker_count_.load(std::memory_order_relaxed)); | 539 } |
| 540 mem_reference = 0; |
| 541 } |
| 542 if (!mem_reference) { |
| 543 // Double failure. This shouldn't happen. But be graceful if it does, |
| 544 // probably because the underlying allocator wasn't given enough memory |
| 545 // to satisfy all possible requests. |
| 546 NOTREACHED(); |
| 547 // Report the thread-count at which the allocator was full so that the |
| 548 // failure can be seen and underlying memory resized appropriately. |
| 549 UMA_HISTOGRAM_COUNTS_1000( |
| 550 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount", |
| 551 thread_tracker_count_.load(std::memory_order_relaxed)); |
| 552 // Return null, just as if tracking wasn't enabled. |
| 553 return nullptr; |
| 554 } |
580 } | 555 } |
581 } | 556 } |
582 | 557 |
| 558 // Convert the memory block found above into an actual memory address. |
| 559 DCHECK(mem_reference); |
| 560 void* mem_base = |
| 561 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker); |
| 562 DCHECK(mem_base); |
| 563 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference)); |
| 564 |
583 // Create a tracker with the acquired memory and set it as the tracker | 565 // Create a tracker with the acquired memory and set it as the tracker |
584 // for this particular thread in thread-local-storage. | 566 // for this particular thread in thread-local-storage. |
585 DCHECK(mem_base); | |
586 ManagedActivityTracker* tracker = | 567 ManagedActivityTracker* tracker = |
587 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_); | 568 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_); |
588 DCHECK(tracker->IsValid()); | 569 DCHECK(tracker->IsValid()); |
589 this_thread_tracker_.Set(tracker); | 570 this_thread_tracker_.Set(tracker); |
590 int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed); | 571 int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed); |
591 | 572 |
592 UMA_HISTOGRAM_ENUMERATION("ActivityTracker.ThreadTrackers.Count", | 573 UMA_HISTOGRAM_ENUMERATION("ActivityTracker.ThreadTrackers.Count", |
593 old_count + 1, kMaxThreadCount); | 574 old_count + 1, kMaxThreadCount); |
594 return tracker; | 575 return tracker; |
595 } | 576 } |
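
The three-tier claim above (pop from the cache, a fresh Allocate(), then a type-scan fallback) funnels all contention through ChangeType(). A minimal sketch of why that works, with a hypothetical BlockHeader and ChangeType() standing in for the allocator's internals (this is not the PersistentMemoryAllocator API): the block's type word is a compare-and-swap target, so exactly one racing thread can move a block from "free" to "in use".

  #include <atomic>
  #include <cstdint>

  struct BlockHeader {
    std::atomic<uint32_t> type{0};
  };

  // Hypothetical stand-in for PersistentMemoryAllocator::ChangeType():
  // succeeds for exactly one caller when several threads race to claim the
  // same block; the losers see the updated type and move on.
  bool ChangeType(BlockHeader* block, uint32_t to_type, uint32_t from_type) {
    return block->type.compare_exchange_strong(from_type, to_type);
  }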
596 | 577 |
597 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { | 578 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { |
598 ThreadActivityTracker* tracker = | 579 ThreadActivityTracker* tracker = |
599 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); | 580 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); |
600 if (tracker) { | 581 if (tracker) { |
601 this_thread_tracker_.Free(); | 582 this_thread_tracker_.Free(); |
602 delete tracker; | 583 delete tracker; |
603 } | 584 } |
604 } | 585 } |
605 | 586 |
606 GlobalActivityTracker::GlobalActivityTracker( | 587 GlobalActivityTracker::GlobalActivityTracker( |
607 std::unique_ptr<PersistentMemoryAllocator> allocator, | 588 std::unique_ptr<PersistentMemoryAllocator> allocator, |
608 int stack_depth) | 589 int stack_depth) |
609 : allocator_(std::move(allocator)), | 590 : allocator_(std::move(allocator)), |
610 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), | 591 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), |
611 this_thread_tracker_(&OnTLSDestroy), | 592 this_thread_tracker_(&OnTLSDestroy), |
612 thread_tracker_count_(0), | 593 thread_tracker_count_(0), |
613 available_memories_count_(0) { | 594 available_memories_(kMaxThreadCount, |
614 // Clear the available-memories array. | 595 PersistentMemoryAllocator::kReferenceNull) { |
615 memset(available_memories_, 0, sizeof(available_memories_)); | |
616 | |
617 // Ensure the passed memory is valid and empty (iterator finds nothing). | 596 // Ensure the passed memory is valid and empty (iterator finds nothing). |
618 uint32_t type; | 597 uint32_t type; |
619 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); | 598 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); |
620 | 599 |
621 // Ensure that there is no other global object and then make this one such. | 600 // Ensure that there is no other global object and then make this one such. |
622 DCHECK(!g_tracker_); | 601 DCHECK(!g_tracker_); |
623 g_tracker_ = this; | 602 g_tracker_ = this; |
624 } | 603 } |
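
The constructor now hands a capacity (kMaxThreadCount) and a sentinel (kReferenceNull) to |available_memories_| instead of clearing a raw array by hand. The diff only shows the container's contract: invalid_value(), a pop() that returns the sentinel when empty, and a push() that may drop entries when full. A lock-based sketch of that contract follows; it is illustrative only, and the real container would need to be lock-free and allocation-free on the hot path.

  #include <cstddef>
  #include <mutex>
  #include <vector>

  template <typename Reference>
  class BoundedReferenceStack {
   public:
    BoundedReferenceStack(size_t capacity, Reference invalid)
        : invalid_(invalid), capacity_(capacity) {}

    // Must test false with "!" so callers can write |if (!ref)|.
    Reference invalid_value() const { return invalid_; }

    // Drops the entry when full; callers recover dropped blocks via the
    // slow type-scan in CreateTrackerForCurrentThread().
    void push(Reference ref) {
      std::lock_guard<std::mutex> lock(lock_);
      if (stack_.size() < capacity_)
        stack_.push_back(ref);
    }

    // Returns invalid_value() when empty.
    Reference pop() {
      std::lock_guard<std::mutex> lock(lock_);
      if (stack_.empty())
        return invalid_;
      Reference ref = stack_.back();
      stack_.pop_back();
      return ref;
    }

   private:
    const Reference invalid_;
    const size_t capacity_;
    std::vector<Reference> stack_;
    std::mutex lock_;
  };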
625 | 604 |
626 GlobalActivityTracker::~GlobalActivityTracker() { | 605 GlobalActivityTracker::~GlobalActivityTracker() { |
627 DCHECK_EQ(g_tracker_, this); | 606 DCHECK_EQ(g_tracker_, this); |
628 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); | 607 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); |
629 g_tracker_ = nullptr; | 608 g_tracker_ = nullptr; |
630 } | 609 } |
631 | 610 |
632 void GlobalActivityTracker::ReturnTrackerMemory( | 611 void GlobalActivityTracker::ReturnTrackerMemory( |
633 ManagedActivityTracker* tracker) { | 612 ManagedActivityTracker* tracker) { |
634 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; | 613 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; |
635 void* mem_base = tracker->mem_base_; | 614 void* mem_base = tracker->mem_base_; |
| 615 DCHECK(mem_reference); |
| 616 DCHECK(mem_base); |
636 | 617 |
637 // Zero the memory so that it is ready for use if needed again later. It's | 618 // Zero the memory so that it is ready for use if needed again later. It's |
638 // better to clear the memory now, when a thread is exiting, than to do it | 619 // better to clear the memory now, when a thread is exiting, than to do it |
639 // when it is first needed by a thread doing actual work. | 620 // when it is first needed by a thread doing actual work. |
640 memset(mem_base, 0, stack_memory_size_); | 621 memset(mem_base, 0, stack_memory_size_); |
641 | 622 |
642 // Remove the destructed tracker from the set of known ones. | 623 // Remove the destructed tracker from the set of known ones. |
643 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); | 624 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); |
644 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); | 625 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); |
645 | 626 |
646 // Deal with the memory that was used by the tracker. | 627 // The memory was within the persistent memory allocator. Change its type |
647 if (mem_reference) { | 628 // so it is effectively marked as "free". |
648 // The memory was within the persistent memory allocator. Change its type | 629 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree, |
649 // so that iteration won't find it. | 630 kTypeIdActivityTracker); |
650 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree, | |
651 kTypeIdActivityTracker); | |
652 // There is no way to free memory from a persistent allocator so instead | |
653 // push it on the internal list of available memory blocks. | |
654 while (true) { | |
655 // Get the existing count of available memories and ensure we won't | |
656 // burst the array. Acquire the values in the array. | |
657 int count = available_memories_count_.load(std::memory_order_acquire); | |
658 if (count >= kMaxThreadCount) { | |
659 NOTREACHED(); | |
660 // Storage is full. Just forget about this memory. It won't be re-used | |
661 // but there's no real loss. | |
662 break; | |
663 } | |
664 | 631 |
665 // Write the reference of the memory being returned to this slot in the | 632 // Push this on the internal cache of available memory blocks so it can |
666 // array. Empty slots have a value of zero so do an atomic compare-and- | 633 // be found and reused quickly. If the push somehow exceeds the maximum |
667 // exchange to ensure that a race condition doesn't exist with another | 634 // size of the cache, it will fail but a fallback check in CreateTracker |
668 // thread doing the same. | 635 // will find it by (slow) iteration. |
669 PersistentMemoryAllocator::Reference mem_expected = 0; | 636 available_memories_.push(mem_reference); |
670 if (!available_memories_[count].compare_exchange_strong( | |
671 mem_expected, mem_reference, std::memory_order_release, | |
672 std::memory_order_relaxed)) { | |
673 PlatformThread::YieldCurrentThread(); | |
674 continue; // Try again. | |
675 } | |
676 | |
677 // Increment the count, releasing the value written to the array. This | |
678 // could fail if a simultaneous "pop" operation decremented the counter. | |
679 // If that happens, clear the array slot and start over. Do a "strong" | |
680 // exchange to avoid spurious retries that can occur with a "weak" one. | |
681 int expected = count; // Updated by compare/exchange. | |
682 if (!available_memories_count_.compare_exchange_strong( | |
683 expected, count + 1, std::memory_order_release, | |
684 std::memory_order_relaxed)) { | |
685 available_memories_[count].store(0, std::memory_order_relaxed); | |
686 continue; | |
687 } | |
688 | |
689 // Count was successfully incremented to reflect the newly added value. | |
690 break; | |
691 } | |
692 } else { | |
693 // The memory was allocated from the process heap. This shouldn't happen | |
694 // because the persistent memory segment should be big enough for all | |
695 // thread stacks but it's better to support falling back to allocation | |
696 // from the heap rather than crash. Everything will work as normal but | |
697 // the data won't be persisted. | |
698 delete[] reinterpret_cast<char*>(mem_base); | |
699 } | |
700 } | 637 } |
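
Note the ordering in the release path: the memset() runs before the ChangeType() to "free", because a claimant may reuse the block the instant its type flips. A self-contained sketch of that requirement, using arbitrary type ids and a hypothetical Block rather than the real allocator types: the release store on the type word publishes the zeroed payload to the acquiring claimant.

  #include <atomic>
  #include <cstdint>
  #include <cstring>

  constexpr uint32_t kFree = 1;   // arbitrary illustrative type ids
  constexpr uint32_t kInUse = 2;

  struct Block {
    std::atomic<uint32_t> type{kInUse};
    char payload[64];
  };

  void Release(Block* block) {
    // All writes to the payload must complete before the type flips...
    std::memset(block->payload, 0, sizeof(block->payload));
    // ...which the release store guarantees.
    block->type.store(kFree, std::memory_order_release);
  }

  bool Claim(Block* block) {
    // The acquire pairs with the release above, so a successful claimant
    // observes the fully zeroed payload.
    uint32_t expected = kFree;
    return block->type.compare_exchange_strong(expected, kInUse,
                                               std::memory_order_acquire);
  }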
701 | 638 |
702 // static | 639 // static |
703 void GlobalActivityTracker::OnTLSDestroy(void* value) { | 640 void GlobalActivityTracker::OnTLSDestroy(void* value) { |
704 delete reinterpret_cast<ManagedActivityTracker*>(value); | 641 delete reinterpret_cast<ManagedActivityTracker*>(value); |
705 } | 642 } |
706 | 643 |
707 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, | 644 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, |
708 uint8_t action, | 645 uint8_t action, |
709 uint32_t id, | 646 uint32_t id, |
(...skipping 60 matching lines...)
770 const base::Process* process) | 707 const base::Process* process) |
771 : GlobalActivityTracker::ScopedThreadActivity( | 708 : GlobalActivityTracker::ScopedThreadActivity( |
772 nullptr, | 709 nullptr, |
773 Activity::ACT_PROCESS_WAIT, | 710 Activity::ACT_PROCESS_WAIT, |
774 ActivityData::ForProcess(process->Pid()), | 711 ActivityData::ForProcess(process->Pid()), |
775 /*lock_allowed=*/true) {} | 712 /*lock_allowed=*/true) {} |
776 #endif | 713 #endif |
777 | 714 |
778 } // namespace debug | 715 } // namespace debug |
779 } // namespace base | 716 } // namespace base |