OLD | NEW |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/debug/activity_tracker.h" | 5 #include "base/debug/activity_tracker.h" |
6 | 6 |
7 #include "base/debug/stack_trace.h" | 7 #include "base/debug/stack_trace.h" |
8 #include "base/files/file.h" | 8 #include "base/files/file.h" |
9 #include "base/files/file_path.h" | 9 #include "base/files/file_path.h" |
10 #include "base/files/memory_mapped_file.h" | 10 #include "base/files/memory_mapped_file.h" |
(...skipping 482 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
493 StringPiece name, | 493 StringPiece name, |
494 int stack_depth) { | 494 int stack_depth) { |
495 CreateWithAllocator( | 495 CreateWithAllocator( |
496 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)), | 496 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)), |
497 stack_depth); | 497 stack_depth); |
498 } | 498 } |
499 | 499 |
500 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { | 500 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { |
501 DCHECK(!this_thread_tracker_.Get()); | 501 DCHECK(!this_thread_tracker_.Get()); |
502 | 502 |
503 PersistentMemoryAllocator::Reference mem_reference = 0; | 503 PersistentMemoryAllocator::Reference mem_reference = |
504 void* mem_base = nullptr; | 504 available_memories_.invalid_value(); |
| 505 DCHECK(!mem_reference); // invalid_value should be checkable with ! |
505 | 506 |
506 // Get the current count of available memories, acquiring the array values. | 507 while (true) { |
507 int count = available_memories_count_.load(std::memory_order_acquire); | 508 // Get the first available memory from the top of the FIFO. |
508 while (count > 0) { | 509 mem_reference = available_memories_.pop(); |
509 // There is a memory block that was previously released (and zeroed) so | 510 if (!mem_reference) |
510 // just re-use that rather than allocating a new one. Use "relaxed" because | 511 break; |
511 // the value is guarded by the |count| "acquire". A zero reference replaces | |
512 // the existing value so that it can't be used by another thread that | |
513 // manages to interrupt this one before the count can be decremented. | |
514 // A zero reference is also required for the "push" operation to work | |
515 // once the count finally does get decremented. | |
516 mem_reference = | |
517 available_memories_[count - 1].exchange(0, std::memory_order_relaxed); | |
518 | 512 |
519 // If the reference is zero, it's already been taken but count hasn't yet | 513 // Turn the reference back into one of the activity-tracker type. This can |
520 // been decremented. Give that other thread a chance to finish then reload | 514 // fail if something else has already taken the block and changed its type. |
521 // the "count" value and try again. | 515 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker, |
522 if (!mem_reference) { | 516 kTypeIdActivityTrackerFree)) { |
523 PlatformThread::YieldCurrentThread(); | 517 break; |
524 count = available_memories_count_.load(std::memory_order_acquire); | |
525 continue; | |
526 } | 518 } |
527 | |
528 // Decrement the count indicating that the value has been taken. If this | |
529 // fails then another thread has pushed something new and incremented the | |
530 // count. | |
531 // NOTE: |oldcount| will be loaded with the existing value. | |
532 int oldcount = count; | |
533 if (!available_memories_count_.compare_exchange_strong( | |
534 oldcount, count - 1, std::memory_order_acquire, | |
535 std::memory_order_acquire)) { | |
536 DCHECK_LT(count, oldcount); | |
537 | |
538 // Restore the reference that was zeroed above and try again. | |
539 available_memories_[count - 1].store(mem_reference, | |
540 std::memory_order_relaxed); | |
541 count = oldcount; | |
542 continue; | |
543 } | |
544 | |
545 // Turn the reference back into one of the activity-tracker type. | |
546 mem_base = allocator_->GetAsObject<char>(mem_reference, | |
547 kTypeIdActivityTrackerFree); | |
548 DCHECK(mem_base); | |
549 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference)); | |
550 bool changed = allocator_->ChangeType(mem_reference, kTypeIdActivityTracker, | |
551 kTypeIdActivityTrackerFree); | |
552 DCHECK(changed); | |
553 | |
554 // Success. | |
555 break; | |
556 } | 519 } |
557 | 520 |
558 // Handle the case where no previously-used memories are available. | 521 // Handle the case where no known available memories were found. |
559 if (count == 0) { | 522 if (!mem_reference) { |
560 // Allocate a block of memory from the persistent segment. | 523 // Allocate a block of memory from the persistent segment. |
561 mem_reference = | 524 mem_reference = |
562 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker); | 525 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker); |
563 if (mem_reference) { | 526 if (mem_reference) { |
564 // Success. Convert the reference to an actual memory address. | 527 // Success. Make the allocation iterable so it can be found later. |
565 mem_base = | |
566 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker); | |
567 // Make the allocation iterable so it can be found by other processes. | |
568 allocator_->MakeIterable(mem_reference); | 528 allocator_->MakeIterable(mem_reference); |
569 } else { | 529 } else { |
570 // Failure. This shouldn't happen. | 530 // Failure. Look for any free blocks that weren't held in the cache |
571 NOTREACHED(); | 531 // of available memories and try to claim one. This can happen if the |
572 // But if it does, probably because the allocator wasn't given enough | 532 // |available_memories_| stack isn't sufficiently large to hold all |
573 // memory to satisfy all possible requests, handle it gracefully by | 533 // released memories or if multiple independent processes are sharing |
574 // allocating the required memory from the heap. | 534 // the memory segment. |
575 mem_base = new char[stack_memory_size_]; | 535 PersistentMemoryAllocator::Iterator iter(allocator_.get()); |
576 memset(mem_base, 0, stack_memory_size_); | 536 while ((mem_reference = iter.GetNextOfType(kTypeIdActivityTrackerFree)) != |
577 // Report the thread-count at which the allocator was full so that the | 537 0) { |
578 // failure can be seen and underlying memory resized appropriately. | 538 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker, |
579 UMA_HISTOGRAM_COUNTS_1000( | 539 kTypeIdActivityTrackerFree)) { |
580 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount", | 540 break; |
581 thread_tracker_count_.load(std::memory_order_relaxed)); | 541 } |
| 542 mem_reference = 0; |
| 543 } |
| 544 if (!mem_reference) { |
| 545 // Double Failure. This shouldn't happen. But be graceful if it does, |
| 546 // probably because the underlying allocator wasn't given enough memory |
| 547 // to satisfy all possible requests. |
| 548 NOTREACHED(); |
| 549 // Report the thread-count at which the allocator was full so that the |
| 550 // failure can be seen and underlying memory resized appropriately. |
| 551 UMA_HISTOGRAM_COUNTS_1000( |
| 552 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount", |
| 553 thread_tracker_count_.load(std::memory_order_relaxed)); |
| 554 // Return null, just as if tracking wasn't enabled. |
| 555 return nullptr; |
| 556 } |
582 } | 557 } |
583 } | 558 } |
584 | 559 |
| 560 // Convert the memory block found above into an actual memory address. |
| 561 DCHECK(mem_reference); |
| 562 void* mem_base = |
| 563 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker); |
| 564 DCHECK(mem_base); |
| 565 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference)); |
| 566 |
585 // Create a tracker with the acquired memory and set it as the tracker | 567 // Create a tracker with the acquired memory and set it as the tracker |
586 // for this particular thread in thread-local-storage. | 568 // for this particular thread in thread-local-storage. |
587 DCHECK(mem_base); | |
588 ManagedActivityTracker* tracker = | 569 ManagedActivityTracker* tracker = |
589 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_); | 570 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_); |
590 DCHECK(tracker->IsValid()); | 571 DCHECK(tracker->IsValid()); |
591 this_thread_tracker_.Set(tracker); | 572 this_thread_tracker_.Set(tracker); |
592 int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed); | 573 int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed); |
593 | 574 |
594 UMA_HISTOGRAM_ENUMERATION("ActivityTracker.ThreadTrackers.Count", | 575 UMA_HISTOGRAM_ENUMERATION("ActivityTracker.ThreadTrackers.Count", |
595 old_count + 1, kMaxThreadCount); | 576 old_count + 1, kMaxThreadCount); |
596 return tracker; | 577 return tracker; |
597 } | 578 } |
598 | 579 |
599 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { | 580 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { |
600 ThreadActivityTracker* tracker = | 581 ThreadActivityTracker* tracker = |
601 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); | 582 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); |
602 if (tracker) | 583 if (tracker) |
603 delete tracker; | 584 delete tracker; |
604 } | 585 } |
605 | 586 |
606 GlobalActivityTracker::GlobalActivityTracker( | 587 GlobalActivityTracker::GlobalActivityTracker( |
607 std::unique_ptr<PersistentMemoryAllocator> allocator, | 588 std::unique_ptr<PersistentMemoryAllocator> allocator, |
608 int stack_depth) | 589 int stack_depth) |
609 : allocator_(std::move(allocator)), | 590 : allocator_(std::move(allocator)), |
610 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), | 591 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), |
611 this_thread_tracker_(&OnTLSDestroy), | 592 this_thread_tracker_(&OnTLSDestroy), |
612 thread_tracker_count_(0), | 593 thread_tracker_count_(0), |
613 available_memories_count_(0) { | 594 available_memories_(kMaxThreadCount, |
614 // Clear the available-memories array. | 595 PersistentMemoryAllocator::kReferenceNull) { |
615 memset(available_memories_, 0, sizeof(available_memories_)); | |
616 | |
617 // Ensure the passed memory is valid and empty (iterator finds nothing). | 596 // Ensure the passed memory is valid and empty (iterator finds nothing). |
618 uint32_t type; | 597 uint32_t type; |
619 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); | 598 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); |
620 | 599 |
621 // Ensure that there is no other global object and then make this one such. | 600 // Ensure that there is no other global object and then make this one such. |
622 DCHECK(!g_tracker_); | 601 DCHECK(!g_tracker_); |
623 g_tracker_ = this; | 602 g_tracker_ = this; |
624 } | 603 } |
625 | 604 |
626 GlobalActivityTracker::~GlobalActivityTracker() { | 605 GlobalActivityTracker::~GlobalActivityTracker() { |
627 DCHECK_EQ(g_tracker_, this); | 606 DCHECK_EQ(g_tracker_, this); |
628 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); | 607 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); |
629 g_tracker_ = nullptr; | 608 g_tracker_ = nullptr; |
630 } | 609 } |
631 | 610 |
632 void GlobalActivityTracker::ReturnTrackerMemory( | 611 void GlobalActivityTracker::ReturnTrackerMemory( |
633 ManagedActivityTracker* tracker) { | 612 ManagedActivityTracker* tracker) { |
634 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; | 613 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; |
635 void* mem_base = tracker->mem_base_; | 614 void* mem_base = tracker->mem_base_; |
| 615 DCHECK(mem_reference); |
| 616 DCHECK(mem_base); |
636 | 617 |
637 // Zero the memory so that it is ready for use if needed again later. It's | 618 // Zero the memory so that it is ready for use if needed again later. It's |
638 // better to clear the memory now, when a thread is exiting, than to do it | 619 // better to clear the memory now, when a thread is exiting, than to do it |
639 // when it is first needed by a thread doing actual work. | 620 // when it is first needed by a thread doing actual work. |
640 memset(mem_base, 0, stack_memory_size_); | 621 memset(mem_base, 0, stack_memory_size_); |
641 | 622 |
642 // Remove the destructed tracker from the set of known ones. | 623 // Remove the destructed tracker from the set of known ones. |
643 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); | 624 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); |
644 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); | 625 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); |
645 | 626 |
646 // Deal with the memory that was used by the tracker. | 627 // The memory was within the persistent memory allocator. Change its type |
647 if (mem_reference) { | 628 // so it is effectively marked as "free". |
648 // The memory was within the persistent memory allocator. Change its type | 629 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree, |
649 // so that iteration won't find it. | 630 kTypeIdActivityTracker); |
650 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree, | |
651 kTypeIdActivityTracker); | |
652 // There is no way to free memory from a persistent allocator so instead | |
653 // push it on the internal list of available memory blocks. | |
654 while (true) { | |
655 // Get the existing count of available memories and ensure we won't | |
656 // burst the array. Acquire the values in the array. | |
657 int count = available_memories_count_.load(std::memory_order_acquire); | |
658 if (count >= kMaxThreadCount) { | |
659 NOTREACHED(); | |
660 // Storage is full. Just forget about this memory. It won't be re-used | |
661 // but there's no real loss. | |
662 break; | |
663 } | |
664 | 631 |
665 // Write the reference of the memory being returned to this slot in the | 632 // Push this on the internal cache of available memory blocks so it can |
666 // array. Empty slots have a value of zero so do an atomic compare-and- | 633 // be found and reused quickly. If the push somehow exceeds the maximum |
667 // exchange to ensure that a race condition doesn't exist with another | 634 // size of the cache, it will fail but a fallback check in CreateTracker |
668 // thread doing the same. | 635 // will find it by (slow) iteration. |
669 PersistentMemoryAllocator::Reference mem_expected = 0; | 636 available_memories_.push(mem_reference); |
670 if (!available_memories_[count].compare_exchange_strong( | |
671 mem_expected, mem_reference, std::memory_order_release, | |
672 std::memory_order_relaxed)) { | |
673 PlatformThread::YieldCurrentThread(); | |
674 continue; // Try again. | |
675 } | |
676 | |
677 // Increment the count, releasing the value written to the array. This | |
678 // could fail if a simultaneous "pop" operation decremented the counter. | |
679 // If that happens, clear the array slot and start over. Do a "strong" | |
680 // exchange to avoid spurious retries that can occur with a "weak" one. | |
681 int expected = count; // Updated by compare/exchange. | |
682 if (!available_memories_count_.compare_exchange_strong( | |
683 expected, count + 1, std::memory_order_release, | |
684 std::memory_order_relaxed)) { | |
685 available_memories_[count].store(0, std::memory_order_relaxed); | |
686 continue; | |
687 } | |
688 | |
689 // Count was successfully incremented to reflect the newly added value. | |
690 break; | |
691 } | |
692 } else { | |
693 // The memory was allocated from the process heap. This shouldn't happen | |
694 // because the persistent memory segment should be big enough for all | |
695 // thread stacks but it's better to support falling back to allocation | |
696 // from the heap rather than crash. Everything will work as normal but | |
697 // the data won't be persisted. | |
698 delete[] reinterpret_cast<char*>(mem_base); | |
699 } | |
700 } | 637 } |
701 | 638 |
702 // static | 639 // static |
703 void GlobalActivityTracker::OnTLSDestroy(void* value) { | 640 void GlobalActivityTracker::OnTLSDestroy(void* value) { |
704 delete reinterpret_cast<ManagedActivityTracker*>(value); | 641 delete reinterpret_cast<ManagedActivityTracker*>(value); |
705 } | 642 } |
706 | 643 |
707 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, | 644 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, |
708 uint8_t action, | 645 uint8_t action, |
709 uint32_t id, | 646 uint32_t id, |
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
770 const base::Process* process) | 707 const base::Process* process) |
771 : GlobalActivityTracker::ScopedThreadActivity( | 708 : GlobalActivityTracker::ScopedThreadActivity( |
772 nullptr, | 709 nullptr, |
773 Activity::ACT_PROCESS_WAIT, | 710 Activity::ACT_PROCESS_WAIT, |
774 ActivityData::ForProcess(process->Pid()), | 711 ActivityData::ForProcess(process->Pid()), |
775 /*lock_allowed=*/true) {} | 712 /*lock_allowed=*/true) {} |
776 #endif | 713 #endif |
777 | 714 |
778 } // namespace debug | 715 } // namespace debug |
779 } // namespace base | 716 } // namespace base |
OLD | NEW |