Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(225)

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2255503002: New cache for the activity tracker. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: some clean up Created 4 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include "base/debug/stack_trace.h" 7 #include "base/debug/stack_trace.h"
8 #include "base/files/file.h" 8 #include "base/files/file.h"
9 #include "base/files/file_path.h" 9 #include "base/files/file_path.h"
10 #include "base/files/memory_mapped_file.h" 10 #include "base/files/memory_mapped_file.h"
(...skipping 480 matching lines...) Expand 10 before | Expand all | Expand 10 after
491 StringPiece name, 491 StringPiece name,
492 int stack_depth) { 492 int stack_depth) {
493 CreateWithAllocator( 493 CreateWithAllocator(
494 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)), 494 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)),
495 stack_depth); 495 stack_depth);
496 } 496 }
497 497
498 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { 498 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
499 DCHECK(!this_thread_tracker_.Get()); 499 DCHECK(!this_thread_tracker_.Get());
500 500
501 PersistentMemoryAllocator::Reference mem_reference = 0; 501 PersistentMemoryAllocator::Reference mem_reference =
502 void* mem_base = nullptr; 502 available_memories_.invalid_value();
503 DCHECK(!mem_reference); // invalid_value should be checkable with !
503 504
504 // Get the current count of available memories, acquiring the array values. 505 while (true) {
505 int count = available_memories_count_.load(std::memory_order_acquire); 506 // Get the first available memory from the top of the stack.
manzagop (departed) 2016/08/16 21:44:31 nit: top of the stack -> fifo.
bcwhite 2016/08/17 19:29:23 Done. Though a FIFO and LIFO are both types of stacks.
506 while (count > 0) { 507 mem_reference = available_memories_.pop();
507 // There is a memory block that was previously released (and zeroed) so 508 if (!mem_reference)
508 // just re-use that rather than allocating a new one. Use "relaxed" because 509 break;
509 // the value is guarded by the |count| "acquire". A zero reference replaces
510 // the existing value so that it can't be used by another thread that
511 // manages to interrupt this one before the count can be decremented.
512 // A zero reference is also required for the "push" operation to work
513 // once the count finally does get decremented.
514 mem_reference =
515 available_memories_[count - 1].exchange(0, std::memory_order_relaxed);
516 510
517 // If the reference is zero, it's already been taken but count hasn't yet 511 // Turn the reference back into one of the activity-tracker type. This can
518 // been decremented. Give that other thread a chance to finish then reload 512 // fail if something else has already taken the block and changed its type.
manzagop (departed) 2016/08/16 21:44:31 Who could have taken the block, and how? Ah, by it
bcwhite 2016/08/17 19:29:23 Correct.
519 // the "count" value and try again. 513 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
520 if (!mem_reference) { 514 kTypeIdActivityTrackerFree)) {
521 PlatformThread::YieldCurrentThread(); 515 break;
522 count = available_memories_count_.load(std::memory_order_acquire);
523 continue;
524 } 516 }
525
526 // Decrement the count indicating that the value has been taken. If this
527 // fails then another thread has pushed something new and incremented the
528 // count.
529 // NOTE: |oldcount| will be loaded with the existing value.
530 int oldcount = count;
531 if (!available_memories_count_.compare_exchange_strong(
532 oldcount, count - 1, std::memory_order_acquire,
533 std::memory_order_acquire)) {
534 DCHECK_LT(count, oldcount);
535
536 // Restore the reference that was zeroed above and try again.
537 available_memories_[count - 1].store(mem_reference,
538 std::memory_order_relaxed);
539 count = oldcount;
540 continue;
541 }
542
543 // Turn the reference back into one of the activity-tracker type.
544 mem_base = allocator_->GetAsObject<char>(mem_reference,
545 kTypeIdActivityTrackerFree);
546 DCHECK(mem_base);
547 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
548 bool changed = allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
549 kTypeIdActivityTrackerFree);
550 DCHECK(changed);
551
552 // Success.
553 break;
554 } 517 }
555 518
556 // Handle the case where no previously-used memories are available. 519 // Handle the case where no known available memories were found.
557 if (count == 0) { 520 if (!mem_reference) {
558 // Allocate a block of memory from the persistent segment. 521 // Allocate a block of memory from the persistent segment.
559 mem_reference = 522 mem_reference =
560 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker); 523 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker);
561 if (mem_reference) { 524 if (mem_reference) {
562 // Success. Convert the reference to an actual memory address. 525 // Success. Make the allocation iterable so it can be found later.
563 mem_base =
564 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
565 // Make the allocation iterable so it can be found by other processes.
566 allocator_->MakeIterable(mem_reference); 526 allocator_->MakeIterable(mem_reference);
567 } else { 527 } else {
568 // Failure. This shouldn't happen. 528 // Failure. Look for any free blocks that weren't held in the cache
569 NOTREACHED(); 529 // of available memories and try to claim it.
manzagop (departed) 2016/08/16 21:44:31 Maybe clarify the comment to say the claimed block
bcwhite 2016/08/17 19:29:23 Done.
570 // But if it does, probably because the allocator wasn't given enough 530 PersistentMemoryAllocator::Iterator iter(allocator_.get());
571 // memory to satisfy all possible requests, handle it gracefully by 531 while ((mem_reference = iter.GetNextOfType(kTypeIdActivityTrackerFree)) !=
572 // allocating the required memory from the heap. 532 0) {
573 mem_base = new char[stack_memory_size_]; 533 if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
574 memset(mem_base, 0, stack_memory_size_); 534 kTypeIdActivityTrackerFree)) {
575 // Report the thread-count at which the allocator was full so that the 535 break;
576 // failure can be seen and underlying memory resized appropriately. 536 }
577 UMA_HISTOGRAM_COUNTS_1000( 537 mem_reference = 0;
578 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount", 538 }
579 thread_tracker_count_.load(std::memory_order_relaxed)); 539 if (!mem_reference) {
 540 // Double Failure. This shouldn't happen. But be graceful if it does,
541 // probably because the underlying allocator wasn't given enough memory
542 // to satisfy all possible requests.
543 NOTREACHED();
544 // Report the thread-count at which the allocator was full so that the
545 // failure can be seen and underlying memory resized appropriately.
546 UMA_HISTOGRAM_COUNTS_1000(
547 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
548 thread_tracker_count_.load(std::memory_order_relaxed));
549 // Return null, just as if tracking wasn't enabled.
550 return nullptr;
551 }
580 } 552 }
581 } 553 }
582 554
555 // Convert the memory block found above into an actual memory address.
556 DCHECK(mem_reference);
557 void* mem_base =
558 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
559 DCHECK(mem_base);
560 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
561
583 // Create a tracker with the acquired memory and set it as the tracker 562 // Create a tracker with the acquired memory and set it as the tracker
584 // for this particular thread in thread-local-storage. 563 // for this particular thread in thread-local-storage.
585 DCHECK(mem_base);
586 ManagedActivityTracker* tracker = 564 ManagedActivityTracker* tracker =
587 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_); 565 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
588 DCHECK(tracker->IsValid()); 566 DCHECK(tracker->IsValid());
589 this_thread_tracker_.Set(tracker); 567 this_thread_tracker_.Set(tracker);
590 int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed); 568 int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
591 569
592 UMA_HISTOGRAM_ENUMERATION("ActivityTracker.ThreadTrackers.Count", 570 UMA_HISTOGRAM_ENUMERATION("ActivityTracker.ThreadTrackers.Count",
593 old_count + 1, kMaxThreadCount); 571 old_count + 1, kMaxThreadCount);
594 return tracker; 572 return tracker;
595 } 573 }
596 574
597 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { 575 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
598 ThreadActivityTracker* tracker = 576 ThreadActivityTracker* tracker =
599 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); 577 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
600 if (tracker) { 578 if (tracker) {
601 this_thread_tracker_.Free(); 579 this_thread_tracker_.Free();
602 delete tracker; 580 delete tracker;
603 } 581 }
604 } 582 }
605 583
606 GlobalActivityTracker::GlobalActivityTracker( 584 GlobalActivityTracker::GlobalActivityTracker(
607 std::unique_ptr<PersistentMemoryAllocator> allocator, 585 std::unique_ptr<PersistentMemoryAllocator> allocator,
608 int stack_depth) 586 int stack_depth)
609 : allocator_(std::move(allocator)), 587 : allocator_(std::move(allocator)),
610 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), 588 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
611 this_thread_tracker_(&OnTLSDestroy), 589 this_thread_tracker_(&OnTLSDestroy),
612 thread_tracker_count_(0), 590 thread_tracker_count_(0) {
613 available_memories_count_(0) {
614 // Clear the available-memories array.
615 memset(available_memories_, 0, sizeof(available_memories_));
616
617 // Ensure the passed memory is valid and empty (iterator finds nothing). 591 // Ensure the passed memory is valid and empty (iterator finds nothing).
618 uint32_t type; 592 uint32_t type;
619 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); 593 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
620 594
621 // Ensure that there is no other global object and then make this one such. 595 // Ensure that there is no other global object and then make this one such.
622 DCHECK(!g_tracker_); 596 DCHECK(!g_tracker_);
623 g_tracker_ = this; 597 g_tracker_ = this;
624 } 598 }
625 599
626 GlobalActivityTracker::~GlobalActivityTracker() { 600 GlobalActivityTracker::~GlobalActivityTracker() {
627 DCHECK_EQ(g_tracker_, this); 601 DCHECK_EQ(g_tracker_, this);
628 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); 602 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
629 g_tracker_ = nullptr; 603 g_tracker_ = nullptr;
630 } 604 }
631 605
632 void GlobalActivityTracker::ReturnTrackerMemory( 606 void GlobalActivityTracker::ReturnTrackerMemory(
633 ManagedActivityTracker* tracker) { 607 ManagedActivityTracker* tracker) {
634 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; 608 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
635 void* mem_base = tracker->mem_base_; 609 void* mem_base = tracker->mem_base_;
610 DCHECK(mem_reference);
611 DCHECK(mem_base);
636 612
637 // Zero the memory so that it is ready for use if needed again later. It's 613 // Zero the memory so that it is ready for use if needed again later. It's
638 // better to clear the memory now, when a thread is exiting, than to do it 614 // better to clear the memory now, when a thread is exiting, than to do it
639 // when it is first needed by a thread doing actual work. 615 // when it is first needed by a thread doing actual work.
640 memset(mem_base, 0, stack_memory_size_); 616 memset(mem_base, 0, stack_memory_size_);
641 617
642 // Remove the destructed tracker from the set of known ones. 618 // Remove the destructed tracker from the set of known ones.
643 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); 619 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
644 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); 620 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
645 621
646 // Deal with the memory that was used by the tracker. 622 // The memory was within the persistent memory allocator. Change its type
647 if (mem_reference) { 623 // so it is effectively marked as "free".
648 // The memory was within the persistent memory allocator. Change its type 624 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree,
649 // so that iteration won't find it. 625 kTypeIdActivityTracker);
650 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree,
651 kTypeIdActivityTracker);
652 // There is no way to free memory from a persistent allocator so instead
653 // push it on the internal list of available memory blocks.
654 while (true) {
655 // Get the existing count of available memories and ensure we won't
656 // burst the array. Acquire the values in the array.
657 int count = available_memories_count_.load(std::memory_order_acquire);
658 if (count >= kMaxThreadCount) {
659 NOTREACHED();
660 // Storage is full. Just forget about this memory. It won't be re-used
661 // but there's no real loss.
662 break;
663 }
664 626
665 // Write the reference of the memory being returned to this slot in the 627 // Push this on the internal list of available memory blocks so it can
manzagop (departed) 2016/08/16 21:44:31 nit: list -> cache
bcwhite 2016/08/17 19:29:23 Done.
666 // array. Empty slots have a value of zero so do an atomic compare-and- 628 // be found and reused quickly. If the push somehow exceeds the maximum
667 // exchange to ensure that a race condition doesn't exist with another 629 // size of the stack, it will fail but a fallback check in CreateTracker
manzagop (departed) 2016/08/16 21:44:31 stack -> fifo / free block cache
bcwhite 2016/08/17 19:29:23 Done.
668 // thread doing the same. 630 // will find it by (slow) iteration.
669 PersistentMemoryAllocator::Reference mem_expected = 0; 631 available_memories_.push(mem_reference);
670 if (!available_memories_[count].compare_exchange_strong(
671 mem_expected, mem_reference, std::memory_order_release,
672 std::memory_order_relaxed)) {
673 PlatformThread::YieldCurrentThread();
674 continue; // Try again.
675 }
676
677 // Increment the count, releasing the value written to the array. This
678 // could fail if a simultaneous "pop" operation decremented the counter.
679 // If that happens, clear the array slot and start over. Do a "strong"
680 // exchange to avoid spurious retries that can occur with a "weak" one.
681 int expected = count; // Updated by compare/exchange.
682 if (!available_memories_count_.compare_exchange_strong(
683 expected, count + 1, std::memory_order_release,
684 std::memory_order_relaxed)) {
685 available_memories_[count].store(0, std::memory_order_relaxed);
686 continue;
687 }
688
689 // Count was successfully incremented to reflect the newly added value.
690 break;
691 }
692 } else {
693 // The memory was allocated from the process heap. This shouldn't happen
694 // because the persistent memory segment should be big enough for all
695 // thread stacks but it's better to support falling back to allocation
696 // from the heap rather than crash. Everything will work as normal but
697 // the data won't be persisted.
698 delete[] reinterpret_cast<char*>(mem_base);
699 }
700 } 632 }
701 633
702 // static 634 // static
703 void GlobalActivityTracker::OnTLSDestroy(void* value) { 635 void GlobalActivityTracker::OnTLSDestroy(void* value) {
704 delete reinterpret_cast<ManagedActivityTracker*>(value); 636 delete reinterpret_cast<ManagedActivityTracker*>(value);
705 } 637 }
706 638
707 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, 639 ScopedActivity::ScopedActivity(const tracked_objects::Location& location,
708 uint8_t action, 640 uint8_t action,
709 uint32_t id, 641 uint32_t id,
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after
770 const base::Process* process) 702 const base::Process* process)
771 : GlobalActivityTracker::ScopedThreadActivity( 703 : GlobalActivityTracker::ScopedThreadActivity(
772 nullptr, 704 nullptr,
773 Activity::ACT_PROCESS_WAIT, 705 Activity::ACT_PROCESS_WAIT,
774 ActivityData::ForProcess(process->Pid()), 706 ActivityData::ForProcess(process->Pid()),
775 /*lock_allowed=*/true) {} 707 /*lock_allowed=*/true) {}
776 #endif 708 #endif
777 709
778 } // namespace debug 710 } // namespace debug
779 } // namespace base 711 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698