Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/debug/activity_tracker.h" | 5 #include "base/debug/activity_tracker.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <limits> | 8 #include <limits> |
| 9 #include <utility> | 9 #include <utility> |
| 10 | 10 |
| 11 #include "base/atomic_sequence_num.h" | 11 #include "base/atomic_sequence_num.h" |
| 12 #include "base/debug/stack_trace.h" | 12 #include "base/debug/stack_trace.h" |
| 13 #include "base/files/file.h" | 13 #include "base/files/file.h" |
| 14 #include "base/files/file_path.h" | 14 #include "base/files/file_path.h" |
| 15 #include "base/files/memory_mapped_file.h" | 15 #include "base/files/memory_mapped_file.h" |
| 16 #include "base/lazy_instance.h" | |
| 16 #include "base/logging.h" | 17 #include "base/logging.h" |
| 17 #include "base/memory/ptr_util.h" | 18 #include "base/memory/ptr_util.h" |
| 18 #include "base/metrics/field_trial.h" | 19 #include "base/metrics/field_trial.h" |
| 19 #include "base/metrics/histogram_macros.h" | 20 #include "base/metrics/histogram_macros.h" |
| 20 #include "base/pending_task.h" | 21 #include "base/pending_task.h" |
| 21 #include "base/pickle.h" | 22 #include "base/pickle.h" |
| 22 #include "base/process/process.h" | 23 #include "base/process/process.h" |
| 23 #include "base/process/process_handle.h" | 24 #include "base/process/process_handle.h" |
| 24 #include "base/stl_util.h" | 25 #include "base/stl_util.h" |
| 25 #include "base/strings/string_util.h" | 26 #include "base/strings/string_util.h" |
| (...skipping 19 matching lines...) | |
| 45 // A constant used to indicate that module information is changing. | 46 // A constant used to indicate that module information is changing. |
| 46 const uint32_t kModuleInformationChanging = 0x80000000; | 47 const uint32_t kModuleInformationChanging = 0x80000000; |
| 47 | 48 |
| 48 // The key used to record process information. | 49 // The key used to record process information. |
| 49 const char kProcessPhaseDataKey[] = "process-phase"; | 50 const char kProcessPhaseDataKey[] = "process-phase"; |
| 50 | 51 |
| 51 // An atomically incrementing number, used to check for recreations of objects | 52 // An atomically incrementing number, used to check for recreations of objects |
| 52 // in the same memory space. | 53 // in the same memory space. |
| 53 StaticAtomicSequenceNumber g_next_id; | 54 StaticAtomicSequenceNumber g_next_id; |
| 54 | 55 |
| 56 // A reusable user-data sink that just discards all data saved to it. | |
| 57 LazyInstance<ActivityUserData>::Leaky g_data_sink; | |
| 58 | |
| 55 union ThreadRef { | 59 union ThreadRef { |
| 56 int64_t as_id; | 60 int64_t as_id; |
| 57 #if defined(OS_WIN) | 61 #if defined(OS_WIN) |
| 58 // On Windows, the handle itself is often a pseudo-handle with a common | 62 // On Windows, the handle itself is often a pseudo-handle with a common |
| 59 // value meaning "this thread" and so the thread-id is used. The former | 63 // value meaning "this thread" and so the thread-id is used. The former |
| 60 // can be converted to a thread-id with a system call. | 64 // can be converted to a thread-id with a system call. |
| 61 PlatformThreadId as_tid; | 65 PlatformThreadId as_tid; |
| 62 #elif defined(OS_POSIX) | 66 #elif defined(OS_POSIX) |
| 63 // On Posix, the handle is always a unique identifier so no conversion | 67 // On Posix, the handle is always a unique identifier so no conversion |
| 64 // needs to be done. However, its value is officially opaque so there | 68 // needs to be done. However, its value is officially opaque so there |
| (...skipping 244 matching lines...) | |
| 309 // thus clang requires explicit out-of-line constructors and destructors even | 313 // thus clang requires explicit out-of-line constructors and destructors even |
| 310 // when they do nothing. | 314 // when they do nothing. |
| 311 ActivityUserData::ValueInfo::ValueInfo() {} | 315 ActivityUserData::ValueInfo::ValueInfo() {} |
| 312 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; | 316 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; |
| 313 ActivityUserData::ValueInfo::~ValueInfo() {} | 317 ActivityUserData::ValueInfo::~ValueInfo() {} |
| 314 ActivityUserData::MemoryHeader::MemoryHeader() {} | 318 ActivityUserData::MemoryHeader::MemoryHeader() {} |
| 315 ActivityUserData::MemoryHeader::~MemoryHeader() {} | 319 ActivityUserData::MemoryHeader::~MemoryHeader() {} |
| 316 ActivityUserData::FieldHeader::FieldHeader() {} | 320 ActivityUserData::FieldHeader::FieldHeader() {} |
| 317 ActivityUserData::FieldHeader::~FieldHeader() {} | 321 ActivityUserData::FieldHeader::~FieldHeader() {} |
| 318 | 322 |
| 323 ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0) {} | |
| 324 | |
| 319 ActivityUserData::ActivityUserData(void* memory, size_t size) | 325 ActivityUserData::ActivityUserData(void* memory, size_t size) |
| 320 : memory_(reinterpret_cast<char*>(memory)), | 326 : memory_(reinterpret_cast<char*>(memory)), |
| 321 available_(RoundDownToAlignment(size, kMemoryAlignment)), | 327 available_(RoundDownToAlignment(size, kMemoryAlignment)), |
| 322 header_(reinterpret_cast<MemoryHeader*>(memory)) { | 328 header_(reinterpret_cast<MemoryHeader*>(memory)) { |
| 323 // It's possible that no user data is being stored. | 329 // It's possible that no user data is being stored. |
| 324 if (!memory_) | 330 if (!memory_) |
| 325 return; | 331 return; |
| 326 | 332 |
| 327 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header"); | 333 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header"); |
| 328 DCHECK_LT(sizeof(MemoryHeader), available_); | 334 DCHECK_LT(sizeof(MemoryHeader), available_); |
| (...skipping 221 matching lines...) | |
| 550 // This information is kept for every thread that is tracked. It is filled | 556 // This information is kept for every thread that is tracked. It is filled |
| 551 // the very first time the thread is seen. All fields must be of exact sizes | 557 // the very first time the thread is seen. All fields must be of exact sizes |
| 552 // so there is no issue moving between 32 and 64-bit builds. | 558 // so there is no issue moving between 32 and 64-bit builds. |
| 553 struct ThreadActivityTracker::Header { | 559 struct ThreadActivityTracker::Header { |
| 554 // Defined in .h for analyzer access. Increment this if structure changes! | 560 // Defined in .h for analyzer access. Increment this if structure changes! |
| 555 static constexpr uint32_t kPersistentTypeId = | 561 static constexpr uint32_t kPersistentTypeId = |
| 556 GlobalActivityTracker::kTypeIdActivityTracker; | 562 GlobalActivityTracker::kTypeIdActivityTracker; |
| 557 | 563 |
| 558 // Expected size for 32/64-bit check. | 564 // Expected size for 32/64-bit check. |
| 559 static constexpr size_t kExpectedInstanceSize = | 565 static constexpr size_t kExpectedInstanceSize = |
| 560 OwningProcess::kExpectedInstanceSize + 72; | 566 OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize + |
| 567 72; | |
| 561 | 568 |
| 562 // This information uniquely identifies a process. | 569 // This information uniquely identifies a process. |
| 563 OwningProcess owner; | 570 OwningProcess owner; |
| 564 | 571 |
| 565 // The thread-id (thread_ref.as_id) to which this data belongs. This number | 572 // The thread-id (thread_ref.as_id) to which this data belongs. This number |
| 566 // is not guaranteed to mean anything but combined with the process-id from | 573 // is not guaranteed to mean anything but combined with the process-id from |
| 567 // OwningProcess is unique among all active trackers. | 574 // OwningProcess is unique among all active trackers. |
| 568 ThreadRef thread_ref; | 575 ThreadRef thread_ref; |
| 569 | 576 |
| 570 // The start-time and start-ticks when the data was created. Each activity | 577 // The start-time and start-ticks when the data was created. Each activity |
| 571 // record has a |time_internal| value that can be converted to a "wall time" | 578 // record has a |time_internal| value that can be converted to a "wall time" |
| 572 // with these two values. | 579 // with these two values. |
| 573 int64_t start_time; | 580 int64_t start_time; |
| 574 int64_t start_ticks; | 581 int64_t start_ticks; |
| 575 | 582 |
| 576 // The number of Activity slots (spaces that can hold an Activity) that | 583 // The number of Activity slots (spaces that can hold an Activity) that |
| 577 // immediately follow this structure in memory. | 584 // immediately follow this structure in memory. |
| 578 uint32_t stack_slots; | 585 uint32_t stack_slots; |
| 579 | 586 |
| 580 // Some padding to keep everything 64-bit aligned. | 587 // Some padding to keep everything 64-bit aligned. |
| 581 uint32_t padding; | 588 uint32_t padding; |
| 582 | 589 |
| 583 // The current depth of the stack. This may be greater than the number of | 590 // The current depth of the stack. This may be greater than the number of |
| 584 // slots. If the depth exceeds the number of slots, the newest entries | 591 // slots. If the depth exceeds the number of slots, the newest entries |
| 585 // won't be recorded. | 592 // won't be recorded. |
| 586 std::atomic<uint32_t> current_depth; | 593 std::atomic<uint32_t> current_depth; |
| 587 | 594 |
| 588 // A memory location used to indicate if changes have been made to the stack | 595 // A memory location used to indicate if changes have been made to the stack |
| manzagop (departed) 2017/02/24 19:06:14: update comment | |
| bcwhite 2017/02/24 19:10:58: Done. | |
| 589 // that would invalidate an in-progress read of its contents. The active | 596 // that would invalidate an in-progress read of its contents. The active |
| 590 // tracker will zero the value whenever something gets popped from the | 597 // tracker will zero the value whenever something gets popped from the |
| 591 // stack. A monitoring tracker can write a non-zero value here, copy the | 598 // stack. A monitoring tracker can write a non-zero value here, copy the |
| 592 // stack contents, and read the value to know, if it is still non-zero, that | 599 // stack contents, and read the value to know, if it is still non-zero, that |
| 593 // the contents didn't change while being copied. This can handle concurrent | 600 // the contents didn't change while being copied. This can handle concurrent |
| 594 // snapshot operations only if each snapshot writes a different bit (which | 601 // snapshot operations only if each snapshot writes a different bit (which |
| 595 // is not the current implementation so no parallel snapshots allowed). | 602 // is not the current implementation so no parallel snapshots allowed). |
| 596 std::atomic<uint32_t> stack_unchanged; | 603 std::atomic<uint32_t> data_unchanged; |
| 604 | |
| 605 // The last "exception" activity. This can't be stored on the stack because | |
| 606 // that could get popped as things unwind. | |
| 607 Activity last_exception; | |
| 597 | 608 |
| 598 // The name of the thread (up to a maximum length). Dynamic-length names | 609 // The name of the thread (up to a maximum length). Dynamic-length names |
| 599 // are not practical since the memory has to come from the same persistent | 610 // are not practical since the memory has to come from the same persistent |
| 600 // allocator that holds this structure and to which this object has no | 611 // allocator that holds this structure and to which this object has no |
| 601 // reference. | 612 // reference. |
| 602 char thread_name[32]; | 613 char thread_name[32]; |
| 603 }; | 614 }; |
| 604 | 615 |
| 605 ThreadActivityTracker::Snapshot::Snapshot() {} | 616 ThreadActivityTracker::Snapshot::Snapshot() {} |
| 606 ThreadActivityTracker::Snapshot::~Snapshot() {} | 617 ThreadActivityTracker::Snapshot::~Snapshot() {} |
| (...skipping 57 matching lines...) | |
| 664 // Provided memory should either be completely initialized or all zeros. | 675 // Provided memory should either be completely initialized or all zeros. |
| 665 if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) { | 676 if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) { |
| 666 // This is a new file. Double-check other fields and then initialize. | 677 // This is a new file. Double-check other fields and then initialize. |
| 667 DCHECK_EQ(0, header_->owner.process_id); | 678 DCHECK_EQ(0, header_->owner.process_id); |
| 668 DCHECK_EQ(0, header_->owner.create_stamp); | 679 DCHECK_EQ(0, header_->owner.create_stamp); |
| 669 DCHECK_EQ(0, header_->thread_ref.as_id); | 680 DCHECK_EQ(0, header_->thread_ref.as_id); |
| 670 DCHECK_EQ(0, header_->start_time); | 681 DCHECK_EQ(0, header_->start_time); |
| 671 DCHECK_EQ(0, header_->start_ticks); | 682 DCHECK_EQ(0, header_->start_ticks); |
| 672 DCHECK_EQ(0U, header_->stack_slots); | 683 DCHECK_EQ(0U, header_->stack_slots); |
| 673 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed)); | 684 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed)); |
| 674 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed)); | 685 DCHECK_EQ(0U, header_->data_unchanged.load(std::memory_order_relaxed)); |
| 675 DCHECK_EQ(0, stack_[0].time_internal); | 686 DCHECK_EQ(0, stack_[0].time_internal); |
| 676 DCHECK_EQ(0U, stack_[0].origin_address); | 687 DCHECK_EQ(0U, stack_[0].origin_address); |
| 677 DCHECK_EQ(0U, stack_[0].call_stack[0]); | 688 DCHECK_EQ(0U, stack_[0].call_stack[0]); |
| 678 DCHECK_EQ(0U, stack_[0].data.task.sequence_id); | 689 DCHECK_EQ(0U, stack_[0].data.task.sequence_id); |
| 679 | 690 |
| 680 #if defined(OS_WIN) | 691 #if defined(OS_WIN) |
| 681 header_->thread_ref.as_tid = PlatformThread::CurrentId(); | 692 header_->thread_ref.as_tid = PlatformThread::CurrentId(); |
| 682 #elif defined(OS_POSIX) | 693 #elif defined(OS_POSIX) |
| 683 header_->thread_ref.as_handle = | 694 header_->thread_ref.as_handle = |
| 684 PlatformThread::CurrentHandle().platform_handle(); | 695 PlatformThread::CurrentHandle().platform_handle(); |
| (...skipping 93 matching lines...) | |
| 778 // Validate that everything is running correctly. | 789 // Validate that everything is running correctly. |
| 779 DCHECK_EQ(id, depth); | 790 DCHECK_EQ(id, depth); |
| 780 | 791 |
| 781 // A thread-checker creates a lock to check the thread-id which means | 792 // A thread-checker creates a lock to check the thread-id which means |
| 782 // re-entry into this code if lock acquisitions are being tracked. | 793 // re-entry into this code if lock acquisitions are being tracked. |
| 783 DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE || | 794 DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE || |
| 784 thread_checker_.CalledOnValidThread()); | 795 thread_checker_.CalledOnValidThread()); |
| 785 | 796 |
| 786 // The stack has shrunk meaning that some other thread trying to copy the | 797 // The stack has shrunk meaning that some other thread trying to copy the |
| 787 // contents for reporting purposes could get bad data. That thread would | 798 // contents for reporting purposes could get bad data. That thread would |
| 788 // have written a non-zero value into |stack_unchanged|; clearing it here | 799 // have written a non-zero value into |data_unchanged|; clearing it here |
| 789 // will let that thread detect that something did change. This needs to | 800 // will let that thread detect that something did change. This needs to |
| 790 // happen after the atomic |depth| operation above so a "release" store | 801 // happen after the atomic |depth| operation above so a "release" store |
| 791 // is required. | 802 // is required. |
| 792 header_->stack_unchanged.store(0, std::memory_order_release); | 803 header_->data_unchanged.store(0, std::memory_order_release); |
| 793 } | 804 } |
| 794 | 805 |
| 795 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData( | 806 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData( |
| 796 ActivityId id, | 807 ActivityId id, |
| 797 ActivityTrackerMemoryAllocator* allocator) { | 808 ActivityTrackerMemoryAllocator* allocator) { |
| 798 // User-data is only stored for activities actually held in the stack. | 809 // Don't allow user data for lock acquisition as recursion may occur. |
| 799 if (id < stack_slots_) { | 810 if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) { |
| 800 // Don't allow user data for lock acquisition as recursion may occur. | 811 NOTREACHED(); |
| 801 if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) { | 812 return MakeUnique<ActivityUserData>(); |
| 802 NOTREACHED(); | |
| 803 return MakeUnique<ActivityUserData>(nullptr, 0); | |
| 804 } | |
| 805 | |
| 806 // Get (or reuse) a block of memory and create a real UserData object | |
| 807 // on it. | |
| 808 PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference(); | |
| 809 void* memory = | |
| 810 allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny); | |
| 811 if (memory) { | |
| 812 std::unique_ptr<ActivityUserData> user_data = | |
| 813 MakeUnique<ActivityUserData>(memory, kUserDataSize); | |
| 814 stack_[id].user_data_ref = ref; | |
| 815 stack_[id].user_data_id = user_data->id(); | |
| 816 return user_data; | |
| 817 } | |
| 818 } | 813 } |
| 819 | 814 |
| 820 // Return a dummy object that will still accept (but ignore) Set() calls. | 815 // User-data is only stored for activities actually held in the stack. |
| 821 return MakeUnique<ActivityUserData>(nullptr, 0); | 816 if (id >= stack_slots_) |
| 817 return MakeUnique<ActivityUserData>(); | |
| 818 | |
| 819 // Create and return a real UserData object. | |
| 820 return CreateUserDataForActivity(&stack_[id], allocator); | |
| 822 } | 821 } |
| 823 | 822 |
| 824 bool ThreadActivityTracker::HasUserData(ActivityId id) { | 823 bool ThreadActivityTracker::HasUserData(ActivityId id) { |
| 825 // User-data is only stored for activities actually held in the stack. | 824 // User-data is only stored for activities actually held in the stack. |
| 826 return (id < stack_slots_ && stack_[id].user_data_ref); | 825 return (id < stack_slots_ && stack_[id].user_data_ref); |
| 827 } | 826 } |
| 828 | 827 |
| 829 void ThreadActivityTracker::ReleaseUserData( | 828 void ThreadActivityTracker::ReleaseUserData( |
| 830 ActivityId id, | 829 ActivityId id, |
| 831 ActivityTrackerMemoryAllocator* allocator) { | 830 ActivityTrackerMemoryAllocator* allocator) { |
| 832 // User-data is only stored for activities actually held in the stack. | 831 // User-data is only stored for activities actually held in the stack. |
| 833 if (id < stack_slots_ && stack_[id].user_data_ref) { | 832 if (id < stack_slots_ && stack_[id].user_data_ref) { |
| 834 allocator->ReleaseObjectReference(stack_[id].user_data_ref); | 833 allocator->ReleaseObjectReference(stack_[id].user_data_ref); |
| 835 stack_[id].user_data_ref = 0; | 834 stack_[id].user_data_ref = 0; |
| 836 } | 835 } |
| 836 // Release user-data for an exception. | |
| 837 if (header_->last_exception.user_data_ref) { | |
| 838 exception_data.reset(); | |
| 839 allocator->ReleaseObjectReference(header_->last_exception.user_data_ref); | |
| 840 header_->last_exception.user_data_ref = 0; | |
| 841 } | |
| 842 } | |
| 843 | |
| 844 ActivityUserData& ThreadActivityTracker::ExceptionActivity( | |
| 845 const void* program_counter, | |
| 846 const void* origin, | |
| Sigurður Ásgeirsson 2017/02/24 18:37:41: is this the exception address? If so, maybe name i… | |
| bcwhite 2017/02/24 19:10:58: Yes, it's for holding the address where the except… | |
| 847 Activity::Type type, | |
| 848 const ActivityData& data, | |
| 849 ActivityTrackerMemoryAllocator* allocator) { | |
| Sigurður Ásgeirsson 2017/02/24 18:37:41: Note that here you will be executing in the contex… | |
| bcwhite 2017/02/24 19:10:58: There are two points that need hardening: 1) Crea… | |
| manzagop (departed) 2017/02/27 16:05:15: IIRC the locks are to enable memory reuse. Another… | |
| bcwhite 2017/03/14 12:53:25: Correct on all counts. Better to avoid the leaks… | |
| 850 // A thread-checker creates a lock to check the thread-id which means | |
| 851 // re-entry into this code if lock acquisitions are being tracked. | |
| 852 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 853 | |
| 854 // If there was a previous exception, release its user-data. | |
| 855 if (header_->last_exception.user_data_ref) { | |
| 856 allocator->ReleaseObjectReference(header_->last_exception.user_data_ref); | |
| 857 header_->last_exception.user_data_ref = 0; | |
| 858 } | |
| 859 | |
| 860 // Fill the reusable exception activity. | |
| 861 Activity::FillFrom(&header_->last_exception, program_counter, origin, type, | |
| 862 data); | |
| 863 | |
| 864 // Create a new user-data object for holding additional information. | |
| 865 exception_data = | |
| 866 CreateUserDataForActivity(&header_->last_exception, allocator); | |
| 867 | |
| 868 // The data has changed meaning that some other thread trying to copy the | |
| 869 // contents for reporting purposes could get bad data. | |
| 870 header_->data_unchanged.store(0, std::memory_order_relaxed); | |
| 871 | |
| 872 return *exception_data; | |
| 837 } | 873 } |
| 838 | 874 |
| 839 bool ThreadActivityTracker::IsValid() const { | 875 bool ThreadActivityTracker::IsValid() const { |
| 840 if (header_->owner.data_id.load(std::memory_order_acquire) == 0 || | 876 if (header_->owner.data_id.load(std::memory_order_acquire) == 0 || |
| 841 header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 || | 877 header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 || |
| 842 header_->start_time == 0 || header_->start_ticks == 0 || | 878 header_->start_time == 0 || header_->start_ticks == 0 || |
| 843 header_->stack_slots != stack_slots_ || | 879 header_->stack_slots != stack_slots_ || |
| 844 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { | 880 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { |
| 845 return false; | 881 return false; |
| 846 } | 882 } |
| (...skipping 24 matching lines...) | |
| 871 | 907 |
| 872 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { | 908 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { |
| 873 // Remember the data IDs to ensure nothing is replaced during the snapshot | 909 // Remember the data IDs to ensure nothing is replaced during the snapshot |
| 874 // operation. Use "acquire" so that all the non-atomic fields of the | 910 // operation. Use "acquire" so that all the non-atomic fields of the |
| 875 // structure are valid (at least at the current moment in time). | 911 // structure are valid (at least at the current moment in time). |
| 876 const uint32_t starting_id = | 912 const uint32_t starting_id = |
| 877 header_->owner.data_id.load(std::memory_order_acquire); | 913 header_->owner.data_id.load(std::memory_order_acquire); |
| 878 const int64_t starting_process_id = header_->owner.process_id; | 914 const int64_t starting_process_id = header_->owner.process_id; |
| 879 const int64_t starting_thread_id = header_->thread_ref.as_id; | 915 const int64_t starting_thread_id = header_->thread_ref.as_id; |
| 880 | 916 |
| 881 // Write a non-zero value to |stack_unchanged| so it's possible to detect | 917 // Write a non-zero value to |data_unchanged| so it's possible to detect |
| 882 // at the end that nothing has changed since copying the data began. A | 918 // at the end that nothing has changed since copying the data began. A |
| 883 // "cst" operation is required to ensure it occurs before everything else. | 919 // "cst" operation is required to ensure it occurs before everything else. |
| 884 // Using "cst" memory ordering is relatively expensive but this is only | 920 // Using "cst" memory ordering is relatively expensive but this is only |
| 885 // done during analysis so doesn't directly affect the worker threads. | 921 // done during analysis so doesn't directly affect the worker threads. |
| 886 header_->stack_unchanged.store(1, std::memory_order_seq_cst); | 922 header_->data_unchanged.store(1, std::memory_order_seq_cst); |
| 887 | 923 |
| 888 // Fetching the current depth also "acquires" the contents of the stack. | 924 // Fetching the current depth also "acquires" the contents of the stack. |
| 889 depth = header_->current_depth.load(std::memory_order_acquire); | 925 depth = header_->current_depth.load(std::memory_order_acquire); |
| 890 uint32_t count = std::min(depth, stack_slots_); | 926 uint32_t count = std::min(depth, stack_slots_); |
| 891 output_snapshot->activity_stack.resize(count); | 927 output_snapshot->activity_stack.resize(count); |
| 892 if (count > 0) { | 928 if (count > 0) { |
| 893 // Copy the existing contents. Memcpy is used for speed. | 929 // Copy the existing contents. Memcpy is used for speed. |
| 894 memcpy(&output_snapshot->activity_stack[0], stack_, | 930 memcpy(&output_snapshot->activity_stack[0], stack_, |
| 895 count * sizeof(Activity)); | 931 count * sizeof(Activity)); |
| 896 } | 932 } |
| 897 | 933 |
| 934 // Capture the last exception. | |
| 935 memcpy(&output_snapshot->last_exception, &header_->last_exception, | |
| 936 sizeof(Activity)); | |
| 937 | |
| 938 // TODO(bcwhite): Snapshot other things here. | |
| 939 | |
| 898 // Retry if something changed during the copy. A "cst" operation ensures | 940 // Retry if something changed during the copy. A "cst" operation ensures |
| 899 // it must happen after all the above operations. | 941 // it must happen after all the above operations. |
| 900 if (!header_->stack_unchanged.load(std::memory_order_seq_cst)) | 942 if (!header_->data_unchanged.load(std::memory_order_seq_cst)) |
| 901 continue; | 943 continue; |
| 902 | 944 |
| 903 // Stack copied. Record its full depth. | 945 // Stack copied. Record its full depth. |
| 904 output_snapshot->activity_stack_depth = depth; | 946 output_snapshot->activity_stack_depth = depth; |
| 905 | 947 |
| 906 // TODO(bcwhite): Snapshot other things here. | |
| 907 | |
| 908 // Get the general thread information. | 948 // Get the general thread information. |
| 909 output_snapshot->thread_name = | 949 output_snapshot->thread_name = |
| 910 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); | 950 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); |
| 911 output_snapshot->thread_id = header_->thread_ref.as_id; | 951 output_snapshot->thread_id = header_->thread_ref.as_id; |
| 912 output_snapshot->process_id = header_->owner.process_id; | 952 output_snapshot->process_id = header_->owner.process_id; |
| 913 | 953 |
| 914 // All characters of the thread-name buffer were copied so as to not break | 954 // All characters of the thread-name buffer were copied so as to not break |
| 915 // if the trailing NUL were missing. Now limit the length if the actual | 955 // if the trailing NUL were missing. Now limit the length if the actual |
| 916 // name is shorter. | 956 // name is shorter. |
| 917 output_snapshot->thread_name.resize( | 957 output_snapshot->thread_name.resize( |
| (...skipping 15 matching lines...) | |
| 933 | 973 |
| 934 // Change all the timestamps in the activities from "ticks" to "wall" time. | 974 // Change all the timestamps in the activities from "ticks" to "wall" time. |
| 935 const Time start_time = Time::FromInternalValue(header_->start_time); | 975 const Time start_time = Time::FromInternalValue(header_->start_time); |
| 936 const int64_t start_ticks = header_->start_ticks; | 976 const int64_t start_ticks = header_->start_ticks; |
| 937 for (Activity& activity : output_snapshot->activity_stack) { | 977 for (Activity& activity : output_snapshot->activity_stack) { |
| 938 activity.time_internal = | 978 activity.time_internal = |
| 939 (start_time + | 979 (start_time + |
| 940 TimeDelta::FromInternalValue(activity.time_internal - start_ticks)) | 980 TimeDelta::FromInternalValue(activity.time_internal - start_ticks)) |
| 941 .ToInternalValue(); | 981 .ToInternalValue(); |
| 942 } | 982 } |
| 983 output_snapshot->last_exception.time_internal = | |
| 984 (start_time + | |
| 985 TimeDelta::FromInternalValue( | |
| 986 output_snapshot->last_exception.time_internal - start_ticks)) | |
| 987 .ToInternalValue(); | |
| 943 | 988 |
| 944 // Success! | 989 // Success! |
| 945 return true; | 990 return true; |
| 946 } | 991 } |
| 947 | 992 |
| 948 // Too many attempts. | 993 // Too many attempts. |
| 949 return false; | 994 return false; |
| 950 } | 995 } |
| 951 | 996 |
| 952 const void* ThreadActivityTracker::GetBaseAddress() { | 997 const void* ThreadActivityTracker::GetBaseAddress() { |
| (...skipping 11 matching lines...) | |
| 964 int64_t* out_stamp) { | 1009 int64_t* out_stamp) { |
| 965 const Header* header = reinterpret_cast<const Header*>(memory); | 1010 const Header* header = reinterpret_cast<const Header*>(memory); |
| 966 return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp); | 1011 return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp); |
| 967 } | 1012 } |
| 968 | 1013 |
| 969 // static | 1014 // static |
| 970 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { | 1015 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { |
| 971 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); | 1016 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); |
| 972 } | 1017 } |
| 973 | 1018 |
| 1019 std::unique_ptr<ActivityUserData> | |
| 1020 ThreadActivityTracker::CreateUserDataForActivity( | |
| 1021 Activity* activity, | |
| 1022 ActivityTrackerMemoryAllocator* allocator) { | |
| 1023 DCHECK_EQ(0U, activity->user_data_ref); | |
| 1024 | |
| 1025 PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference(); | |
| 1026 void* memory = allocator->GetAsArray<char>(ref, kUserDataSize); | |
| 1027 if (memory) { | |
| 1028 std::unique_ptr<ActivityUserData> user_data = | |
| 1029 MakeUnique<ActivityUserData>(memory, kUserDataSize); | |
| 1030 activity->user_data_ref = ref; | |
| 1031 activity->user_data_id = user_data->id(); | |
| 1032 return user_data; | |
| 1033 } | |
| 1034 | |
| 1035 // Return a dummy object that will still accept (but ignore) Set() calls. | |
| 1036 return MakeUnique<ActivityUserData>(); | |
| 1037 } | |
| 1038 | |
| 974 // The instantiation of the GlobalActivityTracker object. | 1039 // The instantiation of the GlobalActivityTracker object. |
| 975 // The object held here will obviously not be destructed at process exit | 1040 // The object held here will obviously not be destructed at process exit |
| 976 // but that's best since PersistentMemoryAllocator objects (that underlie | 1041 // but that's best since PersistentMemoryAllocator objects (that underlie |
| 977 // GlobalActivityTracker objects) are explicitly forbidden from doing anything | 1042 // GlobalActivityTracker objects) are explicitly forbidden from doing anything |
| 978 // essential at exit anyway due to the fact that they depend on data managed | 1043 // essential at exit anyway due to the fact that they depend on data managed |
| 979 // elsewhere and which could be destructed first. An AtomicWord is used instead | 1044 // elsewhere and which could be destructed first. An AtomicWord is used instead |
| 980 // of std::atomic because the latter can create global ctors and dtors. | 1045 // of std::atomic because the latter can create global ctors and dtors. |
| 981 subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0; | 1046 subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0; |
| 982 | 1047 |
| 983 GlobalActivityTracker::ModuleInfo::ModuleInfo() {} | 1048 GlobalActivityTracker::ModuleInfo::ModuleInfo() {} |
| (...skipping 133 matching lines...) | |
| 1117 } | 1182 } |
| 1118 | 1183 |
| 1119 ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() { | 1184 ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() { |
| 1120 if (!user_data_) { | 1185 if (!user_data_) { |
| 1121 if (tracker_) { | 1186 if (tracker_) { |
| 1122 GlobalActivityTracker* global = GlobalActivityTracker::Get(); | 1187 GlobalActivityTracker* global = GlobalActivityTracker::Get(); |
| 1123 AutoLock lock(global->user_data_allocator_lock_); | 1188 AutoLock lock(global->user_data_allocator_lock_); |
| 1124 user_data_ = | 1189 user_data_ = |
| 1125 tracker_->GetUserData(activity_id_, &global->user_data_allocator_); | 1190 tracker_->GetUserData(activity_id_, &global->user_data_allocator_); |
| 1126 } else { | 1191 } else { |
| 1127 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); | 1192 user_data_ = MakeUnique<ActivityUserData>(); |
| 1128 } | 1193 } |
| 1129 } | 1194 } |
| 1130 return *user_data_; | 1195 return *user_data_; |
| 1131 } | 1196 } |
| 1132 | 1197 |
| 1133 GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory, | 1198 GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory, |
| 1134 size_t size) | 1199 size_t size) |
| 1135 : ActivityUserData(memory, size) {} | 1200 : ActivityUserData(memory, size) {} |
| 1136 | 1201 |
| 1137 GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {} | 1202 GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {} |
| (...skipping 411 matching lines...) | |
| 1549 | 1614 |
| 1550 // Remove the destructed tracker from the set of known ones. | 1615 // Remove the destructed tracker from the set of known ones. |
| 1551 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); | 1616 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); |
| 1552 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); | 1617 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); |
| 1553 | 1618 |
| 1554 // Release this memory for re-use at a later time. | 1619 // Release this memory for re-use at a later time. |
| 1555 base::AutoLock autolock(thread_tracker_allocator_lock_); | 1620 base::AutoLock autolock(thread_tracker_allocator_lock_); |
| 1556 thread_tracker_allocator_.ReleaseObjectReference(mem_reference); | 1621 thread_tracker_allocator_.ReleaseObjectReference(mem_reference); |
| 1557 } | 1622 } |
| 1558 | 1623 |
| 1624 ActivityUserData& GlobalActivityTracker::RecordExceptionImpl( | |
| 1625 const void* pc, | |
| 1626 const void* origin) { | |
| 1627 ThreadActivityTracker* tracker = GetOrCreateTrackerForCurrentThread(); | |
| 1628 if (!tracker) | |
| 1629 return g_data_sink.Get(); | |
| 1630 | |
| 1631 AutoLock lock(user_data_allocator_lock_); | |
| 1632 return tracker->ExceptionActivity(pc, origin, Activity::ACT_EXCEPTION, | |
| 1633 ActivityData::ForGeneric(0, 0), | |
| 1634 &user_data_allocator_); | |
| 1635 } | |
| 1636 | |
| 1559 // static | 1637 // static |
| 1560 void GlobalActivityTracker::OnTLSDestroy(void* value) { | 1638 void GlobalActivityTracker::OnTLSDestroy(void* value) { |
| 1561 delete reinterpret_cast<ManagedActivityTracker*>(value); | 1639 delete reinterpret_cast<ManagedActivityTracker*>(value); |
| 1562 } | 1640 } |
| 1563 | 1641 |
| 1564 ScopedActivity::ScopedActivity(const void* program_counter, | 1642 ScopedActivity::ScopedActivity(const void* program_counter, |
| 1565 uint8_t action, | 1643 uint8_t action, |
| 1566 uint32_t id, | 1644 uint32_t id, |
| 1567 int32_t info) | 1645 int32_t info) |
| 1568 : GlobalActivityTracker::ScopedThreadActivity( | 1646 : GlobalActivityTracker::ScopedThreadActivity( |
| (...skipping 70 matching lines...) | |
| 1639 : GlobalActivityTracker::ScopedThreadActivity( | 1717 : GlobalActivityTracker::ScopedThreadActivity( |
| 1640 program_counter, | 1718 program_counter, |
| 1641 nullptr, | 1719 nullptr, |
| 1642 Activity::ACT_PROCESS_WAIT, | 1720 Activity::ACT_PROCESS_WAIT, |
| 1643 ActivityData::ForProcess(process->Pid()), | 1721 ActivityData::ForProcess(process->Pid()), |
| 1644 /*lock_allowed=*/true) {} | 1722 /*lock_allowed=*/true) {} |
| 1645 #endif | 1723 #endif |
| 1646 | 1724 |
| 1647 } // namespace debug | 1725 } // namespace debug |
| 1648 } // namespace base | 1726 } // namespace base |
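
The change to `kExpectedInstanceSize` (new lines 565-567) follows directly from the new `last_exception` field: the header now embeds one full `Activity` record in addition to the `OwningProcess` block, while the remaining fixed-size fields still sum to the 72-byte tail. A worked breakdown of that 72, assuming the natural sizes of the declared types (8 bytes per 64-bit field, 4 per 32-bit field):

- `thread_ref` (union whose largest member is `int64_t as_id`): 8
- `start_time` + `start_ticks` (`int64_t` each): 8 + 8
- `stack_slots` + `padding` (`uint32_t` each): 4 + 4
- `current_depth` + `data_unchanged` (`std::atomic<uint32_t>` each): 4 + 4
- `thread_name[32]`: 32

8 + 8 + 8 + 4 + 4 + 4 + 4 + 32 = 72, which is why only the `Activity::kExpectedInstanceSize` term needed to be added by this change.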
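
The rename of `stack_unchanged` to `data_unchanged` (and the extra copy of `last_exception` inside the retry loop) keeps the same optimistic, seqlock-like protocol described in the header comments: the owning thread clears the flag whenever it pops an activity or rewrites the exception record, while `CreateSnapshot()` sets the flag, copies everything, and only trusts the copy if the flag is still set afterwards. Below is a minimal, self-contained sketch of that pattern; the names (`SketchTracker`, `kSlots`, `kMaxAttempts`) and the use of plain integers in place of `Activity` records are illustrative assumptions, not the real implementation.

```cpp
#include <algorithm>
#include <atomic>
#include <cstdint>
#include <vector>

struct SketchTracker {
  static constexpr uint32_t kSlots = 4;    // Stand-in for stack_slots_.
  static constexpr int kMaxAttempts = 10;  // The real code also bounds retries.

  std::atomic<uint32_t> current_depth{0};
  std::atomic<uint32_t> data_unchanged{0};
  uint64_t stack[kSlots] = {};             // Stand-in for the Activity array.

  // Called only on the owning thread.
  void Push(uint64_t value) {
    uint32_t depth = current_depth.load(std::memory_order_relaxed);
    if (depth < kSlots)
      stack[depth] = value;
    // "Release" publishes the slot contents before the new depth is visible.
    current_depth.store(depth + 1, std::memory_order_release);
  }

  void Pop() {
    current_depth.fetch_sub(1, std::memory_order_acquire);
    // Invalidate any snapshot that is in progress on another thread.
    data_unchanged.store(0, std::memory_order_release);
  }

  // Called from an analysis thread; retries until it observes a stable copy.
  bool Snapshot(std::vector<uint64_t>* out) {
    for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
      data_unchanged.store(1, std::memory_order_seq_cst);
      uint32_t depth = current_depth.load(std::memory_order_acquire);
      uint32_t count = std::min(depth, uint32_t{kSlots});
      out->assign(stack, stack + count);
      // If nothing was popped (or no exception recorded) while copying,
      // the flag is still 1 and the copy is self-consistent.
      if (data_unchanged.load(std::memory_order_seq_cst))
        return true;
    }
    return false;  // Too much churn; give up, as the real code does.
  }
};
```

The key property is that the analysis thread never blocks the tracked thread; it simply retries when the cleared flag tells it the copy raced with a pop or an exception update.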
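
The new default constructor `ActivityUserData()` (new line 323), the leaky `g_data_sink`, and the `MakeUnique<ActivityUserData>()` fallbacks all serve one purpose: callers such as `GetUserData()` and `RecordExceptionImpl()` always get an object whose setters may be called, even when no persistent memory is available or the activity is not held in a stack slot. A rough sketch of that accept-but-discard null-object pattern follows; the simplified `Set()` signature and record layout are hypothetical stand-ins for the real format.

```cpp
#include <cstddef>
#include <cstring>
#include <memory>
#include <string>

class SketchUserData {
 public:
  // Default construction yields a "data sink" that silently discards writes.
  SketchUserData() : SketchUserData(nullptr, 0) {}
  SketchUserData(void* memory, std::size_t size)
      : memory_(static_cast<char*>(memory)), available_(size) {}

  void Set(const std::string& name, const std::string& value) {
    if (!memory_)
      return;  // No backing store: accept the call but ignore it.
    std::size_t needed = name.size() + value.size() + 2;
    if (needed > available_)
      return;  // Out of room; simply stop accepting new data.
    // Toy record layout: name NUL value NUL, appended in order.
    std::memcpy(memory_, name.c_str(), name.size() + 1);
    std::memcpy(memory_ + name.size() + 1, value.c_str(), value.size() + 1);
    memory_ += needed;
    available_ -= needed;
  }

 private:
  char* memory_;
  std::size_t available_;
};

// Callers never branch on whether storage was actually available:
//   auto data = allocation_ok ? std::make_unique<SketchUserData>(mem, size)
//                             : std::make_unique<SketchUserData>();
//   data->Set("origin", "browser");  // Safe either way.
```

This is the same reason `GetUserData()` can unconditionally return a usable object and `RecordExceptionImpl()` can fall back to `g_data_sink` when no thread tracker exists.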