| OLD | NEW |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/debug/activity_tracker.h" | 5 #include "base/debug/activity_tracker.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <limits> | 8 #include <limits> |
| 9 #include <utility> | 9 #include <utility> |
| 10 | 10 |
| (...skipping 86 matching lines...) |
| 97 // Determines the previous aligned index. | 97 // Determines the previous aligned index. |
| 98 size_t RoundDownToAlignment(size_t index, size_t alignment) { | 98 size_t RoundDownToAlignment(size_t index, size_t alignment) { |
| 99 return index & (0 - alignment); | 99 return index & (0 - alignment); |
| 100 } | 100 } |
| 101 | 101 |
| 102 // Determines the next aligned index. | 102 // Determines the next aligned index. |
| 103 size_t RoundUpToAlignment(size_t index, size_t alignment) { | 103 size_t RoundUpToAlignment(size_t index, size_t alignment) { |
| 104 return (index + (alignment - 1)) & (0 - alignment); | 104 return (index + (alignment - 1)) & (0 - alignment); |
| 105 } | 105 } |
| 106 | 106 |
| 107 // Converts "tick" timing into wall time. |
| 108 Time WallTimeFromTickTime(int64_t ticks_start, int64_t ticks, Time time_start) { |
| 109 return time_start + TimeDelta::FromInternalValue(ticks - ticks_start); |
| 110 } |
| 111 |
| 107 } // namespace | 112 } // namespace |
| 108 | 113 |
| 109 OwningProcess::OwningProcess() {} | 114 OwningProcess::OwningProcess() {} |
| 110 OwningProcess::~OwningProcess() {} | 115 OwningProcess::~OwningProcess() {} |
| 111 | 116 |
| 112 void OwningProcess::Release_Initialize() { | 117 void OwningProcess::Release_Initialize() { |
| 113 uint32_t old_id = data_id.load(std::memory_order_acquire); | 118 uint32_t old_id = data_id.load(std::memory_order_acquire); |
| 114 DCHECK_EQ(0U, old_id); | 119 DCHECK_EQ(0U, old_id); |
| 115 process_id = GetCurrentProcId(); | 120 process_id = GetCurrentProcId(); |
| 116 create_stamp = Time::Now().ToInternalValue(); | 121 create_stamp = Time::Now().ToInternalValue(); |
| (...skipping 193 matching lines...) |
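
The RoundDownToAlignment/RoundUpToAlignment helpers in the hunk above rely on (0 - alignment) wrapping around to a mask whose low bits are zero, which is only valid for power-of-two alignments. A minimal standalone sketch of the same bit trick with made-up values, not the Chromium functions themselves:

    #include <cassert>
    #include <cstddef>

    // Same trick as RoundDownToAlignment/RoundUpToAlignment above: for a
    // power-of-two alignment, (0 - alignment) wraps to a mask such as
    // ...11111000 for alignment 8, clearing the low bits.
    size_t RoundDown(size_t index, size_t alignment) {
      return index & (0 - alignment);
    }
    size_t RoundUp(size_t index, size_t alignment) {
      return (index + (alignment - 1)) & (0 - alignment);
    }

    int main() {
      assert(RoundDown(13, 8) == 8);   // previous multiple of 8
      assert(RoundUp(13, 8) == 16);    // next multiple of 8
      assert(RoundUp(16, 8) == 16);    // already-aligned values are unchanged
      return 0;
    }
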
| 310 // thus clang requires explicit out-of-line constructors and destructors even | 315 // thus clang requires explicit out-of-line constructors and destructors even |
| 311 // when they do nothing. | 316 // when they do nothing. |
| 312 ActivityUserData::ValueInfo::ValueInfo() {} | 317 ActivityUserData::ValueInfo::ValueInfo() {} |
| 313 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; | 318 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; |
| 314 ActivityUserData::ValueInfo::~ValueInfo() {} | 319 ActivityUserData::ValueInfo::~ValueInfo() {} |
| 315 ActivityUserData::MemoryHeader::MemoryHeader() {} | 320 ActivityUserData::MemoryHeader::MemoryHeader() {} |
| 316 ActivityUserData::MemoryHeader::~MemoryHeader() {} | 321 ActivityUserData::MemoryHeader::~MemoryHeader() {} |
| 317 ActivityUserData::FieldHeader::FieldHeader() {} | 322 ActivityUserData::FieldHeader::FieldHeader() {} |
| 318 ActivityUserData::FieldHeader::~FieldHeader() {} | 323 ActivityUserData::FieldHeader::~FieldHeader() {} |
| 319 | 324 |
| 325 ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0) {} |
| 326 |
| 320 ActivityUserData::ActivityUserData(void* memory, size_t size) | 327 ActivityUserData::ActivityUserData(void* memory, size_t size) |
| 321 : memory_(reinterpret_cast<char*>(memory)), | 328 : memory_(reinterpret_cast<char*>(memory)), |
| 322 available_(RoundDownToAlignment(size, kMemoryAlignment)), | 329 available_(RoundDownToAlignment(size, kMemoryAlignment)), |
| 323 header_(reinterpret_cast<MemoryHeader*>(memory)) { | 330 header_(reinterpret_cast<MemoryHeader*>(memory)) { |
| 324 // It's possible that no user data is being stored. | 331 // It's possible that no user data is being stored. |
| 325 if (!memory_) | 332 if (!memory_) |
| 326 return; | 333 return; |
| 327 | 334 |
| 328 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header"); | 335 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header"); |
| 329 DCHECK_LT(sizeof(MemoryHeader), available_); | 336 DCHECK_LT(sizeof(MemoryHeader), available_); |
| (...skipping 223 matching lines...) |
| 553 // This information is kept for every thread that is tracked. It is filled | 560 // This information is kept for every thread that is tracked. It is filled |
| 554 // the very first time the thread is seen. All fields must be of exact sizes | 561 // the very first time the thread is seen. All fields must be of exact sizes |
| 555 // so there is no issue moving between 32 and 64-bit builds. | 562 // so there is no issue moving between 32 and 64-bit builds. |
| 556 struct ThreadActivityTracker::Header { | 563 struct ThreadActivityTracker::Header { |
| 557 // Defined in .h for analyzer access. Increment this if structure changes! | 564 // Defined in .h for analyzer access. Increment this if structure changes! |
| 558 static constexpr uint32_t kPersistentTypeId = | 565 static constexpr uint32_t kPersistentTypeId = |
| 559 GlobalActivityTracker::kTypeIdActivityTracker; | 566 GlobalActivityTracker::kTypeIdActivityTracker; |
| 560 | 567 |
| 561 // Expected size for 32/64-bit check. | 568 // Expected size for 32/64-bit check. |
| 562 static constexpr size_t kExpectedInstanceSize = | 569 static constexpr size_t kExpectedInstanceSize = |
| 563 OwningProcess::kExpectedInstanceSize + 72; | 570 OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize + |
| 571 72; |
| 564 | 572 |
| 565 // This information uniquely identifies a process. | 573 // This information uniquely identifies a process. |
| 566 OwningProcess owner; | 574 OwningProcess owner; |
| 567 | 575 |
| 568 // The thread-id (thread_ref.as_id) to which this data belongs. This number | 576 // The thread-id (thread_ref.as_id) to which this data belongs. This number |
| 569 // is not guaranteed to mean anything but combined with the process-id from | 577 // is not guaranteed to mean anything but combined with the process-id from |
| 570 // OwningProcess is unique among all active trackers. | 578 // OwningProcess is unique among all active trackers. |
| 571 ThreadRef thread_ref; | 579 ThreadRef thread_ref; |
| 572 | 580 |
| 573 // The start-time and start-ticks when the data was created. Each activity | 581 // The start-time and start-ticks when the data was created. Each activity |
| 574 // record has a |time_internal| value that can be converted to a "wall time" | 582 // record has a |time_internal| value that can be converted to a "wall time" |
| 575 // with these two values. | 583 // with these two values. |
| 576 int64_t start_time; | 584 int64_t start_time; |
| 577 int64_t start_ticks; | 585 int64_t start_ticks; |
| 578 | 586 |
| 579 // The number of Activity slots (spaces that can hold an Activity) that | 587 // The number of Activity slots (spaces that can hold an Activity) that |
| 580 // immediately follow this structure in memory. | 588 // immediately follow this structure in memory. |
| 581 uint32_t stack_slots; | 589 uint32_t stack_slots; |
| 582 | 590 |
| 583 // Some padding to keep everything 64-bit aligned. | 591 // Some padding to keep everything 64-bit aligned. |
| 584 uint32_t padding; | 592 uint32_t padding; |
| 585 | 593 |
| 586 // The current depth of the stack. This may be greater than the number of | 594 // The current depth of the stack. This may be greater than the number of |
| 587 // slots. If the depth exceeds the number of slots, the newest entries | 595 // slots. If the depth exceeds the number of slots, the newest entries |
| 588 // won't be recorded. | 596 // won't be recorded. |
| 589 std::atomic<uint32_t> current_depth; | 597 std::atomic<uint32_t> current_depth; |
| 590 | 598 |
| 591 // A memory location used to indicate if changes have been made to the stack | 599 // A memory location used to indicate if changes have been made to the data |
| 592 // that would invalidate an in-progress read of its contents. The active | 600 // that would invalidate an in-progress read of its contents. The active |
| 593 // tracker will zero the value whenever something gets popped from the | 601 // tracker will zero the value whenever something gets popped from the |
| 594 // stack. A monitoring tracker can write a non-zero value here, copy the | 602 // stack. A monitoring tracker can write a non-zero value here, copy the |
| 595 // stack contents, and read the value to know, if it is still non-zero, that | 603 // stack contents, and read the value to know, if it is still non-zero, that |
| 596 // the contents didn't change while being copied. This can handle concurrent | 604 // the contents didn't change while being copied. This can handle concurrent |
| 597 // snapshot operations only if each snapshot writes a different bit (which | 605 // snapshot operations only if each snapshot writes a different bit (which |
| 598 // is not the current implementation so no parallel snapshots allowed). | 606 // is not the current implementation so no parallel snapshots allowed). |
| 599 std::atomic<uint32_t> stack_unchanged; | 607 std::atomic<uint32_t> data_unchanged; |
| 608 |
| 609 // The last "exception" activity. This can't be stored on the stack because |
| 610 // that could get popped as things unwind. |
| 611 Activity last_exception; |
| 600 | 612 |
| 601 // The name of the thread (up to a maximum length). Dynamic-length names | 613 // The name of the thread (up to a maximum length). Dynamic-length names |
| 602 // are not practical since the memory has to come from the same persistent | 614 // are not practical since the memory has to come from the same persistent |
| 603 // allocator that holds this structure and to which this object has no | 615 // allocator that holds this structure and to which this object has no |
| 604 // reference. | 616 // reference. |
| 605 char thread_name[32]; | 617 char thread_name[32]; |
| 606 }; | 618 }; |
| 607 | 619 |
| 608 ThreadActivityTracker::Snapshot::Snapshot() {} | 620 ThreadActivityTracker::Snapshot::Snapshot() {} |
| 609 ThreadActivityTracker::Snapshot::~Snapshot() {} | 621 ThreadActivityTracker::Snapshot::~Snapshot() {} |
| (...skipping 57 matching lines...) |
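
The data_unchanged field documented in the Header comment above behaves like a single-reader seqlock: the snapshot thread sets the flag, copies the data, then re-reads the flag, while the owning thread clears it on any mutation so the copy is known to be stale. A minimal sketch of that protocol, using hypothetical Shared/WriterUpdate/ReaderSnapshot names rather than the tracker classes:

    #include <atomic>
    #include <cstring>

    struct Shared {
      int payload[16];
      std::atomic<uint32_t> data_unchanged{0};
    };

    // Owning-thread side: mutate, then clear the flag so any snapshot that is
    // in progress notices the change and retries.
    void WriterUpdate(Shared* s, const int (&values)[16]) {
      std::memcpy(s->payload, values, sizeof(s->payload));
      s->data_unchanged.store(0, std::memory_order_release);
    }

    // Snapshot side: returns true only if no writer raced with the copy.
    bool ReaderSnapshot(Shared* s, int (&out)[16]) {
      s->data_unchanged.store(1, std::memory_order_seq_cst);  // announce copy
      std::memcpy(out, s->payload, sizeof(out));
      return s->data_unchanged.load(std::memory_order_seq_cst) != 0;
    }

A caller loops on ReaderSnapshot with a bounded retry count, which is the shape of the kMaxAttempts loop in CreateSnapshot further down; and as the Header comment notes, a single flag cannot distinguish concurrent readers, so parallel snapshots are not supported.
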
| 667 // Provided memory should either be completely initialized or all zeros. | 679 // Provided memory should either be completely initialized or all zeros. |
| 668 if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) { | 680 if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) { |
| 669 // This is a new file. Double-check other fields and then initialize. | 681 // This is a new file. Double-check other fields and then initialize. |
| 670 DCHECK_EQ(0, header_->owner.process_id); | 682 DCHECK_EQ(0, header_->owner.process_id); |
| 671 DCHECK_EQ(0, header_->owner.create_stamp); | 683 DCHECK_EQ(0, header_->owner.create_stamp); |
| 672 DCHECK_EQ(0, header_->thread_ref.as_id); | 684 DCHECK_EQ(0, header_->thread_ref.as_id); |
| 673 DCHECK_EQ(0, header_->start_time); | 685 DCHECK_EQ(0, header_->start_time); |
| 674 DCHECK_EQ(0, header_->start_ticks); | 686 DCHECK_EQ(0, header_->start_ticks); |
| 675 DCHECK_EQ(0U, header_->stack_slots); | 687 DCHECK_EQ(0U, header_->stack_slots); |
| 676 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed)); | 688 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed)); |
| 677 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed)); | 689 DCHECK_EQ(0U, header_->data_unchanged.load(std::memory_order_relaxed)); |
| 678 DCHECK_EQ(0, stack_[0].time_internal); | 690 DCHECK_EQ(0, stack_[0].time_internal); |
| 679 DCHECK_EQ(0U, stack_[0].origin_address); | 691 DCHECK_EQ(0U, stack_[0].origin_address); |
| 680 DCHECK_EQ(0U, stack_[0].call_stack[0]); | 692 DCHECK_EQ(0U, stack_[0].call_stack[0]); |
| 681 DCHECK_EQ(0U, stack_[0].data.task.sequence_id); | 693 DCHECK_EQ(0U, stack_[0].data.task.sequence_id); |
| 682 | 694 |
| 683 #if defined(OS_WIN) | 695 #if defined(OS_WIN) |
| 684 header_->thread_ref.as_tid = PlatformThread::CurrentId(); | 696 header_->thread_ref.as_tid = PlatformThread::CurrentId(); |
| 685 #elif defined(OS_POSIX) | 697 #elif defined(OS_POSIX) |
| 686 header_->thread_ref.as_handle = | 698 header_->thread_ref.as_handle = |
| 687 PlatformThread::CurrentHandle().platform_handle(); | 699 PlatformThread::CurrentHandle().platform_handle(); |
| (...skipping 93 matching lines...) |
| 781 // Validate that everything is running correctly. | 793 // Validate that everything is running correctly. |
| 782 DCHECK_EQ(id, depth); | 794 DCHECK_EQ(id, depth); |
| 783 | 795 |
| 784 // A thread-checker creates a lock to check the thread-id which means | 796 // A thread-checker creates a lock to check the thread-id which means |
| 785 // re-entry into this code if lock acquisitions are being tracked. | 797 // re-entry into this code if lock acquisitions are being tracked. |
| 786 DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE || | 798 DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE || |
| 787 thread_checker_.CalledOnValidThread()); | 799 thread_checker_.CalledOnValidThread()); |
| 788 | 800 |
| 789 // The stack has shrunk meaning that some other thread trying to copy the | 801 // The stack has shrunk meaning that some other thread trying to copy the |
| 790 // contents for reporting purposes could get bad data. That thread would | 802 // contents for reporting purposes could get bad data. That thread would |
| 791 // have written a non-zero value into |stack_unchanged|; clearing it here | 803 // have written a non-zero value into |data_unchanged|; clearing it here |
| 792 // will let that thread detect that something did change. This needs to | 804 // will let that thread detect that something did change. This needs to |
| 793 // happen after the atomic |depth| operation above so a "release" store | 805 // happen after the atomic |depth| operation above so a "release" store |
| 794 // is required. | 806 // is required. |
| 795 header_->stack_unchanged.store(0, std::memory_order_release); | 807 header_->data_unchanged.store(0, std::memory_order_release); |
| 796 } | 808 } |
| 797 | 809 |
| 798 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData( | 810 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData( |
| 799 ActivityId id, | 811 ActivityId id, |
| 800 ActivityTrackerMemoryAllocator* allocator) { | 812 ActivityTrackerMemoryAllocator* allocator) { |
| 801 // User-data is only stored for activities actually held in the stack. | 813 // Don't allow user data for lock acquisition as recursion may occur. |
| 802 if (id < stack_slots_) { | 814 if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) { |
| 803 // Don't allow user data for lock acquisition as recursion may occur. | 815 NOTREACHED(); |
| 804 if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) { | 816 return MakeUnique<ActivityUserData>(); |
| 805 NOTREACHED(); | |
| 806 return MakeUnique<ActivityUserData>(nullptr, 0); | |
| 807 } | |
| 808 | |
| 809 // Get (or reuse) a block of memory and create a real UserData object | |
| 810 // on it. | |
| 811 PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference(); | |
| 812 void* memory = | |
| 813 allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny); | |
| 814 if (memory) { | |
| 815 std::unique_ptr<ActivityUserData> user_data = | |
| 816 MakeUnique<ActivityUserData>(memory, kUserDataSize); | |
| 817 stack_[id].user_data_ref = ref; | |
| 818 stack_[id].user_data_id = user_data->id(); | |
| 819 return user_data; | |
| 820 } | |
| 821 } | 817 } |
| 822 | 818 |
| 823 // Return a dummy object that will still accept (but ignore) Set() calls. | 819 // User-data is only stored for activities actually held in the stack. |
| 824 return MakeUnique<ActivityUserData>(nullptr, 0); | 820 if (id >= stack_slots_) |
| 821 return MakeUnique<ActivityUserData>(); |
| 822 |
| 823 // Create and return a real UserData object. |
| 824 return CreateUserDataForActivity(&stack_[id], allocator); |
| 825 } | 825 } |
| 826 | 826 |
| 827 bool ThreadActivityTracker::HasUserData(ActivityId id) { | 827 bool ThreadActivityTracker::HasUserData(ActivityId id) { |
| 828 // User-data is only stored for activities actually held in the stack. | 828 // User-data is only stored for activities actually held in the stack. |
| 829 return (id < stack_slots_ && stack_[id].user_data_ref); | 829 return (id < stack_slots_ && stack_[id].user_data_ref); |
| 830 } | 830 } |
| 831 | 831 |
| 832 void ThreadActivityTracker::ReleaseUserData( | 832 void ThreadActivityTracker::ReleaseUserData( |
| 833 ActivityId id, | 833 ActivityId id, |
| 834 ActivityTrackerMemoryAllocator* allocator) { | 834 ActivityTrackerMemoryAllocator* allocator) { |
| 835 // User-data is only stored for activities actually held in the stack. | 835 // User-data is only stored for activities actually held in the stack. |
| 836 if (id < stack_slots_ && stack_[id].user_data_ref) { | 836 if (id < stack_slots_ && stack_[id].user_data_ref) { |
| 837 allocator->ReleaseObjectReference(stack_[id].user_data_ref); | 837 allocator->ReleaseObjectReference(stack_[id].user_data_ref); |
| 838 stack_[id].user_data_ref = 0; | 838 stack_[id].user_data_ref = 0; |
| 839 } | 839 } |
| 840 } | 840 } |
| 841 | 841 |
| 842 void ThreadActivityTracker::RecordExceptionActivity(const void* program_counter, |
| 843 const void* origin, |
| 844 Activity::Type type, |
| 845 const ActivityData& data) { |
| 846 // A thread-checker creates a lock to check the thread-id which means |
| 847 // re-entry into this code if lock acquisitions are being tracked. |
| 848 DCHECK(thread_checker_.CalledOnValidThread()); |
| 849 |
| 850 // Fill the reusable exception activity. |
| 851 Activity::FillFrom(&header_->last_exception, program_counter, origin, type, |
| 852 data); |
| 853 |
| 854 // The data has changed meaning that some other thread trying to copy the |
| 855 // contents for reporting purposes could get bad data. |
| 856 header_->data_unchanged.store(0, std::memory_order_relaxed); |
| 857 } |
| 858 |
| 842 bool ThreadActivityTracker::IsValid() const { | 859 bool ThreadActivityTracker::IsValid() const { |
| 843 if (header_->owner.data_id.load(std::memory_order_acquire) == 0 || | 860 if (header_->owner.data_id.load(std::memory_order_acquire) == 0 || |
| 844 header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 || | 861 header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 || |
| 845 header_->start_time == 0 || header_->start_ticks == 0 || | 862 header_->start_time == 0 || header_->start_ticks == 0 || |
| 846 header_->stack_slots != stack_slots_ || | 863 header_->stack_slots != stack_slots_ || |
| 847 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { | 864 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { |
| 848 return false; | 865 return false; |
| 849 } | 866 } |
| 850 | 867 |
| 851 return valid_; | 868 return valid_; |
| (...skipping 22 matching lines...) |
| 874 | 891 |
| 875 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { | 892 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { |
| 876 // Remember the data IDs to ensure nothing is replaced during the snapshot | 893 // Remember the data IDs to ensure nothing is replaced during the snapshot |
| 877 // operation. Use "acquire" so that all the non-atomic fields of the | 894 // operation. Use "acquire" so that all the non-atomic fields of the |
| 878 // structure are valid (at least at the current moment in time). | 895 // structure are valid (at least at the current moment in time). |
| 879 const uint32_t starting_id = | 896 const uint32_t starting_id = |
| 880 header_->owner.data_id.load(std::memory_order_acquire); | 897 header_->owner.data_id.load(std::memory_order_acquire); |
| 881 const int64_t starting_process_id = header_->owner.process_id; | 898 const int64_t starting_process_id = header_->owner.process_id; |
| 882 const int64_t starting_thread_id = header_->thread_ref.as_id; | 899 const int64_t starting_thread_id = header_->thread_ref.as_id; |
| 883 | 900 |
| 884 // Write a non-zero value to |stack_unchanged| so it's possible to detect | 901 // Write a non-zero value to |data_unchanged| so it's possible to detect |
| 885 // at the end that nothing has changed since copying the data began. A | 902 // at the end that nothing has changed since copying the data began. A |
| 886 // "cst" operation is required to ensure it occurs before everything else. | 903 // "cst" operation is required to ensure it occurs before everything else. |
| 887 // Using "cst" memory ordering is relatively expensive but this is only | 904 // Using "cst" memory ordering is relatively expensive but this is only |
| 888 // done during analysis so doesn't directly affect the worker threads. | 905 // done during analysis so doesn't directly affect the worker threads. |
| 889 header_->stack_unchanged.store(1, std::memory_order_seq_cst); | 906 header_->data_unchanged.store(1, std::memory_order_seq_cst); |
| 890 | 907 |
| 891 // Fetching the current depth also "acquires" the contents of the stack. | 908 // Fetching the current depth also "acquires" the contents of the stack. |
| 892 depth = header_->current_depth.load(std::memory_order_acquire); | 909 depth = header_->current_depth.load(std::memory_order_acquire); |
| 893 uint32_t count = std::min(depth, stack_slots_); | 910 uint32_t count = std::min(depth, stack_slots_); |
| 894 output_snapshot->activity_stack.resize(count); | 911 output_snapshot->activity_stack.resize(count); |
| 895 if (count > 0) { | 912 if (count > 0) { |
| 896 // Copy the existing contents. Memcpy is used for speed. | 913 // Copy the existing contents. Memcpy is used for speed. |
| 897 memcpy(&output_snapshot->activity_stack[0], stack_, | 914 memcpy(&output_snapshot->activity_stack[0], stack_, |
| 898 count * sizeof(Activity)); | 915 count * sizeof(Activity)); |
| 899 } | 916 } |
| 900 | 917 |
| 918 // Capture the last exception. |
| 919 memcpy(&output_snapshot->last_exception, &header_->last_exception, |
| 920 sizeof(Activity)); |
| 921 |
| 922 // TODO(bcwhite): Snapshot other things here. |
| 923 |
| 901 // Retry if something changed during the copy. A "cst" operation ensures | 924 // Retry if something changed during the copy. A "cst" operation ensures |
| 902 // it must happen after all the above operations. | 925 // it must happen after all the above operations. |
| 903 if (!header_->stack_unchanged.load(std::memory_order_seq_cst)) | 926 if (!header_->data_unchanged.load(std::memory_order_seq_cst)) |
| 904 continue; | 927 continue; |
| 905 | 928 |
| 906 // Stack copied. Record its full depth. | 929 // Stack copied. Record its full depth. |
| 907 output_snapshot->activity_stack_depth = depth; | 930 output_snapshot->activity_stack_depth = depth; |
| 908 | 931 |
| 909 // TODO(bcwhite): Snapshot other things here. | |
| 910 | |
| 911 // Get the general thread information. | 932 // Get the general thread information. |
| 912 output_snapshot->thread_name = | 933 output_snapshot->thread_name = |
| 913 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); | 934 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); |
| 914 output_snapshot->thread_id = header_->thread_ref.as_id; | 935 output_snapshot->thread_id = header_->thread_ref.as_id; |
| 915 output_snapshot->process_id = header_->owner.process_id; | 936 output_snapshot->process_id = header_->owner.process_id; |
| 916 | 937 |
| 917 // All characters of the thread-name buffer were copied so as to not break | 938 // All characters of the thread-name buffer were copied so as to not break |
| 918 // if the trailing NUL were missing. Now limit the length if the actual | 939 // if the trailing NUL were missing. Now limit the length if the actual |
| 919 // name is shorter. | 940 // name is shorter. |
| 920 output_snapshot->thread_name.resize( | 941 output_snapshot->thread_name.resize( |
| (...skipping 11 matching lines...) |
| 932 // it's possible for the thread to end somewhere in the middle and all its | 953 // it's possible for the thread to end somewhere in the middle and all its |
| 933 // values become garbage. | 954 // values become garbage. |
| 934 if (!IsValid()) | 955 if (!IsValid()) |
| 935 return false; | 956 return false; |
| 936 | 957 |
| 937 // Change all the timestamps in the activities from "ticks" to "wall" time. | 958 // Change all the timestamps in the activities from "ticks" to "wall" time. |
| 938 const Time start_time = Time::FromInternalValue(header_->start_time); | 959 const Time start_time = Time::FromInternalValue(header_->start_time); |
| 939 const int64_t start_ticks = header_->start_ticks; | 960 const int64_t start_ticks = header_->start_ticks; |
| 940 for (Activity& activity : output_snapshot->activity_stack) { | 961 for (Activity& activity : output_snapshot->activity_stack) { |
| 941 activity.time_internal = | 962 activity.time_internal = |
| 942 (start_time + | 963 WallTimeFromTickTime(start_ticks, activity.time_internal, start_time) |
| 943 TimeDelta::FromInternalValue(activity.time_internal - start_ticks)) | |
| 944 .ToInternalValue(); | 964 .ToInternalValue(); |
| 945 } | 965 } |
| 966 output_snapshot->last_exception.time_internal = |
| 967 WallTimeFromTickTime(start_ticks, |
| 968 output_snapshot->last_exception.time_internal, |
| 969 start_time) |
| 970 .ToInternalValue(); |
| 946 | 971 |
| 947 // Success! | 972 // Success! |
| 948 return true; | 973 return true; |
| 949 } | 974 } |
| 950 | 975 |
| 951 // Too many attempts. | 976 // Too many attempts. |
| 952 return false; | 977 return false; |
| 953 } | 978 } |
| 954 | 979 |
| 955 const void* ThreadActivityTracker::GetBaseAddress() { | 980 const void* ThreadActivityTracker::GetBaseAddress() { |
| (...skipping 11 matching lines...) |
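
The timestamp rewrite in the hunk above is a pure offset calculation: each activity's time_internal comes from the same monotonic tick source as start_ticks, so its wall-clock value is start_time plus the tick delta, exactly what WallTimeFromTickTime computes. A worked illustration on raw 64-bit counts, treating the internal values as microseconds and using made-up numbers:

    #include <cassert>
    #include <cstdint>

    // Mirrors WallTimeFromTickTime above, but on plain integers instead of
    // base::Time/base::TimeDelta.
    int64_t WallTimeUs(int64_t ticks_start, int64_t ticks, int64_t wall_start_us) {
      return wall_start_us + (ticks - ticks_start);
    }

    int main() {
      const int64_t start_time_us = 1000000;  // wall clock when tracking began
      const int64_t start_ticks = 500;        // tick counter at that moment
      const int64_t activity_ticks = 750;     // tick counter when the activity was logged
      assert(WallTimeUs(start_ticks, activity_ticks, start_time_us) == 1000250);
      return 0;
    }
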
| 967 int64_t* out_stamp) { | 992 int64_t* out_stamp) { |
| 968 const Header* header = reinterpret_cast<const Header*>(memory); | 993 const Header* header = reinterpret_cast<const Header*>(memory); |
| 969 return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp); | 994 return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp); |
| 970 } | 995 } |
| 971 | 996 |
| 972 // static | 997 // static |
| 973 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { | 998 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { |
| 974 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); | 999 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); |
| 975 } | 1000 } |
| 976 | 1001 |
| 1002 std::unique_ptr<ActivityUserData> |
| 1003 ThreadActivityTracker::CreateUserDataForActivity( |
| 1004 Activity* activity, |
| 1005 ActivityTrackerMemoryAllocator* allocator) { |
| 1006 DCHECK_EQ(0U, activity->user_data_ref); |
| 1007 |
| 1008 PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference(); |
| 1009 void* memory = allocator->GetAsArray<char>(ref, kUserDataSize); |
| 1010 if (memory) { |
| 1011 std::unique_ptr<ActivityUserData> user_data = |
| 1012 MakeUnique<ActivityUserData>(memory, kUserDataSize); |
| 1013 activity->user_data_ref = ref; |
| 1014 activity->user_data_id = user_data->id(); |
| 1015 return user_data; |
| 1016 } |
| 1017 |
| 1018 // Return a dummy object that will still accept (but ignore) Set() calls. |
| 1019 return MakeUnique<ActivityUserData>(); |
| 1020 } |
| 1021 |
| 977 // The instantiation of the GlobalActivityTracker object. | 1022 // The instantiation of the GlobalActivityTracker object. |
| 978 // The object held here will obviously not be destructed at process exit | 1023 // The object held here will obviously not be destructed at process exit |
| 979 // but that's best since PersistentMemoryAllocator objects (that underlie | 1024 // but that's best since PersistentMemoryAllocator objects (that underlie |
| 980 // GlobalActivityTracker objects) are explicitly forbidden from doing anything | 1025 // GlobalActivityTracker objects) are explicitly forbidden from doing anything |
| 981 // essential at exit anyway due to the fact that they depend on data managed | 1026 // essential at exit anyway due to the fact that they depend on data managed |
| 982 // elsewhere and which could be destructed first. An AtomicWord is used instead | 1027 // elsewhere and which could be destructed first. An AtomicWord is used instead |
| 983 // of std::atomic because the latter can create global ctors and dtors. | 1028 // of std::atomic because the latter can create global ctors and dtors. |
| 984 subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0; | 1029 subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0; |
| 985 | 1030 |
| 986 GlobalActivityTracker::ModuleInfo::ModuleInfo() {} | 1031 GlobalActivityTracker::ModuleInfo::ModuleInfo() {} |
| (...skipping 133 matching lines...) |
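
The comment above on g_tracker_ explains why the global slot is a subtle::AtomicWord rather than std::atomic: a plain integer word needs no global constructor or destructor. A sketch of that singleton-slot pattern, with a hypothetical Tracker type standing in for GlobalActivityTracker:

    #include "base/atomicops.h"

    namespace {

    struct Tracker;  // hypothetical stand-in for GlobalActivityTracker

    // Zero-initialized word; no static ctor/dtor is generated for it.
    base::subtle::AtomicWord g_instance = 0;

    Tracker* GetInstance() {
      return reinterpret_cast<Tracker*>(base::subtle::Acquire_Load(&g_instance));
    }

    void SetInstance(Tracker* tracker) {
      base::subtle::Release_Store(
          &g_instance, reinterpret_cast<base::subtle::AtomicWord>(tracker));
    }

    }  // namespace
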
| 1120 } | 1165 } |
| 1121 | 1166 |
| 1122 ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() { | 1167 ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() { |
| 1123 if (!user_data_) { | 1168 if (!user_data_) { |
| 1124 if (tracker_) { | 1169 if (tracker_) { |
| 1125 GlobalActivityTracker* global = GlobalActivityTracker::Get(); | 1170 GlobalActivityTracker* global = GlobalActivityTracker::Get(); |
| 1126 AutoLock lock(global->user_data_allocator_lock_); | 1171 AutoLock lock(global->user_data_allocator_lock_); |
| 1127 user_data_ = | 1172 user_data_ = |
| 1128 tracker_->GetUserData(activity_id_, &global->user_data_allocator_); | 1173 tracker_->GetUserData(activity_id_, &global->user_data_allocator_); |
| 1129 } else { | 1174 } else { |
| 1130 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); | 1175 user_data_ = MakeUnique<ActivityUserData>(); |
| 1131 } | 1176 } |
| 1132 } | 1177 } |
| 1133 return *user_data_; | 1178 return *user_data_; |
| 1134 } | 1179 } |
| 1135 | 1180 |
| 1136 GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory, | 1181 GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory, |
| 1137 size_t size) | 1182 size_t size) |
| 1138 : ActivityUserData(memory, size) {} | 1183 : ActivityUserData(memory, size) {} |
| 1139 | 1184 |
| 1140 GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {} | 1185 GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {} |
| (...skipping 411 matching lines...) |
| 1552 | 1597 |
| 1553 // Remove the destructed tracker from the set of known ones. | 1598 // Remove the destructed tracker from the set of known ones. |
| 1554 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); | 1599 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); |
| 1555 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); | 1600 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); |
| 1556 | 1601 |
| 1557 // Release this memory for re-use at a later time. | 1602 // Release this memory for re-use at a later time. |
| 1558 base::AutoLock autolock(thread_tracker_allocator_lock_); | 1603 base::AutoLock autolock(thread_tracker_allocator_lock_); |
| 1559 thread_tracker_allocator_.ReleaseObjectReference(mem_reference); | 1604 thread_tracker_allocator_.ReleaseObjectReference(mem_reference); |
| 1560 } | 1605 } |
| 1561 | 1606 |
| 1607 void GlobalActivityTracker::RecordExceptionImpl(const void* pc, |
| 1608 const void* origin) { |
| 1609 // Get an existing tracker for this thread. It's not possible to create |
| 1610 // one at this point because such would involve memory allocations and |
| 1611 // other potentially complex operations that can cause failures if done |
| 1612 // within an exception handler. In most cases various operations will |
| 1613 // have already created the tracker so this shouldn't generally be a |
| 1614 // problem. |
| 1615 ThreadActivityTracker* tracker = GetTrackerForCurrentThread(); |
| 1616 if (!tracker) |
| 1617 return; |
| 1618 |
| 1619 tracker->RecordExceptionActivity(pc, origin, Activity::ACT_EXCEPTION, |
| 1620 ActivityData::ForGeneric(0, 0)); |
| 1621 } |
| 1622 |
| 1562 // static | 1623 // static |
| 1563 void GlobalActivityTracker::OnTLSDestroy(void* value) { | 1624 void GlobalActivityTracker::OnTLSDestroy(void* value) { |
| 1564 delete reinterpret_cast<ManagedActivityTracker*>(value); | 1625 delete reinterpret_cast<ManagedActivityTracker*>(value); |
| 1565 } | 1626 } |
| 1566 | 1627 |
| 1567 ScopedActivity::ScopedActivity(const void* program_counter, | 1628 ScopedActivity::ScopedActivity(const void* program_counter, |
| 1568 uint8_t action, | 1629 uint8_t action, |
| 1569 uint32_t id, | 1630 uint32_t id, |
| 1570 int32_t info) | 1631 int32_t info) |
| 1571 : GlobalActivityTracker::ScopedThreadActivity( | 1632 : GlobalActivityTracker::ScopedThreadActivity( |
| (...skipping 70 matching lines...) |
| 1642 : GlobalActivityTracker::ScopedThreadActivity( | 1703 : GlobalActivityTracker::ScopedThreadActivity( |
| 1643 program_counter, | 1704 program_counter, |
| 1644 nullptr, | 1705 nullptr, |
| 1645 Activity::ACT_PROCESS_WAIT, | 1706 Activity::ACT_PROCESS_WAIT, |
| 1646 ActivityData::ForProcess(process->Pid()), | 1707 ActivityData::ForProcess(process->Pid()), |
| 1647 /*lock_allowed=*/true) {} | 1708 /*lock_allowed=*/true) {} |
| 1648 #endif | 1709 #endif |
| 1649 | 1710 |
| 1650 } // namespace debug | 1711 } // namespace debug |
| 1651 } // namespace base | 1712 } // namespace base |