Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // Activity tracking provides a low-overhead method of collecting information | 5 // Activity tracking provides a low-overhead method of collecting information |
| 6 // about the state of the application for analysis both while it is running | 6 // about the state of the application for analysis both while it is running |
| 7 // and after it has terminated unexpectedly. Its primary purpose is to help | 7 // and after it has terminated unexpectedly. Its primary purpose is to help |
| 8 // locate reasons the browser becomes unresponsive by providing insight into | 8 // locate reasons the browser becomes unresponsive by providing insight into |
| 9 // what all the various threads and processes are (or were) doing. | 9 // what all the various threads and processes are (or were) doing. |
| 10 | 10 |
| 11 #ifndef BASE_DEBUG_ACTIVITY_TRACKER_H_ | 11 #ifndef BASE_DEBUG_ACTIVITY_TRACKER_H_ |
| 12 #define BASE_DEBUG_ACTIVITY_TRACKER_H_ | 12 #define BASE_DEBUG_ACTIVITY_TRACKER_H_ |
| 13 | 13 |
| 14 // std::atomic is undesired due to performance issues when used as global | 14 // std::atomic is undesired due to performance issues when used as global |
| 15 // variables. There are no such instances here. This module uses the | 15 // variables. There are no such instances here. This module uses the |
| 16 // PersistentMemoryAllocator which also uses std::atomic and is written | 16 // PersistentMemoryAllocator which also uses std::atomic and is written |
| 17 // by the same author. | 17 // by the same author. |
| 18 #include <atomic> | 18 #include <atomic> |
| 19 #include <memory> | 19 #include <memory> |
| 20 #include <string> | 20 #include <string> |
| 21 #include <vector> | 21 #include <vector> |
| 22 | 22 |
| 23 #include "base/base_export.h" | 23 #include "base/base_export.h" |
| 24 #include "base/location.h" | 24 #include "base/location.h" |
| 25 #include "base/metrics/persistent_memory_allocator.h" | 25 #include "base/metrics/persistent_memory_allocator.h" |
| 26 #include "base/threading/platform_thread.h" | |
| 26 #include "base/threading/thread_checker.h" | 27 #include "base/threading/thread_checker.h" |
| 27 #include "base/threading/thread_local_storage.h" | 28 #include "base/threading/thread_local_storage.h" |
| 28 | 29 |
| 29 namespace base { | 30 namespace base { |
| 30 | 31 |
| 31 struct PendingTask; | 32 struct PendingTask; |
| 32 | 33 |
| 33 class FilePath; | 34 class FilePath; |
| 34 class Lock; | 35 class Lock; |
| 35 class MemoryMappedFile; | 36 class MemoryMappedFile; |
| 36 class PlatformThreadHandle; | 37 class PlatformThreadHandle; |
| 37 class Process; | 38 class Process; |
| 38 class WaitableEvent; | 39 class WaitableEvent; |
| 39 | 40 |
| 40 namespace debug { | 41 namespace debug { |
| 41 | 42 |
| 42 class ThreadActivityTracker; | 43 class ThreadActivityTracker; |
| 43 | 44 |
| 45 | |
| 46 //============================================================================= | |
| 47 // This class provides a lock-free queue of any atomic type with the | |
| 48 // limitation that there must be at least one "invalid" value. This is | |
| 49 // built as a completely generic type and can (hopefully) be moved to a | |
| 50 // more generally useful place in the future. | |
| 51 template <typename T> | |
| 52 class LockFreeSimpleQueue { | |
| 53 public: | |
| 54 // Construct a simple lock-free queue with the specified |size| that | |
| 55 // is not allowed to hold the |invalid_value|. | |
| 56 LockFreeSimpleQueue(size_t size, T invalid_value) | |
| 57 : size_(size + 1), invalid_value_(invalid_value), head_(0), tail_(0) { | |
| 58 DCHECK_LE(1U, size); | |
| 59 | |
| 60 // Allocate memory for the queue values. Its size is the requested size + 1 | |
| 61 // because it can never be full (head == tail is reserved to mean "empty"). | |
| 62 values_.reset(new std::atomic<T>[size_]); | |
| 63 | |
| 64 // Ensure that the underlying atomics are also lock-free. This should | |
| 65 // evaluate to a constant at compile time and so produce no code, but | |
| 66 // is_lock_free() is not constexpr, so a static_assert will not compile. | |
| 67 CHECK(head_.is_lock_free()); | |
| 68 CHECK(values_[0].is_lock_free()); | |
| 69 | |
| 70 // All elements must be "invalid" to start in order for the push/pop | |
| 71 // operations to work. | |
| 72 for (size_t i = 0; i < size_; ++i) | |
| 73 values_[i].store(invalid_value_, std::memory_order_relaxed); | |
| 74 } | |
| 75 | |
| 76 T invalid_value() { return invalid_value_; } | |
| 77 size_t size() { return size_ - 1; } | |
| 78 size_t used() { | |
| 79 return (head_.load(std::memory_order_relaxed) + size_ - | |
| 80 tail_.load(std::memory_order_relaxed)) % | |
| 81 size_; | |
| 82 } | |
| 83 bool empty() { | |
| 84 return empty(head_.load(std::memory_order_relaxed), | |
| 85 tail_.load(std::memory_order_relaxed)); | |
| 86 } | |
| 87 bool full() { | |
| 88 return full(head_.load(std::memory_order_relaxed), | |
| 89 tail_.load(std::memory_order_relaxed)); | |
| 90 } | |
| 91 | |
| 92 // Adds a new |value| to the end of the queue and returns true on success | |
| 93 // or false if the queue is full. | |
| 94 bool push(T value); | |
| 95 | |
| 96 // Retrieves the first value off the queue and returns it or the "invalid" | |
| 97 // value if the queue is empty. | |
| 98 T pop(); | |
| 99 | |
| 100 private: | |
| 101 // Reports if the queue is empty/full based on explicit head/tail values. | |
| 102 // Note that the % operator in C/C++ computes a "remainder" and thus will | |
| 103 // not provide correct "modular arithmetic" if the left value is negative; | |
| 104 // adding |size_| keeps it positive (e.g. size_ == 4, head == 3, tail == 0: (0 + 4 - 1) % 4 == 3 == head, so full). | |
| 105 bool empty(size_t head, size_t tail) { return head == tail; } | |
| 106 bool full(size_t head, size_t tail) { | |
| 107 return (tail + size_ - 1) % size_ == head; | |
| 108 } | |
| 109 | |
| 110 const size_t size_; // Size of the internal |values_|: requested + 1 | |
| 111 const T invalid_value_; // A value not allowed to be stored. | |
| 112 | |
| 113 std::atomic<size_t> head_; // One past the newest value; where to push. | |
| 114 std::atomic<size_t> tail_; // The oldest value; first to pop. | |
| 115 | |
| 116 // Array holding pushed values. | |
| 117 std::unique_ptr<std::atomic<T>[]> values_; | |
| 118 | |
| 119 DISALLOW_COPY_AND_ASSIGN(LockFreeSimpleQueue); | |
| 120 }; | |
| 121 | |
| 122 template <typename T> | |
| 123 bool LockFreeSimpleQueue<T>::push(T value) { | |
| 124 // Pushing the "invalid" value is not allowed; it would cause an infinite | |
| 125 // loop in pop. | |
| 126 CHECK_NE(invalid_value_, value); | |
| 127 | |
| 128 // Get the head of the queue and acquire its contents. | |
| 129 size_t head = head_.load(std::memory_order_acquire); | |
| 130 | |
| 131 // In short: allocate a slot at the head of the queue, write the value to | |
| 132 // it, try again if anything gets in the way. | |
| 133 while (true) { | |
| 134 DCHECK_LE(0U, head); | |
| 135 DCHECK_GT(size_, head); | |
| 136 | |
| 137 // If the queue is full, fail. | |
| 138 if (full(head, tail_.load(std::memory_order_relaxed))) | |
| 139 return false; | |
| 140 | |
| 141 // The "head" is the critical resource so allocate a slot from the | |
| 142 // |values_| buffer at its current location, acquiring the value there. | |
| 143 // A "weak" operation is used because it's relatively trivial to try | |
| 144 // this operation again. | |
| 145 size_t slot = head; | |
| 146 if (!head_.compare_exchange_weak(slot, (head + 1) % size_, | |
| 147 std::memory_order_acquire, | |
| 148 std::memory_order_relaxed)) { | |
| 149 // The exchange will have loaded the latest "head" into |slot|. | |
| 150 head = slot; | |
| 151 continue; | |
| 152 } | |
| 153 | |
| 154 // Save the value being pushed to the reserved slot, overwriting the | |
| 155 // "invalid" value that should be there. If it's not, it's because the | |
| 156 // slot was released by a pop() but that method hasn't yet extracted | |
| 157 // the value. Wait for it to do so. Use a "strong" exchange to avoid | |
| 158 // mistakenly releasing the CPU. | |
| 159 T expected_value = invalid_value_; | |
| 160 while (!values_[slot].compare_exchange_strong(expected_value, value, | |
| 161 std::memory_order_relaxed, | |
| 162 std::memory_order_relaxed)) { | |
| 163 PlatformThread::YieldCurrentThread(); | |
| 164 expected_value = invalid_value_; | |
| 165 } | |
| 166 | |
| 167 // Success! | |
| 168 return true; | |
| 169 } | |
| 170 } | |
| 171 | |
| 172 template <typename T> | |
| 173 T LockFreeSimpleQueue<T>::pop() { | |
| 174 // Get the tail of the queue and acquire its contents. | |
| 175 size_t tail = tail_.load(std::memory_order_acquire); | |
| 176 | |
| 177 // In short: deallocate the slot at the tail of the queue, read the value | |
manzagop (departed) 2016/08/23 19:46:08
I think it's possible that slot wasn't written to

bcwhite 2016/08/23 20:16:26
Either push could get written first but they'll ge

manzagop (departed) 2016/08/23 21:20:28
Sorry, I wasn't super clear. I mean that if there

bcwhite 2016/08/24 11:56:28
Let's see...
1) T1 push allocates slot X but does

manzagop (departed) 2016/08/24 13:17:52
That's my understanding as well. In the current us

bcwhite 2016/08/24 13:41:21
I think it goes without saying that parallel opera
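For readers following the thread: a minimal standalone sketch (not part of the patch; `kInvalid`, `reserved`, and the thread roles are illustrative) of the handoff being debated. The producer advances the index to reserve a slot *before* publishing the value, so a consumer can reach a reserved-but-unwritten slot and must spin until the value appears, exactly as pop() does above.

```cpp
// Standalone illustration of the reserve-then-publish handoff; builds with
// any C++11 compiler (e.g. g++ -std=c++11 -pthread sketch.cc).
#include <atomic>
#include <cassert>
#include <thread>

int main() {
  constexpr int kInvalid = -1;
  std::atomic<int> slot{kInvalid};    // one queue cell; kInvalid == empty
  std::atomic<bool> reserved{false};  // stands in for the head_ advance

  std::thread producer([&] {
    reserved.store(true, std::memory_order_release);  // slot allocated...
    std::this_thread::yield();                        // ...writer preempted
    slot.store(42, std::memory_order_relaxed);        // value published late
  });

  std::thread consumer([&] {
    while (!reserved.load(std::memory_order_acquire)) {
      std::this_thread::yield();  // wait until the slot is reserved
    }
    // Mirrors pop(): keep exchanging until a non-invalid value appears.
    int value;
    while ((value = slot.exchange(kInvalid, std::memory_order_relaxed)) ==
           kInvalid) {
      std::this_thread::yield();  // reserved but not yet written; wait
    }
    assert(value == 42);
  });

  producer.join();
  consumer.join();
  return 0;
}
```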
| 178 // from it, try again if anything goes wrong. | |
| 179 while (true) { | |
| 180 DCHECK_LE(0U, tail); | |
| 181 DCHECK_GT(size_, tail); | |
| 182 | |
| 183 // If the queue is empty, fail. | |
| 184 if (empty(head_.load(std::memory_order_relaxed), tail)) | |
| 185 return invalid_value_; | |
| 186 | |
| 187 // The "tail" is the critical resource so retrieve a slot from the | |
| 188 // |values_| buffer at its current location, acquiring the value there. | |
| 189 // A "weak" operation is used because it's relatively trivial to try | |
| 190 // this operation again. | |
| 191 size_t slot = tail; | |
| 192 if (!tail_.compare_exchange_weak(slot, (tail + 1) % size_, | |
| 193 std::memory_order_acquire, | |
| 194 std::memory_order_relaxed)) { | |
| 195 // The exchange will have loaded the latest "tail" into |slot|. | |
| 196 tail = slot; | |
| 197 continue; | |
| 198 } | |
| 199 | |
| 200 // Read a value from the front of the queue, writing the "invalid" value | |
| 201 // in its place. If the retrieved value is invalid then the slot was | |
| 202 // acquired by push() but that method hasn't yet written the value. Wait | |
| 203 // for it to do so. | |
| 204 T value; | |
| 205 while ((value = values_[slot].exchange( | |
| 206 invalid_value_, std::memory_order_relaxed)) == invalid_value_) { | |
| 207 PlatformThread::YieldCurrentThread(); | |
| 208 } | |
| 209 | |
| 210 // Success! | |
| 211 DCHECK_NE(invalid_value_, value); | |
| 212 return value; | |
| 213 } | |
| 214 } | |
| 215 //============================================================================= | |
| 216 | |
| 217 | |
| 44 enum : int { | 218 enum : int { |
| 45 // The maximum number of call-stack addresses stored per activity. This | 219 // The maximum number of call-stack addresses stored per activity. This |
| 46 // cannot be changed without also changing the version number of the | 220 // cannot be changed without also changing the version number of the |
| 47 // structure. See kTypeIdActivityTracker in GlobalActivityTracker. | 221 // structure. See kTypeIdActivityTracker in GlobalActivityTracker. |
| 48 kActivityCallStackSize = 10, | 222 kActivityCallStackSize = 10, |
| 49 }; | 223 }; |
| 50 | 224 |
| 51 // The data associated with an activity is dependent upon the activity type. | 225 // The data associated with an activity is dependent upon the activity type. |
| 52 // This union defines all of the various fields. All fields must be explicitly | 226 // This union defines all of the various fields. All fields must be explicitly |
| 53 // sized types to ensure no interoperability problems between 32-bit and | 227 // sized types to ensure no interoperability problems between 32-bit and |
| (...skipping 447 matching lines...) | |
| 501 // The size (in bytes) of memory required by a ThreadActivityTracker to | 675 // The size (in bytes) of memory required by a ThreadActivityTracker to |
| 502 // provide the stack-depth requested during construction. | 676 // provide the stack-depth requested during construction. |
| 503 const size_t stack_memory_size_; | 677 const size_t stack_memory_size_; |
| 504 | 678 |
| 505 // The activity tracker for the currently executing thread. | 679 // The activity tracker for the currently executing thread. |
| 506 base::ThreadLocalStorage::Slot this_thread_tracker_; | 680 base::ThreadLocalStorage::Slot this_thread_tracker_; |
| 507 | 681 |
| 508 // These have to be lock-free because lock activity is tracked and causes | 682 // These have to be lock-free because lock activity is tracked and causes |
| 509 // re-entry problems. | 683 // re-entry problems. |
| 510 std::atomic<int> thread_tracker_count_; | 684 std::atomic<int> thread_tracker_count_; |
| 511 std::atomic<int> available_memories_count_; | 685 LockFreeSimpleQueue<PersistentMemoryAllocator::Reference> available_memories_; |
| 512 std::atomic<PersistentMemoryAllocator::Reference> | |
| 513 available_memories_[kMaxThreadCount]; | |
| 514 | 686 |
| 515 // The active global activity tracker. | 687 // The active global activity tracker. |
| 516 static GlobalActivityTracker* g_tracker_; | 688 static GlobalActivityTracker* g_tracker_; |
| 517 | 689 |
| 518 DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker); | 690 DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker); |
| 519 }; | 691 }; |
| 520 | 692 |
| 521 | 693 |
| 522 // Record entry into and out of an arbitrary block of code. | 694 // Record entry into and out of an arbitrary block of code. |
| 523 class BASE_EXPORT ScopedActivity | 695 class BASE_EXPORT ScopedActivity |
| (...skipping 83 matching lines...) | |
| 607 explicit ScopedProcessWaitActivity(const base::Process* process); | 779 explicit ScopedProcessWaitActivity(const base::Process* process); |
| 608 private: | 780 private: |
| 609 DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity); | 781 DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity); |
| 610 }; | 782 }; |
| 611 #endif | 783 #endif |
| 612 | 784 |
| 613 } // namespace debug | 785 } // namespace debug |
| 614 } // namespace base | 786 } // namespace base |
| 615 | 787 |
| 616 #endif // BASE_DEBUG_ACTIVITY_TRACKER_H_ | 788 #endif // BASE_DEBUG_ACTIVITY_TRACKER_H_ |
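Finally, a minimal usage sketch of the new queue as the patch applies it to `available_memories_`. Illustrative only: `Example()` is a hypothetical caller, and this assumes `PersistentMemoryAllocator::Reference` 0 is never handed out, which is what makes 0 usable as the "invalid" sentinel here.

```cpp
#include "base/debug/activity_tracker.h"
#include "base/metrics/persistent_memory_allocator.h"

void Example() {
  using Reference = base::PersistentMemoryAllocator::Reference;

  // Room for 8 recycled references; 0 doubles as the "invalid" sentinel,
  // so it can never be pushed.
  base::debug::LockFreeSimpleQueue<Reference> free_list(8, 0);

  free_list.push(42);               // recycle a (hypothetical) freed block
  Reference ref = free_list.pop();  // returns 42; further pops return 0
  if (ref == free_list.invalid_value()) {
    // Queue was empty: fall back to allocating a fresh block instead.
  }
}
```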