Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // Activity tracking provides a low-overhead method of collecting information | 5 // Activity tracking provides a low-overhead method of collecting information |
| 6 // about the state of the application for analysis both while it is running | 6 // about the state of the application for analysis both while it is running |
| 7 // and after it has terminated unexpectedly. Its primary purpose is to help | 7 // and after it has terminated unexpectedly. Its primary purpose is to help |
| 8 // locate reasons the browser becomes unresponsive by providing insight into | 8 // locate reasons the browser becomes unresponsive by providing insight into |
| 9 // what all the various threads and processes are (or were) doing. | 9 // what all the various threads and processes are (or were) doing. |
| 10 | 10 |
| 11 #ifndef BASE_DEBUG_ACTIVITY_TRACKER_H_ | 11 #ifndef BASE_DEBUG_ACTIVITY_TRACKER_H_ |
| 12 #define BASE_DEBUG_ACTIVITY_TRACKER_H_ | 12 #define BASE_DEBUG_ACTIVITY_TRACKER_H_ |
| 13 | 13 |
| 14 // std::atomic is undesired due to performance issues when used as global | 14 // std::atomic is undesired due to performance issues when used as global |
| 15 // variables. There are no such instances here. This module uses the | 15 // variables. There are no such instances here. This module uses the |
| 16 // PersistentMemoryAllocator which also uses std::atomic and is written | 16 // PersistentMemoryAllocator which also uses std::atomic and is written |
| 17 // by the same author. | 17 // by the same author. |
| 18 #include <atomic> | 18 #include <atomic> |
| 19 #include <memory> | 19 #include <memory> |
| 20 #include <string> | 20 #include <string> |
| 21 #include <vector> | 21 #include <vector> |
| 22 | 22 |
| 23 #include "base/base_export.h" | 23 #include "base/base_export.h" |
| 24 #include "base/location.h" | 24 #include "base/location.h" |
| 25 #include "base/metrics/persistent_memory_allocator.h" | 25 #include "base/metrics/persistent_memory_allocator.h" |
| 26 #include "base/threading/platform_thread.h" | |
| 26 #include "base/threading/thread_checker.h" | 27 #include "base/threading/thread_checker.h" |
| 27 #include "base/threading/thread_local_storage.h" | 28 #include "base/threading/thread_local_storage.h" |
| 28 | 29 |
| 29 namespace base { | 30 namespace base { |
| 30 | 31 |
| 31 struct PendingTask; | 32 struct PendingTask; |
| 32 | 33 |
| 33 class FilePath; | 34 class FilePath; |
| 34 class Lock; | 35 class Lock; |
| 35 class MemoryMappedFile; | 36 class MemoryMappedFile; |
| 36 class PlatformThreadHandle; | 37 class PlatformThreadHandle; |
| 37 class Process; | 38 class Process; |
| 38 class WaitableEvent; | 39 class WaitableEvent; |
| 39 | 40 |
| 40 namespace debug { | 41 namespace debug { |
| 41 | 42 |
| 42 class ThreadActivityTracker; | 43 class ThreadActivityTracker; |
| 43 | 44 |
| 45 | |
| 46 //============================================================================= | |
| 47 // This class provides a lock-free queue of any atomic type with the | |
| 48 // limitation that there must be at least one "invalid" value. This is | |
| 49 // built as a completely generic type and can (hopefully) be moved to a | |
| 50 // more generally useful place in the future. | |
| 51 template <typename T> | |
| 52 class LockFreeSimpleQueue { | |
| 53 public: | |
| 54 // Construct a simple lock-free queue with the specified |size| that is | |
| 55 // not allowed to hold the |invalid_value|. | |
| 56 LockFreeSimpleQueue(size_t size, T invalid_value) | |
| 57 : size_(size + 1), invalid_value_(invalid_value), head_(0), tail_(0) { | |
| 58 DCHECK_LE(1U, size); | |
| 59 | |
| 60 // Allocate memory for the queue values. Its size is the requested size +1 | |
| 61 // because it can never be full (head == tail is reserved to mean "empty"). | |
| 62 values_.reset(new std::atomic<T>[size_]); | |
| 63 | |
| 64 // Ensure that the underlying atomics are also lock-free. This should | |
| 65 // evaluate to a constant at compile time and so produce no code, but | |
| 66 // is_lock_free() is not constexpr so it cannot be a static_assert. | |
| 67 CHECK(head_.is_lock_free()); | |
| 68 CHECK(values_[0].is_lock_free()); | |
| 69 | |
| 70 // All elements must be "invalid" to start in order for the push/pop | |
| 71 // operations to work. | |
| 72 for (size_t i = 0; i < size_; ++i) | |
| 73 values_[i].store(invalid_value_, std::memory_order_relaxed); | |
| 74 } | |
| 75 | |
| 76 T invalid_value() { return invalid_value_; } | |
| 77 size_t size() { return size_ - 1; } | |
| 78 size_t used() { | |
| 79 return (head_.load(std::memory_order_relaxed) + size_ - | |
| 80 tail_.load(std::memory_order_relaxed)) % | |
| 81 size_; | |
| 82 } | |
| 83 bool empty() { | |
| 84 return empty(head_.load(std::memory_order_relaxed), | |
| 85 tail_.load(std::memory_order_relaxed)); | |
| 86 } | |
| 87 bool full() { | |
| 88 return full(head_.load(std::memory_order_relaxed), | |
| 89 tail_.load(std::memory_order_relaxed)); | |
| 90 } | |
| 91 | |
| 92 // Adds a new |value| to the end of the queue and returns true on success | |
| 93 // or false if the queue was full. | |
| 94 bool push(T value); | |
| 95 | |
| 96 // Retrieves the first value off the queue and returns it or the "invalid" | |
| 97 // value if the queue is empty. | |
| 98 T pop(); | |
| 99 | |
| 100 private: | |
| 101 // Reports if the queue is empty/full based on explicit head/tail values. | |
| 102 // Note that the % operator in C/C++ is a "remainder" operator and thus will | |
| 103 // not provide correct "modular arithmetic" if the left value is negative; | |
| 104 // adding |size_| keeps it positive. | |
| 105 bool empty(size_t head, size_t tail) { return head == tail; } | |
| 106 bool full(size_t head, size_t tail) { | |
| 107 return (tail + size_ - 1) % size_ == head; | |
| 108 } | |
| 109 | |
| 110 const size_t size_; // Size of the internal |values_|: requested + 1 | |
| 111 const T invalid_value_; // A value not allowed to be stored. | |
| 112 | |
| 113 std::atomic<size_t> head_; // One past the newest value; where to push. | |
| 114 std::atomic<size_t> tail_; // The oldest value; first to pop. | |
| 115 | |
| 116 // Array holding pushed values. | |
| 117 std::unique_ptr<std::atomic<T>[]> values_; | |
| 118 | |
| 119 DISALLOW_COPY_AND_ASSIGN(LockFreeSimpleQueue); | |
| 120 }; | |
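For reference, a minimal usage sketch of the queue as declared above (not part of the patch; assumes this header is included):

```cpp
#include "base/debug/activity_tracker.h"

void Example() {
  // A queue holding at most 4 values; -1 is reserved as the "invalid"
  // value and may never be pushed.
  base::debug::LockFreeSimpleQueue<int> queue(4, -1);

  bool ok = queue.push(7);   // true: the queue had room.
  int first = queue.pop();   // returns 7.
  int none = queue.pop();    // returns -1: the queue is now empty.
}
```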
| 121 | |
| 122 template <typename T> | |
| 123 bool LockFreeSimpleQueue<T>::push(T value) { | |
| 124 // Pushing the "invalid" value is not allowed; it would cause an infinite | |
| 125 // loop in pop. | |
| 126 CHECK_NE(invalid_value_, value); | |
| 127 | |
| 128 // Get the head of the queue and acquire its contents. | |
| 129 size_t head = head_.load(std::memory_order_acquire); | |
| 130 | |
| 131 // In short: allocate a slot at the head of the queue, write the value to | |
| 132 // it, try again if anything gets in the way. | |
| 133 while (true) { | |
| 134 DCHECK_LE(0U, head); | |
| 135 DCHECK_GT(size_, head); | |
| 136 | |
| 137 // If the queue is full, fail. | |
| 138 if (full(head, tail_.load(std::memory_order_relaxed))) | |
| 139 return false; | |
| 140 | |
| 141 // The "head" is the critical resource so allocate a slot from the | |
| 142 // |values_| buffer at its current location, acquiring the value there. | |
| 143 // A "weak" operation is used because it's relatively trivial to try | |
| 144 // this operation again. | |
| 145 size_t slot = head; | |
| 146 if (!head_.compare_exchange_weak(slot, (head + 1) % size_, | |
| 147 std::memory_order_acquire, | |
| 148 std::memory_order_relaxed)) { | |
| 149 // The exchange will have loaded the latest "head" into |slot|. | |
| 150 head = slot; | |
| 151 continue; | |
| 152 } | |
| 153 | |
| 154 // Save the value being pushed to the reserved slot, overwriting the | |
| 155 // "invalid" value that must be there. | |
| 156 DCHECK_EQ(invalid_value_, values_[slot].load(std::memory_order_relaxed)); | |
| 157 values_[slot].store(value, std::memory_order_relaxed); | |
| 158 | |
| 159 // Success! | |
| 160 return true; | |
| 161 } | |
| 162 } | |
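The loop above leans on a property of compare_exchange_weak: it may fail spuriously even when the stored value equals the expected one, but on any failure it refreshes the expected value with what is actually stored. The idiom in isolation (illustrative, not from the patch):

```cpp
#include <atomic>
#include <cstddef>

// Advance a ring-buffer index by one slot, tolerating spurious CAS failures.
void AdvanceRingIndex(std::atomic<size_t>* index, size_t size) {
  size_t expected = index->load(std::memory_order_acquire);
  while (!index->compare_exchange_weak(expected, (expected + 1) % size,
                                       std::memory_order_acquire,
                                       std::memory_order_relaxed)) {
    // |expected| was refreshed with the current value; just retry
    // against the latest state.
  }
}
```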
| 163 | |
| 164 template <typename T> | |
| 165 T LockFreeSimpleQueue<T>::pop() { | |
| 166 // Get the current tail of the queue and acquire its contents. | |
| 167 size_t tail = tail_.load(std::memory_order_acquire); | |
| 168 | |
| 169 // In short: ensure the tail value is valid, take it, try again if anything | |
| 170 // goes wrong. | |
| 171 while (true) { | |
| 172 DCHECK_LE(0U, tail); | |
| 173 DCHECK_GT(size_, tail); | |
| 174 | |
| 175 // If the queue is empty, fail. | |
| 176 if (empty(head_.load(std::memory_order_relaxed), tail)) | |
| 177 return invalid_value_; | |
| 178 | |
| 179 // Read a value from the bottom of the queue, writing the "invalid" value | |
| 180 // in its place. If the retrieved value is invalid then something else is | |
| 181 // happening. | |
| 182 T value = values_[tail].exchange(invalid_value_, std::memory_order_relaxed); | |
| 183 if (value == invalid_value_) { | |
| 184 // Retrieve an updated "tail" value to see what happened. | |
| 185 size_t new_tail = tail_.load(std::memory_order_acquire); | |
| 186 if (new_tail == tail) { | |
| 187 // Either a push() has reserved this slot but not yet written a value | |
| 188 // to it or another pop() has taken the value but not yet incremented | |
| 189 // the tail. | |
| 190 // It would be possible to simply act as if the queue were empty() but | |
| 191 // this could violate an expectation where there is known to be a | |
| 192 // single "popper" thread that has already verified that the queue is | |
| 193 // not empty and thus doesn't expect the pop() operation to fail. | |
| 194 // Instead, yield the CPU and retry. | |
| 195 PlatformThread::YieldCurrentThread(); | |

manzagop (departed) 2016/08/23 16:02:03
Do you need to reload tail in case it was an ongoi…

bcwhite 2016/08/23 16:12:41
Done.

| 196 } else { | |
| 197 // Another thread must have taken the value and incremented the tail. | |
| 198 // Nothing to do but try again. | |
| 199 } | |
| 200 | |
| 201 // Try again. | |
| 202 tail = new_tail; | |
| 203 continue; | |
| 204 } | |
| 205 | |
| 206 // Increment the tail, releasing the newly stored value. There is no reason | |
| 207 // for this to fail since only one thread at a time can be between the | |
| 208 // exchange above and the increment below. A "weak" exchange is sufficient | |
| 209 // because a simple retry can handle false failures. | |
| 210 size_t expected_tail = tail; | |
| 211 while (!tail_.compare_exchange_weak(expected_tail, (tail + 1) % size_, | |
| 212 std::memory_order_release, | |
| 213 std::memory_order_relaxed)) { | |
| 214 // |expected_tail| is updated with the actual value stored there. | |
| 215 // Ensure it matches our expectation that only false-failures can | |
| 216 // occur. | |
| 217 DCHECK_EQ(tail, expected_tail); | |
| 218 } | |
| 219 | |
| 220 // Success! | |
| 221 DCHECK_NE(invalid_value_, value); | |
| 222 return value; | |
| 223 } | |
| 224 } | |
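Because pop() returns the invalid value instead of blocking when the queue is empty, callers poll or retry. A hypothetical single-producer/single-consumer exercise (illustrative only; not from the patch, and it assumes this header is included):

```cpp
#include <thread>

void ProducerConsumerExample() {
  base::debug::LockFreeSimpleQueue<int> queue(100, -1);

  std::thread producer([&queue] {
    for (int i = 0; i < 1000; ++i) {
      while (!queue.push(i))        // push() returns false while full...
        std::this_thread::yield();  // ...so yield and retry.
    }
  });

  std::thread consumer([&queue] {
    for (int received = 0; received < 1000;) {
      if (queue.pop() != -1)        // -1 (the invalid value) means "empty".
        ++received;
    }
  });

  producer.join();
  consumer.join();
}
```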
| 225 //============================================================================= | |
| 226 | |
| 227 | |
| 44 enum : int { | 228 enum : int { |
| 45 // The maximum number of call-stack addresses stored per activity. This | 229 // The maximum number of call-stack addresses stored per activity. This |
| 46 // cannot be changed without also changing the version number of the | 230 // cannot be changed without also changing the version number of the |
| 47 // structure. See kTypeIdActivityTracker in GlobalActivityTracker. | 231 // structure. See kTypeIdActivityTracker in GlobalActivityTracker. |
| 48 kActivityCallStackSize = 10, | 232 kActivityCallStackSize = 10, |
| 49 }; | 233 }; |
| 50 | 234 |
| 51 // The data associated with an activity is dependent upon the activity type. | 235 // The data associated with an activity is dependent upon the activity type. |
| 52 // This union defines all of the various fields. All fields must be explicitly | 236 // This union defines all of the various fields. All fields must be explicitly |
| 53 // sized types to ensure no interoperability problems between 32-bit and | 237 // sized types to ensure no interoperability problems between 32-bit and |
| (...skipping 447 matching lines...) | |
| 501 // The size (in bytes) of memory required by a ThreadActivityTracker to | 685 // The size (in bytes) of memory required by a ThreadActivityTracker to |
| 502 // provide the stack-depth requested during construction. | 686 // provide the stack-depth requested during construction. |
| 503 const size_t stack_memory_size_; | 687 const size_t stack_memory_size_; |
| 504 | 688 |
| 505 // The activity tracker for the currently executing thread. | 689 // The activity tracker for the currently executing thread. |
| 506 base::ThreadLocalStorage::Slot this_thread_tracker_; | 690 base::ThreadLocalStorage::Slot this_thread_tracker_; |
| 507 | 691 |
| 508 // These have to be lock-free because lock activity is tracked and causes | 692 // These have to be lock-free because lock activity is tracked and causes |
| 509 // re-entry problems. | 693 // re-entry problems. |
| 510 std::atomic<int> thread_tracker_count_; | 694 std::atomic<int> thread_tracker_count_; |
| 511 std::atomic<int> available_memories_count_; | 695 LockFreeSimpleQueue<PersistentMemoryAllocator::Reference> available_memories_; |
| 512 std::atomic<PersistentMemoryAllocator::Reference> | |
| 513 available_memories_[kMaxThreadCount]; | |
| 514 | 696 |
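On the NEW side the queue replaces the fixed array of references and its separate count. A hedged sketch of how the tracker might recycle blocks freed by exiting threads (the method name GetFreeMemory and the member allocator_ are assumptions, not taken from the patch):

```cpp
PersistentMemoryAllocator::Reference GetFreeMemory() {
  // Prefer a block released by a thread that has exited, if one is queued.
  PersistentMemoryAllocator::Reference ref = available_memories_.pop();
  if (ref != available_memories_.invalid_value())
    return ref;
  // Otherwise carve a fresh block out of the persistent allocator
  // (allocator_ is an assumed member of GlobalActivityTracker).
  return allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker);
}
```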
| 515 // The active global activity tracker. | 697 // The active global activity tracker. |
| 516 static GlobalActivityTracker* g_tracker_; | 698 static GlobalActivityTracker* g_tracker_; |
| 517 | 699 |
| 518 DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker); | 700 DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker); |
| 519 }; | 701 }; |
| 520 | 702 |
| 521 | 703 |
| 522 // Record entry into and out of an arbitrary block of code. | 704 // Record entry into and out of an arbitrary block of code. |
| 523 class BASE_EXPORT ScopedActivity | 705 class BASE_EXPORT ScopedActivity |
| (...skipping 83 matching lines...) | |
| 607 explicit ScopedProcessWaitActivity(const base::Process* process); | 789 explicit ScopedProcessWaitActivity(const base::Process* process); |
| 608 private: | 790 private: |
| 609 DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity); | 791 DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity); |
| 610 }; | 792 }; |
| 611 #endif | 793 #endif |
| 612 | 794 |
| 613 } // namespace debug | 795 } // namespace debug |
| 614 } // namespace base | 796 } // namespace base |
| 615 | 797 |
| 616 #endif // BASE_DEBUG_ACTIVITY_TRACKER_H_ | 798 #endif // BASE_DEBUG_ACTIVITY_TRACKER_H_ |