OLD | NEW |
---|---|
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // Activity tracking provides a low-overhead method of collecting information | 5 // Activity tracking provides a low-overhead method of collecting information |
6 // about the state of the application for analysis both while it is running | 6 // about the state of the application for analysis both while it is running |
7 // and after it has terminated unexpectedly. Its primary purpose is to help | 7 // and after it has terminated unexpectedly. Its primary purpose is to help |
8 // locate reasons the browser becomes unresponsive by providing insight into | 8 // locate reasons the browser becomes unresponsive by providing insight into |
9 // what all the various threads and processes are (or were) doing. | 9 // what all the various threads and processes are (or were) doing. |
10 | 10 |
(...skipping 23 matching lines...) | |
34 class Lock; | 34 class Lock; |
35 class MemoryMappedFile; | 35 class MemoryMappedFile; |
36 class PlatformThreadHandle; | 36 class PlatformThreadHandle; |
37 class Process; | 37 class Process; |
38 class WaitableEvent; | 38 class WaitableEvent; |
39 | 39 |
40 namespace debug { | 40 namespace debug { |
41 | 41 |
42 class ThreadActivityTracker; | 42 class ThreadActivityTracker; |
43 | 43 |
44 | |
45 //============================================================================= | |
46 // This class provides a lock-free FIFO of any atomic type with the | |
47 // limitation that there must be at least one "invalid" value. This is | |
48 // built as a completely generic type and can (hopefully) be moved to a | |
49 // more generally useful place in the future. | |
50 template <typename T> | |
51 class LockFreeSimpleFifo { | |
52 public: | |
53 // Construct a simple lock-free FIFO with the specified |size| that is | |
54 // not allowed to hold the |invalid_value|. | |
55 LockFreeSimpleFifo(size_t size, T invalid_value) | |
56 : size_(size + 1), invalid_value_(invalid_value), head_(0), tail_(0) { | |
57 DCHECK_LE(1, size); | |
58 | |
59 // Allocate memory for the FIFO values. Its size is the requested size +1 | |
60 // so head == tail can unambiguously mean "empty"; one slot is never used. | |
61 stack_.reset(new std::atomic<T>[size_]); | |
62 | |
63 // Ensure that the underlying atomics are also lock-free. This should | |
64 // evaluate to a constant at compile time and so produce no code, but it | |
65 // cannot be a static_assert because is_lock_free() is not constexpr. | |
66 CHECK(head_.is_lock_free()); | |
67 CHECK(stack_[0].is_lock_free()); | |
68 | |
69 // All elements must be "invalid" to start in order for the push/pop | |
70 // operations to work. | |
71 for (size_t i = 0; i < size_; ++i) | |
72 stack_[i].store(invalid_value_, std::memory_order_relaxed); | |
73 } | |
74 | |
75 T invalid_value() { return invalid_value_; } | |
76 size_t size() { return size_ - 1; } | |
77 size_t used() { | |
78 return (head_.load(std::memory_order_relaxed) + size_ - | |
79 tail_.load(std::memory_order_relaxed)) % | |
80 size_; | |
81 } | |
82 bool empty() { | |
83 return empty(head_.load(std::memory_order_relaxed), | |
84 tail_.load(std::memory_order_relaxed)); | |
85 } | |
86 bool full() { | |
87 return full(head_.load(std::memory_order_relaxed), | |
88 tail_.load(std::memory_order_relaxed)); | |
89 } | |
90 | |
91 // Adds a new |value| to the end of the FIFO and returns true on success | |
92 // or false if the FIFO is full. | |
93 bool push(T value); | |
94 | |
95 // Retrieves the first value off the FIFO and returns it, or the "invalid" | |
96 // value if the FIFO is empty. | |
97 T pop(); | |
98 | |
99 private: | |
100 // Reports whether the FIFO is empty/full based on explicit head/tail | |
101 // values. Note that the % operation in C/C++ is a "remainder" operator and | |
102 // does not give true modular results when the left operand would go | |
103 // negative (or wrap, for unsigned types); adding |size_| keeps it in range. | |
104 bool empty(size_t head, size_t tail) { return head == tail; } | |
105 bool full(size_t head, size_t tail) { | |
106 return (tail + size_ - 1) % size_ == head; | |
107 } | |
108 | |
109 const size_t size_; // Size of the internal |stack_|: requested + 1 | |
110 const T invalid_value_; // A value not allowed to be stored. | |
111 | |
112 std::atomic<size_t> head_; // One past the newest value; where to push. | |
113 std::atomic<size_t> tail_; // The oldest value; first to pop. | |
114 | |
115 // Array holding pushed values. | |
116 std::unique_ptr<std::atomic<T>[]> stack_; | |
117 | |
118 DISALLOW_COPY_AND_ASSIGN(LockFreeSimpleFifo); | |
119 }; | |
120 | |
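To make the head/tail ring arithmetic concrete, here is a hedged single-threaded usage sketch (not part of the patch; it assumes the class above, instantiated with int elements and -1 as the reserved "invalid" value):

    #include <cassert>

    // A FIFO with requested size 3 uses size_ == 4 internal slots; one slot
    // always stays unused so that head == tail can only mean "empty".
    void SimpleFifoExample() {
      base::debug::LockFreeSimpleFifo<int> fifo(3, -1);
      assert(fifo.empty());
      assert(fifo.push(10));     // head: 0 -> 1
      assert(fifo.push(20));     // head: 1 -> 2
      assert(fifo.push(30));     // head: 2 -> 3; full(3, 0) is now true
      assert(!fifo.push(40));    // Full: push() fails rather than blocking.
      assert(fifo.pop() == 10);  // Oldest value first; tail: 0 -> 1.
      assert(fifo.used() == 2);  // (3 + 4 - 1) % 4 == 2
      assert(fifo.pop() == 20);
      assert(fifo.pop() == 30);
      assert(fifo.pop() == -1);  // Empty: the "invalid" value comes back.
    }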
121 template <typename T> | |
122 bool LockFreeSimpleFifo<T>::push(T value) { | |
123 // Pushing the "invalid" value is not allowed; it would cause an infinite | |
manzagop (departed)
2016/08/19 20:44:50
nit: "it would be cause"
bcwhite
2016/08/23 14:52:37
Done.
| |
124 // loop in pop. | |
125 CHECK_NE(invalid_value_, value); | |
126 | |
127 // Get the head of the FIFO and acquire its contents. | |
128 size_t head = head_.load(std::memory_order_acquire); | |
129 | |
130 while (true) { | |
131 DCHECK_LE(0U, head); | |
132 DCHECK_GT(size_, head); | |
133 | |
134 // If the FIFO is full, fail. | |
135 if (full(head, tail_.load(std::memory_order_relaxed))) | |
136 return false; | |
137 | |
138 // Write the value being pushed to the top of the FIFO, exchanging it | |
139 // with the "invalid" value that should be there. If the atomic operation | |
140 // fails then another thread has snuck in and pushed its own value to | |
141 // that slot. A "strong" exchange is used to avoid mistakenly yielding | |
142 // the CPU. | |
143 T value_expected = invalid_value_; | |
144 if (!stack_[head].compare_exchange_strong(value_expected, value, | |
145 std::memory_order_release, | |
146 std::memory_order_relaxed)) { | |
147 // Get the new head since the one acquired above is no longer valid. | |
148 size_t new_head = head_.load(std::memory_order_acquire); | |
149 | |
150 // If the new head matches the old one then another thread is currently | |
manzagop (departed)
2016/08/19 20:44:50
typo: maches
bcwhite
2016/08/23 14:52:37
Acknowledged.
| |
151 // between the write of the value and the increment of the head. Give it | |
152 // a chance to run. | |
153 if (new_head == head) { | |
154 PlatformThread::YieldCurrentThread(); | |
155 new_head = head_.load(std::memory_order_acquire); | |
156 } | |
157 | |
158 // Try again. | |
159 head = new_head; | |
160 continue; | |
161 } | |
162 | |
163 // Increment the head, releasing the newly stored value. There is no reason | |
164 // for this to fail since only one thread at a time can be between the | |
manzagop (departed)
2016/08/19 20:44:50
Ugh... I think we still have the issue where after
bcwhite
2016/08/22 12:59:41
I don't think that can happen any more. Only the
manzagop (departed)
2016/08/22 14:49:29
Here's what I have in mind: T1 stores a copy of he
bcwhite
2016/08/23 14:52:37
Right. I'm going about this all wrong. A slot sh
| |
165 // exchange above and the increment below. A "weak" exchange is sufficient | |
166 // because a simple retry can handle false failures. | |
167 size_t expected_head = head; | |
168 while (!head_.compare_exchange_weak(expected_head, (head + 1) % size_, | |
169 std::memory_order_release, | |
170 std::memory_order_relaxed)) { | |
171 // |expected_head| is updated with the actual value stored there. | |
172 // Ensure it matches our expectation that only false-failures can | |
173 // occur. | |
174 DCHECK_EQ(head, expected_head); | |
175 } | |
176 | |
177 // Success! | |
178 return true; | |
179 } | |
180 } | |
181 | |
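The comments in push() lean on the difference between the strong and weak compare-exchange forms; a minimal standalone illustration of that difference (not from the patch):

    #include <atomic>

    void StrongVsWeakCas(std::atomic<int>& a) {
      int expected = 0;
      // compare_exchange_strong fails only when |a| really differs from
      // |expected|, so a failure is a reliable signal that another thread
      // intervened -- which is why push() reacts to a failure by yielding.
      bool won = a.compare_exchange_strong(expected, 1);

      // compare_exchange_weak may fail spuriously even when the values
      // match, so it belongs in a retry loop; in exchange it can be cheaper
      // on LL/SC architectures, which is why the head increment uses it.
      expected = 1;
      while (!a.compare_exchange_weak(expected, 2))
        expected = 1;  // Reset after a (possibly spurious) failure.
      (void)won;
    }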
182 template <typename T> | |
183 T LockFreeSimpleFifo<T>::pop() { | |
184 // Get the tail of the FIFO and acquire its contents. | |
185 size_t tail = tail_.load(std::memory_order_acquire); | |
186 | |
187 while (true) { | |
188 DCHECK_LE(0U, tail); | |
189 DCHECK_GT(size_, tail); | |
190 | |
191 // If the FIFO is empty, fail. | |
192 if (empty(head_.load(std::memory_order_relaxed), tail)) | |
193 return invalid_value_; | |
194 | |
195 // Read a value from the bottom of the FIFO, writing the "invalid" value | |
196 // in its place. If the retrieved value is invalid then it has already | |
197 // been taken. | |
198 T value = stack_[tail].exchange(invalid_value_, std::memory_order_relaxed); | |
199 if (value == invalid_value_) { | |
200 // Get the new tail since the one acquired above is no longer valid. | |
201 size_t new_tail = tail_.load(std::memory_order_acquire); | |
202 | |
203 // If the new tail matches the old one then another thread is currently | |
204 // between the read of the value and the increment of the tail. Give it | |
205 // a chance to run. | |
206 if (new_tail == tail) { | |
207 PlatformThread::YieldCurrentThread(); | |
208 new_tail = tail_.load(std::memory_order_acquire); | |
209 } | |
210 | |
211 // Try again. | |
212 tail = new_tail; | |
213 continue; | |
214 } | |
215 | |
216 // Increment the tail, releasing the now-empty slot. There is no reason | |
217 // for this to fail since only one thread at a time can be between the | |
218 // exchange above and the increment below. A "weak" exchange is sufficient | |
219 // because a simple retry can handle false failures. | |
220 size_t expected_tail = tail; | |
221 while (!tail_.compare_exchange_weak(expected_tail, (tail + 1) % size_, | |
222 std::memory_order_release, | |
223 std::memory_order_relaxed)) { | |
224 // |expected_tail| is updated with the actual value stored there. | |
225 // Ensure it matches our expectation that only false-failures can | |
226 // occur. | |
227 DCHECK_EQ(tail, expected_tail); | |
228 } | |
229 | |
230 // Success! | |
231 DCHECK_NE(invalid_value_, value); | |
232 return value; | |
233 } | |
234 } | |
235 //============================================================================= | |
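The lock-free properties only matter under contention, and the review thread above questions whether a slot can be recycled out from under a slow thread; a hedged stress-test sketch (hypothetical names, not part of the patch) is the kind of check that would exercise that path:

    #include <atomic>
    #include <thread>
    #include <vector>

    // Four producers push 1..1000 each while four consumers pop; every value
    // must come out exactly once, so the popped sum must be 4 * 500500.
    void ConcurrentFifoSmokeTest() {
      base::debug::LockFreeSimpleFifo<int> fifo(100, -1);
      std::atomic<int> popped_sum(0);
      std::vector<std::thread> threads;
      for (int t = 0; t < 4; ++t) {
        threads.emplace_back([&fifo]() {
          for (int i = 1; i <= 1000; ++i)
            while (!fifo.push(i)) {}  // Spin while momentarily full.
        });
        threads.emplace_back([&fifo, &popped_sum]() {
          for (int i = 0; i < 1000; ++i) {
            int v;
            while ((v = fifo.pop()) == -1) {}  // Spin while momentarily empty.
            popped_sum.fetch_add(v, std::memory_order_relaxed);
          }
        });
      }
      for (std::thread& th : threads)
        th.join();
      CHECK_EQ(4 * 500500, popped_sum.load());
    }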
236 | |
237 | |
44 enum : int { | 238 enum : int { |
45 // The maximum number of call-stack addresses stored per activity. This | 239 // The maximum number of call-stack addresses stored per activity. This |
46 // cannot be changed without also changing the version number of the | 240 // cannot be changed without also changing the version number of the |
47 // structure. See kTypeIdActivityTracker in GlobalActivityTracker. | 241 // structure. See kTypeIdActivityTracker in GlobalActivityTracker. |
48 kActivityCallStackSize = 10, | 242 kActivityCallStackSize = 10, |
49 }; | 243 }; |
50 | 244 |
51 // The data associated with an activity is dependent upon the activity type. | 245 // The data associated with an activity is dependent upon the activity type. |
52 // This union defines all of the various fields. All fields must be explicitly | 246 // This union defines all of the various fields. All fields must be explicitly |
53 // sized types to ensure no interoperability problems between 32-bit and | 247 // sized types to ensure no interoperability problems between 32-bit and |
(...skipping 447 matching lines...) | |
501 // The size (in bytes) of memory required by a ThreadActivityTracker to | 695 // The size (in bytes) of memory required by a ThreadActivityTracker to |
502 // provide the stack-depth requested during construction. | 696 // provide the stack-depth requested during construction. |
503 const size_t stack_memory_size_; | 697 const size_t stack_memory_size_; |
504 | 698 |
505 // The activity tracker for the currently executing thread. | 699 // The activity tracker for the currently executing thread. |
506 base::ThreadLocalStorage::Slot this_thread_tracker_; | 700 base::ThreadLocalStorage::Slot this_thread_tracker_; |
507 | 701 |
508 // These have to be lock-free because lock activity is tracked and causes | 702 // These have to be lock-free because lock activity is tracked and causes |
509 // re-entry problems. | 703 // re-entry problems. |
510 std::atomic<int> thread_tracker_count_; | 704 std::atomic<int> thread_tracker_count_; |
511 std::atomic<int> available_memories_count_; | 705 LockFreeSimpleFifo<PersistentMemoryAllocator::Reference> available_memories_; |
512 std::atomic<PersistentMemoryAllocator::Reference> | |
513 available_memories_[kMaxThreadCount]; | |
514 | 706 |
515 // The active global activity tracker. | 707 // The active global activity tracker. |
516 static GlobalActivityTracker* g_tracker_; | 708 static GlobalActivityTracker* g_tracker_; |
517 | 709 |
518 DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker); | 710 DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker); |
519 }; | 711 }; |
520 | 712 |
521 | 713 |
522 // Record entry into and out of an arbitrary block of code. | 714 // Record entry into and out of an arbitrary block of code. |
523 class BASE_EXPORT ScopedActivity | 715 class BASE_EXPORT ScopedActivity |
(...skipping 83 matching lines...) | |
607 explicit ScopedProcessWaitActivity(const base::Process* process); | 799 explicit ScopedProcessWaitActivity(const base::Process* process); |
608 private: | 800 private: |
609 DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity); | 801 DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity); |
610 }; | 802 }; |
611 #endif | 803 #endif |
612 | 804 |
613 } // namespace debug | 805 } // namespace debug |
614 } // namespace base | 806 } // namespace base |
615 | 807 |
616 #endif // BASE_DEBUG_ACTIVITY_TRACKER_H_ | 808 #endif // BASE_DEBUG_ACTIVITY_TRACKER_H_ |