Chromium Code Reviews

Side by Side Diff: base/debug/activity_tracker.h

Issue 2255503002: New cache for the activity tracker. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: some clean up (created 4 years, 4 months ago)
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 // Activity tracking provides a low-overhead method of collecting information 5 // Activity tracking provides a low-overhead method of collecting information
6 // about the state of the application for analysis both while it is running 6 // about the state of the application for analysis both while it is running
7 // and after it has terminated unexpectedly. Its primary purpose is to help 7 // and after it has terminated unexpectedly. Its primary purpose is to help
8 // locate reasons the browser becomes unresponsive by providing insight into 8 // locate reasons the browser becomes unresponsive by providing insight into
9 // what all the various threads and processes are (or were) doing. 9 // what all the various threads and processes are (or were) doing.
10 10
(...skipping 23 matching lines...)
34 class Lock; 34 class Lock;
35 class MemoryMappedFile; 35 class MemoryMappedFile;
36 class PlatformThreadHandle; 36 class PlatformThreadHandle;
37 class Process; 37 class Process;
38 class WaitableEvent; 38 class WaitableEvent;
39 39
40 namespace debug { 40 namespace debug {
41 41
42 class ThreadActivityTracker; 42 class ThreadActivityTracker;
43 43
44
45 //=============================================================================
46 // This class provides a lock-free FIFO of any atomic type with the
47 // limitation that there must be at least one "invalid" value. This is
48 // built as a completely generic type and can (hopefully) be moved to a
49 // more generally useful place in the future.
50 template <typename T, size_t SIZE, T INVALID_VALUE>
manzagop (departed) 2016/08/16 21:44:31 Would it be better for generated code size to excl
bcwhite 2016/08/17 19:29:23 Seems reasonable. I'll look into it.
bcwhite 2016/08/18 19:26:42 Done.
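For illustration only (not part of this patch set): one way the code-size suggestion above could be realized is to keep the push/pop machinery in a base templated only on the element type, with the size supplied at run time, so the hot-path code is stamped out once per element type rather than once per (type, size) pair. Every name below is hypothetical.

#include <atomic>
#include <stddef.h>

// Size-independent core: compiled once per element type T.
template <typename T, T INVALID_VALUE>
class LockFreeSimpleFifoBase {
 public:
  LockFreeSimpleFifoBase(std::atomic<T>* slots, size_t capacity)
      : slots_(slots), capacity_(capacity), head_(0), tail_(0) {}
  // push()/pop() would live here, operating on slots_/capacity_ exactly as
  // in the templated version below.

 protected:
  std::atomic<T>* const slots_;
  const size_t capacity_;
  std::atomic<size_t> head_;
  std::atomic<size_t> tail_;
};

// Sized wrapper: only supplies storage, adding no per-size code of its own.
template <typename T, size_t SIZE, T INVALID_VALUE>
class SizedLockFreeSimpleFifo : public LockFreeSimpleFifoBase<T, INVALID_VALUE> {
 public:
  SizedLockFreeSimpleFifo()
      : LockFreeSimpleFifoBase<T, INVALID_VALUE>(storage_, SIZE + 1) {
    for (size_t i = 0; i < SIZE + 1; ++i)
      storage_[i].store(INVALID_VALUE, std::memory_order_relaxed);
  }

 private:
  std::atomic<T> storage_[SIZE + 1];  // +1: one slot must always be free.
};

The later patch sets may have taken a different shape; this only sketches why dropping SIZE from the templated hot path can shrink the generated binary.
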
51 class LockFreeSimpleFifo {
52 enum : size_t {
53 CAPACITY = SIZE + 1 // +1 because one slot must always be free.
manzagop (departed) 2016/08/16 21:44:32 nit: isn't "size = capacity + 1" more accurate?
bcwhite 2016/08/18 19:26:42 Done.
54 };
55
56 public:
57 LockFreeSimpleFifo() : head_(0), tail_(0) {
58 // Ensure that the underlying atomics are also lock-free. This should
59 // evaluate to a constant at compile time and so produce no code, but
 60     // it cannot be checked with a static_assert because is_lock_free() is
 61     // not constexpr.
61 CHECK(head_.is_lock_free());
62 CHECK(stack_[0].is_lock_free());
63
64 // All elements must be "invalid" to start in order for the push/pop
65 // operations to work.
66 for (size_t i = 0; i < CAPACITY; ++i)
67 stack_[i].store(INVALID_VALUE, std::memory_order_relaxed);
68 }
69
70 T invalid_value() { return INVALID_VALUE; }
71 size_t size() { return SIZE; }
72 size_t used() {
73 return (head_.load(std::memory_order_relaxed) + CAPACITY -
74 tail_.load(std::memory_order_relaxed)) %
75 CAPACITY;
76 }
77 bool empty() {
78 return empty(head_.load(std::memory_order_relaxed),
79 tail_.load(std::memory_order_relaxed));
80 }
81 bool full() {
82 return full(head_.load(std::memory_order_relaxed),
83 tail_.load(std::memory_order_relaxed));
84 }
85
86 // Adds a new |value| to the end of the FIFO and returns true on success
87 // or false if the stack was full.
88 bool push(T value);
89
90 // Retrieves the first value off the FIFO and returns it or the "invalid"
91 // value if the stack is empty.
92 T pop();
93
94 private:
95 // Reports if the stack is empty/full based on explit head/tail values.
manzagop (departed) 2016/08/16 21:44:31 nit: explit?
bcwhite 2016/08/17 19:29:23 Done.
96 bool empty(size_t head, size_t tail) { return head == tail; }
97 bool full(size_t head, size_t tail) {
98 return (tail + CAPACITY - 1) % CAPACITY == head;
manzagop (departed) 2016/08/16 21:44:32 nit: perhaps comment on the modulo, since that var
bcwhite 2016/08/17 19:29:23 Done.
99 }
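
On the modulo question above: the "+ CAPACITY" term exists because head and tail are unsigned, so "tail - 1" would wrap around when tail is 0. A standalone check with illustrative values (not part of the patch):

#include <assert.h>
#include <stddef.h>

int main() {
  const size_t kCapacity = 4;  // e.g. SIZE == 3 plus the always-free slot.
  size_t head = 3;             // One past the newest value; where to push.
  size_t tail = 0;             // The oldest value; first to pop.

  // The slot just before the tail. Writing "tail - 1" directly would wrap
  // an unsigned 0 around to SIZE_MAX; adding kCapacity first avoids that.
  assert((tail + kCapacity - 1) % kCapacity == 3);

  // With these values the queue is full: three slots used, one kept free.
  assert((tail + kCapacity - 1) % kCapacity == head);
  assert((head + kCapacity - tail) % kCapacity == 3);  // used() == SIZE
  return 0;
}
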
100
101 std::atomic<T> stack_[CAPACITY];
102 std::atomic<size_t> head_; // One past the newest value; where to push.
103 std::atomic<size_t> tail_; // The oldest value; first to pop.
104
105 DISALLOW_COPY_AND_ASSIGN(LockFreeSimpleFifo);
106 };
107
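For readers skimming the review, a hedged usage sketch of the class as declared above. The element type, size, and 0 sentinel are illustrative choices rather than values from this patch, and namespace qualifiers are omitted for brevity:

#include <stdint.h>

void ExampleUse() {
  // Holds up to 8 values; 0 is the "invalid" sentinel and can never be pushed.
  LockFreeSimpleFifo<uint32_t, 8, 0u> fifo;

  fifo.push(42u);  // Returns false, rather than blocking, if the FIFO is full.
  fifo.push(7u);

  uint32_t value = fifo.pop();  // 42: the oldest value comes out first.
  while (value != fifo.invalid_value()) {
    // ... use |value| ...
    value = fifo.pop();         // 7, then the sentinel once the FIFO is empty.
  }
}
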
108 template <typename T, size_t SIZE, T INVALID_VALUE>
109 bool LockFreeSimpleFifo<T, SIZE, INVALID_VALUE>::push(T value) {
110 // Pushing the "invalid" value is not allowed; it would be skipped by pop.
111 CHECK_NE(INVALID_VALUE, value);
112
113 while (true) {
114 // Get the head of the stack and acquire its contents.
115 size_t head = head_.load(std::memory_order_acquire);
116 DCHECK_LE(0U, head);
117 DCHECK_GT(CAPACITY, head);
118
119 // If the stack is full, fail.
120 if (full(head, tail_.load(std::memory_order_relaxed)))
121 return false;
122
123 // Write the value being pushed to the top of the stack, exchanging it
124 // with the "invalid" value that should be there. If the atomic operation
125     // fails then another thread has snuck in and pushed its own value to
126 // that slot. A "strong" exchange is used to avoid mistakenly incrementing
127 // past the new value.
128 T value_expected = INVALID_VALUE;
129 if (!stack_[head].compare_exchange_strong(value_expected, value,
130 std::memory_order_release,
131 std::memory_order_relaxed)) {
132 // Something got pushed so increment the head pointer past it. This is
133 // done to handle the case where whatever has written the value somehow
134 // died between writing the value and incrementing the head pointer. In
135 // that (unusual) case, simply trying again would lead to an infinite
136 // loop. We try but don't care if it fails because failure just indicates
137 // that the other thread is behaving properly. A "strong" exchange is
138 // necessary to avoid false failures.
139 head_.compare_exchange_strong(head, head + 1,
140 std::memory_order_relaxed,
141 std::memory_order_relaxed);
142
143 // Try again.
144 continue;
145 }
146
147 // Increment the head, releasing the newly stored value. This could fail
148 // if another thread tried a simultaneous push, saw this value, and
149     // bumped the head in the crash-safety increment just above. Because the
150 // result is ignored, a "strong" exchange is necessary to ensure a
151 // false failure doesn't occur.
manzagop (departed) 2016/08/16 22:05:00 I'm wondering if there's a problem. Being able to
bcwhite 2016/08/17 19:29:23 So... multiple pushes and pops occur such that ta
manzagop (departed) 2016/08/18 14:14:46 I agree this fixes the problem cases. However, I'm
bcwhite 2016/08/18 19:26:42 I've simplified things now that thread-death is no
152 head_.compare_exchange_strong(head, head + 1,
153 std::memory_order_release,
154 std::memory_order_relaxed);
155
156 // Success!
157 return true;
158 }
159 }
160
161 template <typename T, size_t SIZE, T INVALID_VALUE>
162 T LockFreeSimpleFifo<T, SIZE, INVALID_VALUE>::pop() {
163 while (true) {
164     // Get the tail of the stack: the oldest value and the first to pop.
165 size_t tail = tail_.load(std::memory_order_relaxed);
166 DCHECK_LE(0U, tail);
167 DCHECK_GT(CAPACITY, tail);
168
169 // If the stack is empty, fail.
170 if (empty(head_.load(std::memory_order_relaxed), tail))
171 return INVALID_VALUE;
172
173     // Read a value from the bottom of the stack, writing the "invalid" value in its
174 // place.
175 T value = stack_[tail].exchange(INVALID_VALUE, std::memory_order_relaxed);
176 if (value == INVALID_VALUE) {
177 // Another thread has already taken the "tail" value so increment the
178 // tail pointer past it. This is done to handle the case where whatever
179 // took the value somehow died between reading the value and incrementing
180 // the tail pointer. In that (unusual) case, simply trying again would
181 // lead to an infinite loop. We try but don't care if it fails because
182 // that just indicates that the other thread is behaving properly. A
183 // "strong" exchange is necessary to avoid false failures.
184 tail_.compare_exchange_strong(tail, tail + 1,
185 std::memory_order_relaxed,
186 std::memory_order_relaxed);
187
188 // Try again.
189 continue;
190 }
191
192     // Increment the tail, releasing the now-invalidated slot. This could fail
193 // if another thread tried a simultaneous pop, saw the update, and
194     // bumped the tail in the crash-safety increment just above. Because the
195 // result is ignored, a "strong" exchange is necessary to ensure a
196 // false failure doesn't occur.
197 tail_.compare_exchange_strong(tail, tail + 1,
198 std::memory_order_release,
199 std::memory_order_relaxed);
200
201 // Success!
202 DCHECK_NE(INVALID_VALUE, value);
203 return value;
204 }
205 }
206 //=============================================================================
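
A hedged sketch of how one might smoke-test the FIFO above for lost or duplicated values. This is not a test from the Chromium tree; it uses std::thread for brevity where Chromium code would use base threading primitives, and namespace qualifiers are again omitted:

#include <atomic>
#include <stdint.h>
#include <thread>
#include <vector>

void FifoSmokeTest() {
  LockFreeSimpleFifo<uint32_t, 64, 0u> fifo;
  std::atomic<int> popped_count(0);
  std::atomic<uint64_t> popped_sum(0);

  std::vector<std::thread> threads;
  // Four producers push 16 distinct non-zero values each (64 total).
  for (uint32_t t = 0; t < 4; ++t) {
    threads.emplace_back([&fifo, t] {
      for (uint32_t i = 1; i <= 16; ++i)
        while (!fifo.push(t * 100 + i)) {}  // Retry if momentarily full.
    });
  }
  // Four consumers pop until all 64 values have been accounted for.
  for (int t = 0; t < 4; ++t) {
    threads.emplace_back([&fifo, &popped_count, &popped_sum] {
      while (popped_count.load(std::memory_order_relaxed) < 64) {
        uint32_t value = fifo.pop();
        if (value == fifo.invalid_value())
          continue;  // FIFO momentarily empty; try again.
        popped_sum.fetch_add(value, std::memory_order_relaxed);
        popped_count.fetch_add(1, std::memory_order_relaxed);
      }
    });
  }
  for (std::thread& thread : threads)
    thread.join();
  // popped_sum now equals the sum of everything pushed if and only if no
  // value was lost or popped twice.
}
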
207
208
44 enum : int { 209 enum : int {
45 // The maximum number of call-stack addresses stored per activity. This 210 // The maximum number of call-stack addresses stored per activity. This
46 // cannot be changed without also changing the version number of the 211 // cannot be changed without also changing the version number of the
47 // structure. See kTypeIdActivityTracker in GlobalActivityTracker. 212 // structure. See kTypeIdActivityTracker in GlobalActivityTracker.
48 kActivityCallStackSize = 10, 213 kActivityCallStackSize = 10,
49 }; 214 };
50 215
51 // The data associated with an activity is dependent upon the activity type. 216 // The data associated with an activity is dependent upon the activity type.
52 // This union defines all of the various fields. All fields must be explicitly 217 // This union defines all of the various fields. All fields must be explicitly
53 // sized types to ensure no interoperability problems between 32-bit and 218 // sized types to ensure no interoperability problems between 32-bit and
(...skipping 447 matching lines...)
501 // The size (in bytes) of memory required by a ThreadActivityTracker to 666 // The size (in bytes) of memory required by a ThreadActivityTracker to
502 // provide the stack-depth requested during construction. 667 // provide the stack-depth requested during construction.
503 const size_t stack_memory_size_; 668 const size_t stack_memory_size_;
504 669
505 // The activity tracker for the currently executing thread. 670 // The activity tracker for the currently executing thread.
506 base::ThreadLocalStorage::Slot this_thread_tracker_; 671 base::ThreadLocalStorage::Slot this_thread_tracker_;
507 672
508 // These have to be lock-free because lock activity is tracked and causes 673 // These have to be lock-free because lock activity is tracked and causes
509 // re-entry problems. 674 // re-entry problems.
510 std::atomic<int> thread_tracker_count_; 675 std::atomic<int> thread_tracker_count_;
511 std::atomic<int> available_memories_count_; 676 LockFreeSimpleFifo<PersistentMemoryAllocator::Reference,
512 std::atomic<PersistentMemoryAllocator::Reference> 677 kMaxThreadCount,
513 available_memories_[kMaxThreadCount]; 678 PersistentMemoryAllocator::kReferenceNull>
679 available_memories_;
514 680
515 // The active global activity tracker. 681 // The active global activity tracker.
516 static GlobalActivityTracker* g_tracker_; 682 static GlobalActivityTracker* g_tracker_;
517 683
518 DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker); 684 DISALLOW_COPY_AND_ASSIGN(GlobalActivityTracker);
519 }; 685 };
520 686
521 687
522 // Record entry in to and out of an arbitrary block of code. 688 // Record entry in to and out of an arbitrary block of code.
523 class BASE_EXPORT ScopedActivity 689 class BASE_EXPORT ScopedActivity
(...skipping 83 matching lines...)
607 explicit ScopedProcessWaitActivity(const base::Process* process); 773 explicit ScopedProcessWaitActivity(const base::Process* process);
608 private: 774 private:
609 DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity); 775 DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity);
610 }; 776 };
611 #endif 777 #endif
612 778
613 } // namespace debug 779 } // namespace debug
614 } // namespace base 780 } // namespace base
615 781
616 #endif // BASE_DEBUG_ACTIVITY_TRACKER_H_ 782 #endif // BASE_DEBUG_ACTIVITY_TRACKER_H_