Chromium Code Reviews

Side by Side Diff: base/debug/activity_tracker.cc

Issue 1980743002: Track thread activities in order to diagnose hangs. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@readwrite-mmf
Patch Set: rebased Created 4 years, 7 months ago
1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/debug/activity_tracker.h"
6
7 #include <atomic>
8
9 #include "base/files/memory_mapped_file.h"
10 #include "base/logging.h"
11 #include "base/memory/ptr_util.h"
12 #include "base/metrics/field_trial.h"
13 #include "base/pending_task.h"
14 #include "base/stl_util.h"
15
16 namespace base {
17 namespace debug {
18
19 namespace {
20
21 // A number that identifies memory as having been initialized.
manzagop (departed) 2016/05/20 18:19:29 nit: typos
bcwhite 2016/05/20 19:19:18 Done. (I have done zero proof-reading so far.)
22 const uint64_t kHeaderCookie = 0x98476A390137E67A + 1; // v1
manzagop (departed) 2016/05/20 18:19:29 Comment on where this value comes from?
bcwhite 2016/05/20 19:19:18 Done.
23
24 // The minimum depth a stack should support.
25 const int kMinStackDepth = 2;
26
27 // Type identifiers used when storing in persistent memory so they can be
28 // identified during extraction; the first 4 bytes of the SHA1 of the name
29 // are used as a unique integer. A "version number" is added to the base
30 // so that, if the structure of that object changes, stored older versions
31 // will be safely ignored.
32 enum : uint32_t {
33 kTypeIdActivityTracker = 0x5D7381AF + 1, // SHA1(ActivityTracker) v1
34 kTypeIdActivityTrackerFree = 0x3F0272FB, // SHA1(ActivityTrackerFree)
35 };
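
To illustrate the derivation described in the comment above, a minimal sketch follows. TypeIdForName is a hypothetical helper, not part of this patch; it assumes Chromium's base/sha1.h API, and host byte order for the 4-byte extraction:

  #include <cstdint>
  #include <cstring>
  #include <string>
  #include "base/sha1.h"

  // Hypothetical helper: first 4 bytes of SHA1(name), plus a version
  // offset, as described above. Host byte order is an assumption here.
  uint32_t TypeIdForName(const std::string& name, uint32_t version) {
    const std::string digest = base::SHA1HashString(name);  // 20-byte digest
    uint32_t id;
    memcpy(&id, digest.data(), sizeof(id));
    return id + version;
  }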
36
37 } // namespace
38
39 const char kActivityTrackingFeatureName[] = "ActivityTracking";
40
41 void SetupGlobalActivityTrackerFieldTrial() {
42 const size_t kMemorySize = 1 << 20; // 1 MiB
manzagop (departed) 2016/05/20 18:19:30 Can you comment on how you chose this number? (k t
bcwhite 2016/05/20 19:19:19 Acknowledged.
43 const int kStackDepth = 3;
44 const uint64_t kAllocatorId = 0;
45 const char kAllocatorName[] = "ActivityTracker";
46
47 const std::string group_name =
48 FieldTrialList::FindFullName(kActivityTrackingFeatureName);
49 if (group_name.empty() || group_name == "Disabled")
50 return;
51
52 if (group_name == "InMemory")
manzagop (departed) 2016/05/20 18:19:29 nit: braces when multiline.
bcwhite 2016/05/20 19:19:18 Done.
53 GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, kAllocatorId,
54 kAllocatorName, kStackDepth);
55 else
56 NOTREACHED() << group_name;
manzagop (departed) 2016/05/20 18:19:30 Isn't it better to silently disable, and the perso
bcwhite 2016/05/20 19:19:19 That's what'll happen in a release build.
57 }
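
For local testing, the InMemory group can presumably be forced with Chromium's standard field-trial switch; the exact invocation below is an assumption based on the usual Trial/Group syntax:

  --force-fieldtrials=ActivityTracking/InMemory/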
58
59
60 struct ThreadActivityTracker::Header {
61 // This unique number indicates a valid initialization of the memory.
62 uint64_t cookie;
63
64 // The thread-id to which this data belongs. This identifier is not
65 // guaranteed to mean anything, just to be unique among all active
66 // trackers.
67 uint64_t thread_id;
68
69 // The start-time and start-ticks when the data was created. Each activity
70 // record has a |time_ticks| value that can be converted to a "wall time"
71 // with these two values.
72 int64_t start_time;
73 int64_t start_ticks;
74
75 // The number of Activity slots in the data.
76 uint32_t slots;
77
78 // The current depth of the stack. This may be greater than the number of
79 // slots. If the depth exceeds the number of slots, the newest entries
80 // won't be recorded.
81 std::atomic<uint32_t> depth;
manzagop (departed) 2016/05/20 18:19:30 Is atomic<POD> considered POD? Can there be versio
bcwhite 2016/05/20 19:19:19 Yes. No. It's exactly the underlying type but wi
82
83 // A memory location used to indicate if changes have been made to the stack
84 // that would invalidate an in-progress read of its contents. The active
85 // tracker will zero the value whenever something gets popped from the
86 // stack. A monitoring tracker can write a non-zero value here, copy the
87 // stack contents, and read the value to know, if it is still non-zero, that
88 // the contents didn't change while being copied.
89 std::atomic<int> unchanged;
manzagop (departed) 2016/05/20 18:19:30 int vs bool?
bcwhite 2016/05/20 19:19:18 I chose to use a natural word size so there's no n
90 };
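
The |unchanged| handshake amounts to a single-reader validation protocol. A standalone sketch of the idea, with illustrative names rather than the tracker's API:

  #include <atomic>
  #include <cstring>

  std::atomic<int> unchanged{0};
  int shared_data[16];

  // Tracked thread: after popping from the stack, invalidate any
  // snapshot that is in progress.
  void OnPop() {
    unchanged.store(0, std::memory_order_relaxed);
  }

  // Monitoring thread: returns true only if no pop raced with the copy.
  bool Snapshot(int (&out)[16]) {
    unchanged.store(1, std::memory_order_relaxed);
    memcpy(out, shared_data, sizeof(out));
    return unchanged.load(std::memory_order_relaxed) != 0;
  }

As the review thread notes, this supports only one concurrent reader: a second reader could re-arm |unchanged| and mask an invalidation seen by the first.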
91
92 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
93 : header_(static_cast<Header*>(base)),
94 stack_(reinterpret_cast<StackEntry*>(reinterpret_cast<char*>(base) +
95 sizeof(Header))),
96 slots_((size - sizeof(Header)) / sizeof(StackEntry)) {
manzagop (departed) 2016/05/20 18:19:29 DCHECK(base)?
bcwhite 2016/05/20 19:19:18 Done.
97 DCHECK(thread_checker_.CalledOnValidThread());
98
99 // Ensure there is enough space for the header and at least a few records.
100 DCHECK_LE(sizeof(Header) + kMinStackDepth * sizeof(StackEntry), size);
101
102 // Ensure that the |slots_| calculation didn't overflow.
103 DCHECK_GE(std::numeric_limits<uint32_t>::max(),
104 (size - sizeof(Header)) / sizeof(StackEntry));
105
106 // Provided memory should either be completely initialized or all zeros.
107 if (header_->cookie == 0) {
108 // This is a new file. Double-check other fields and then initialize.
109 DCHECK_EQ(0U, header_->thread_id);
110 DCHECK_EQ(0, header_->start_time);
111 DCHECK_EQ(0, header_->start_ticks);
112 DCHECK_EQ(0U, header_->slots);
113 DCHECK_EQ(0U, header_->depth.load(std::memory_order_relaxed));
114 DCHECK_EQ(0, header_->unchanged.load(std::memory_order_relaxed));
115 DCHECK_EQ(0, stack_[0].time_ticks);
116 DCHECK_EQ(0, stack_[0].source_address);
117 DCHECK_EQ(0, stack_[0].method_address);
118 DCHECK_EQ(0U, stack_[0].sequence_id);
119
120 header_->cookie = kHeaderCookie;
121 header_->thread_id = static_cast<uint64_t>(PlatformThread::CurrentId());
122 header_->start_time = base::Time::Now().ToInternalValue();
123 header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
124 header_->slots = slots_;
125 valid_ = true;
126 } else {
127 // This is a file with existing data. Perform basic consistency checks.
128 if (header_->cookie != kHeaderCookie ||
129 header_->slots != slots_ ||
130 header_->start_time > base::Time::Now().ToInternalValue() ||
131 stack_[0].time_ticks == 0)
132 return;
133 valid_ = true;
134 }
135 }
136
137 ThreadActivityTracker::~ThreadActivityTracker() {}
138
139 void ThreadActivityTracker::RecordStart(const void* source,
140 ActivityType activity,
141 intptr_t method,
142 uint64_t sequence) {
143 DCHECK(thread_checker_.CalledOnValidThread());
144
145 // Get the current depth of the stack. No access to other memory guarded
146 // by this variable is done here so a "relaxed" load is acceptable.
147 uint32_t depth = header_->depth.load(std::memory_order_relaxed);
148
149 // Handle the case where the stack depth has exceeded the storage capacity.
150 // Extra entries will be lost, leaving only the base of the stack.
151 if (depth >= slots_) {
152 // Since no other memory is being modified, a "relaxed" store is acceptable.
153 header_->depth.store(depth + 1, std::memory_order_relaxed);
154 return;
155 }
156
157 // Get a pointer to the next entry and load it. No atomicity is required
158 // here because the memory is known only to this thread. It will be made
159 // known to other threads once the depth is incremented.
160 StackEntry* entry = &stack_[depth];
161 entry->time_ticks = base::TimeTicks::Now().ToInternalValue();
162 entry->activity_type = activity;
163 entry->source_address = reinterpret_cast<intptr_t>(source);
164 entry->method_address = method;
165 entry->sequence_id = sequence;
166
167 // Save the incremented depth. Because this guards |entry| memory filled
168 // above that may be read by another thread once the recorded depth changes,
169 // a "release" store is required.
170 header_->depth.store(depth + 1, std::memory_order_release);
171 }
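
The release store above pairs with the acquire load in CopyStack further down. A minimal sketch of that pairing, assuming a single writer thread (illustrative code, not the tracker's):

  #include <atomic>

  std::atomic<uint32_t> depth{0};
  int entries[8];

  void Push(int value) {
    uint32_t d = depth.load(std::memory_order_relaxed);
    if (d >= 8)
      return;  // sketch only: drop entries when full
    entries[d] = value;                             // plain write...
    depth.store(d + 1, std::memory_order_release);  // ...published here
  }

  int ReadTop() {
    uint32_t d = depth.load(std::memory_order_acquire);  // pairs with release
    return d ? entries[d - 1] : 0;  // the entry write is now visible
  }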
172
173 void ThreadActivityTracker::RecordFinish(const void* source) {
174 DCHECK(thread_checker_.CalledOnValidThread());
175
176 // Do an atomic decrement of the depth. No changes to stack entries guarded
177 // by this variable are done here so a "relaxed" operation is acceptable.
178 // |depth| will receive the value before it was modified.
179 uint32_t depth = header_->depth.fetch_sub(1, std::memory_order_relaxed);
180
181 // Validate that everything is running correctly.
182 DCHECK_LT(0U, depth);
183 if (depth <= slots_) {
184 DCHECK_EQ(reinterpret_cast<intptr_t>(source),
185 stack_[depth - 1].source_address);
186 }
187
188 // The stack has shrunk meaning that some other thread trying to copy the
189 // contents for reporting purposes could get bad data. That thread would
190 // have written a non-zero value into |unchanged|; clearing it here will
191 // let that thread detect that something did change. It doesn't matter
192 // when this is done relative to the atomic |depth| operation above so a
193 // "relaxed" access is acceptable.
194 header_->unchanged.store(0, std::memory_order_relaxed);
195 }
196
197 uint32_t ThreadActivityTracker::CopyStack(std::vector<StackEntry>* stack) {
198 // It's possible for the data to change while reading it. Make several
manzagop (departed) 2016/05/20 18:19:30 Nit: add that (only) some of these changes invalid
bcwhite 2016/05/20 19:19:18 Done.
199 // attempts but don't try forever.
200 const int kMaxAttempts = 10;
201 uint32_t depth;
202
203 // Start with an empty return stack.
204 stack->clear();
205
206 // Stop here if the data isn't valid.
207 if (!valid_)
manzagop (departed) 2016/05/20 18:19:30 The return value is ambiguous wrt the function suc
bcwhite 2016/05/20 19:19:18 Caller can always check is_valid() first. Returni
208 return 0;
209
210 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
211 // Write a non-zero value to |unchanged| so it's possible to detect at
212 // the end that nothing has changed since copying the data began.
213 header_->unchanged.store(1, std::memory_order_relaxed);
214
215 // Fetching the current depth also "acquires" the contents of the stack.
216 depth = header_->depth.load(std::memory_order_acquire);
217 if (depth == 0)
218 return 0;
219
220 // Copy the existing contents. Memcpy is used for speed.
221 uint32_t count = std::min(depth, slots_);
222 stack->resize(count);
223 memcpy(&(*stack)[0], stack_, count * sizeof(StackEntry));
224
225 // Check to make sure everything was unchanged during the copy.
manzagop (departed) 2016/05/20 18:19:29 Is there a constraint there may be at most 1 reade
bcwhite 2016/05/20 19:19:18 Good point. Multiple readers would be possible if
226 if (header_->unchanged.load(std::memory_order_relaxed))
227 return depth;
228 }
229
230 // If all attempts failed, just return the depth with no content.
231 stack->clear();
232 return depth;
233 }
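
A sketch of how a monitoring thread might consume this; the caller code here is assumed, not part of the patch:

  std::vector<ThreadActivityTracker::StackEntry> snapshot;
  if (tracker->is_valid()) {
    uint32_t depth = tracker->CopyStack(&snapshot);
    if (depth > 0 && snapshot.empty()) {
      // Every attempt raced with the tracked thread: only the depth
      // is known, not the contents.
    }
  }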
234
235 // static
236 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
237 return static_cast<size_t>(stack_depth) * sizeof(StackEntry) + sizeof(Header);
238 }
239
240
241 GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr;
242
243 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
244 PersistentMemoryAllocator::Reference mem_reference,
245 void* base,
246 size_t size)
247 : ThreadActivityTracker(base, size),
248 mem_reference_(mem_reference),
249 mem_base_(base) {}
250
251 GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
252 // The global |g_tracker_| must point to the owner of this class since all
253 // objects of this type must be destructed before |g_tracker_| can be changed.
254 DCHECK(g_tracker_);
255 g_tracker_->ReturnTrackerMemory(this, mem_reference_, mem_base_);
256 }
257
258 void GlobalActivityTracker::CreateWithAllocator(
259 std::unique_ptr<PersistentMemoryAllocator> allocator,
260 int stack_depth) {
261 // There's no need to do anything with the result. It is self-managing.
262 new GlobalActivityTracker(std::move(allocator), stack_depth);
263 }
264
265 // static
266 void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
267 uint64_t id,
268 StringPiece name,
269 int stack_depth) {
270 CreateWithAllocator(
271 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)),
272 stack_depth);
273 }
274
275 // static
276 void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
277 size_t size,
278 uint64_t id,
279 StringPiece name,
280 int stack_depth) {
281 // Create the file, overwriting anything that was there previously, and set
282 // the length. This will create a space that is zero-filled, a requirement
283 // for operation.
284 File file(file_path,
285 File::FLAG_CREATE_ALWAYS | File::FLAG_READ | File::FLAG_WRITE);
286 file.SetLength(size);
287
288 // Map the file into memory and make it globally available.
289 std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
290 mapped_file->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
291 CreateWithAllocator(WrapUnique(new FilePersistentMemoryAllocator(
292 std::move(mapped_file), id, name)),
293 stack_depth);
294 }
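
For illustration, wiring this up might look as follows; the path, size, and depth are hypothetical values:

  GlobalActivityTracker::CreateWithFile(
      FilePath(FILE_PATH_LITERAL("activity.dat")),
      1 << 20 /* size: 1 MiB */, 0 /* id */, "ActivityTracker",
      3 /* stack_depth */);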
295
296 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
297 DCHECK(!this_thread_tracker_.Get());
298
299 // The lock must be acquired to access the STL data structures.
300 AutoLock auto_lock(lock_);
301
302 PersistentMemoryAllocator::Reference mem_reference;
303 void* mem_base;
304 if (!available_memories_.empty()) {
305 // There is a memory block that was previously released (and zeroed) so
306 // just re-use that rather than allocating a new one.
307 mem_reference = available_memories_.back();
308 available_memories_.pop_back();
309 mem_base = allocator_->GetAsObject<char>(mem_reference,
310 kTypeIdActivityTrackerFree);
311 DCHECK(mem_base);
312 DCHECK_LT(stack_memory_, allocator_->GetAllocSize(mem_reference));
manzagop (departed) 2016/05/20 20:24:21 Is this not EQ? Or LE?
bcwhite 2016/05/20 20:41:18 The space gets rounded up for alignment reasons so
313 allocator_->SetType(mem_reference, kTypeIdActivityTracker);
314 } else {
315 // Allocate a block of memory from the persistent segment.
316 mem_reference = allocator_->Allocate(stack_memory_, kTypeIdActivityTracker);
317 if (mem_reference) {
318 // Success. Convert the reference to an actual memory address.
319 mem_base =
320 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
321 } else {
322 // Failure. This should never happen.
323 NOTREACHED();
324 // But if it does, handle it gracefully by allocating the required
325 // memory from the heap.
326 mem_base = new char[stack_memory_];
327 memset(mem_base, 0, stack_memory_);
328 }
329 }
330
331 // Create a tracker with the acquired memory and set it as the tracker
332 // for this particular thread in thread-local-storage.
333 ManagedActivityTracker* tracker =
334 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_);
335 DCHECK(tracker->is_valid());
336 thread_trackers_.insert(tracker);
337 this_thread_tracker_.Set(tracker);
338
339 return tracker;
340 }
341
342 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
343 ThreadActivityTracker* tracker =
344 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
345 if (tracker) {
346 this_thread_tracker_.Free();
347 delete tracker;
348 }
349 }
350
351 GlobalActivityTracker::GlobalActivityTracker(
352 std::unique_ptr<PersistentMemoryAllocator> allocator,
353 int stack_depth)
354 : allocator_(std::move(allocator)),
355 stack_memory_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
356 this_thread_tracker_(&OnTLSDestroy) {
357 // Ensure the passed memory is valid and empty (iterator finds nothing).
358 uint32_t type;
359 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
360
361 // Ensure that there is no other global object and then make this one such.
362 DCHECK(!g_tracker_);
363 g_tracker_ = this;
364
365 // Create a tracker for this thread since it is known.
366 CreateTrackerForCurrentThread();
367 }
368
369 GlobalActivityTracker::~GlobalActivityTracker() {
370 DCHECK_EQ(g_tracker_, this);
371 DCHECK_EQ(0U, thread_trackers_.size());
372 g_tracker_ = nullptr;
373 }
374
375 void GlobalActivityTracker::ReturnTrackerMemory(
376 ManagedActivityTracker* tracker,
377 PersistentMemoryAllocator::Reference mem_reference,
378 void* mem_base) {
379 // Zero the memory so that it is ready for use if needed again later. It's
380 // better to clear the memory now, when a thread is exiting, than to do it
381 // when it is first needed by a thread doing actual work.
382 memset(mem_base, 0, stack_memory_);
383
384 // Access to STL structures requires a lock because this could get called
385 // from any thread.
386 AutoLock auto_lock(lock_);
387
388 // Remove the destructed tracker from the set of known ones.
389 DCHECK(ContainsKey(thread_trackers_, tracker));
390 thread_trackers_.erase(tracker);
391
392 // Deal with the memory that was used by the tracker.
393 if (mem_reference) {
394 // The memory was within the persistent memory allocator. Change its type
395 // so that iteration won't find it.
396 allocator_->SetType(mem_reference, kTypeIdActivityTrackerFree);
397 // There is no way to free memory from a persistent allocator so instead
398 // keep it on the internal list of available memory blocks.
399 DCHECK_LE(stack_memory_, allocator_->GetAllocSize(mem_reference));
400 available_memories_.push_back(mem_reference);
401 } else {
402 // The memory was allocated from the process heap. This shouldn't happen
403 // because the persistent memory segment should be big enough for all
404 // thread stacks but it's better to support falling back to allocation
405 // from the heap rather than crash. Everything will work as normal but
406 // the data won't be persisted.
407 delete[] reinterpret_cast<char*>(mem_base);
408 }
409 }
410
411 // static
412 void GlobalActivityTracker::OnTLSDestroy(void* value) {
413 delete reinterpret_cast<ManagedActivityTracker*>(value);
414 }
415
416 ScopedTaskActivity::ScopedTaskActivity(const PendingTask& task)
417 : GlobalActivityTracker::ScopedThreadActivity(
418 task.posted_from.program_counter(),
419 ThreadActivityTracker::ACT_TASK,
420 0,
421 task.sequence_num) {}
422
423 } // namespace debug
424 } // namespace base