Chromium Code Reviews

Side by Side Diff: base/debug/activity_tracker.cc

Issue 1980743002: Track thread activities in order to diagnose hangs. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@readwrite-mmf
Patch Set: rebased Created 4 years, 4 months ago
1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/debug/activity_tracker.h"
6
7 #include "base/debug/stack_trace.h"
8 #include "base/files/file.h"
9 #include "base/files/file_path.h"
10 #include "base/files/memory_mapped_file.h"
11 #include "base/logging.h"
12 #include "base/memory/ptr_util.h"
13 #include "base/metrics/field_trial.h"
14 #include "base/metrics/histogram_macros.h"
15 #include "base/pending_task.h"
16 #include "base/process/process.h"
17 #include "base/process/process_handle.h"
18 #include "base/stl_util.h"
19 #include "base/strings/string_util.h"
20 #include "base/threading/platform_thread.h"
21
22 namespace base {
23 namespace debug {
24
25 namespace {
26
27 // A number that identifies the memory as having been initialized. It's
28 // arbitrary but happens to be the first 8 bytes of SHA1(ThreadActivityTracker).
29 // A version number is added so that, after a major structure change, new
30 // code won't try to read an older version (since the cookie won't match).
31 const uint64_t kHeaderCookie = 0xC0029B240D4A3092ULL + 1; // v1
32
33 // The minimum depth a stack should support.
34 const int kMinStackDepth = 2;
35
36 } // namespace
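The cookie-plus-version scheme means a structure change only requires bumping the added constant; a reader built against a different layout then sees a mismatched cookie and treats the memory as uninitialized. A minimal sketch of that check, with illustrative names rather than code from this change:

#include <cstdint>

constexpr uint64_t kBaseCookie = 0xC0029B240D4A3092ULL;  // SHA1 prefix.
constexpr uint64_t kStructureVersion = 1;  // Bumped on layout changes.

// A reader only interprets the shared memory if the stored cookie matches
// the cookie for the layout it was compiled against.
bool IsCompatibleLayout(uint64_t stored_cookie) {
  return stored_cookie == kBaseCookie + kStructureVersion;
}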
37
38
39 // This information is kept for every thread that is tracked. It is filled
40 // the very first time the thread is seen. All fields must be of exact sizes
41 // so there is no issue moving between 32-bit and 64-bit builds.
42 struct ThreadActivityTracker::Header {
43 // This unique number indicates a valid initialization of the memory.
44 uint64_t cookie;
45
46 // The process-id and thread-id to which this data belongs. These identifiers
47 // are not guaranteed to mean anything but are unique, in combination, among
48 // all active trackers. It would be nice to always have the process_id be a
49 // 64-bit value but the necessity of having it atomic (for the memory barriers
50 // it provides) limits it to the natural word size of the machine.
51 #ifdef ARCH_CPU_64_BITS
52 std::atomic<int64_t> process_id;
53 #else
54 std::atomic<int32_t> process_id;
55 int32_t process_id_padding;
56 #endif
57
58 union {
59 int64_t as_id;
60 #if defined(OS_WIN)
61 // On Windows, the handle itself is often a pseudo-handle with a common
62 // value meaning "this thread" and so the thread-id is used instead. The
63 // handle can be converted to a thread-id with a system call.
64 PlatformThreadId as_tid;
65 #elif defined(OS_POSIX)
66 // On Posix, the handle is always a unique identifier so no conversion
67 // needs to be done. However, its value is officially opaque so there
68 // is no one correct way to convert it to a numerical identifier.
69 PlatformThreadHandle::Handle as_handle;
70 #endif
71 } thread_ref;
72
73 // The start-time and start-ticks when the data was created. Each activity
74 // record has a |time_internal| value that can be converted to a "wall time"
75 // with these two values.
76 int64_t start_time;
77 int64_t start_ticks;
78
79 // The number of Activity slots in the data.
80 uint32_t stack_slots;
81
82 // The current depth of the stack. This may be greater than the number of
83 // slots. If the depth exceeds the number of slots, the newest entries
84 // won't be recorded.
85 std::atomic<uint32_t> current_depth;
86
87 // A memory location used to indicate if changes have been made to the stack
88 // that would invalidate an in-progress read of its contents. The active
89 // tracker will zero the value whenever something gets popped from the
90 // stack. A monitoring tracker can write a non-zero value here, copy the
91 // stack contents, and read the value to know, if it is still non-zero, that
92 // the contents didn't change while being copied. This can handle concurrent
93 // snapshot operations only if each snapshot writes a different bit (which
94 // is not the current implementation so no parallel snapshots allowed).
95 std::atomic<uint32_t> stack_unchanged;
96
97 // The name of the thread (up to a maximum length). Dynamic-length names
98 // are not practical since the memory has to come from the same persistent
99 // allocator that holds this structure and to which this object has no
100 // reference.
101 char thread_name[32];
102 };
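The |stack_unchanged| field described above is essentially a single-reader seqlock: the reader sets the flag, copies the stack, then checks that no pop cleared it in the meantime. A condensed sketch of that handshake with stand-in names (not the real types used below):

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstring>

std::atomic<uint32_t> stack_unchanged{0};

// Writer side (the tracked thread): clear the flag on every pop so a
// concurrent reader knows its copy may be inconsistent.
void OnPop() {
  stack_unchanged.store(0, std::memory_order_release);
}

// Reader side (an analyzer): returns true only if no pop happened while
// the stack contents were being copied.
bool TryCopyStack(const void* src, void* dst, size_t size) {
  stack_unchanged.store(1, std::memory_order_seq_cst);
  std::memcpy(dst, src, size);
  return stack_unchanged.load(std::memory_order_seq_cst) != 0;
}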
103
104 // It doesn't matter what this contains (though it will be all zeros) as
105 // only its address is important.
106 const ThreadActivityTracker::ActivityData
107 ThreadActivityTracker::kNullActivityData = {};
108
109 ThreadActivityTracker::ActivityData
110 ThreadActivityTracker::ActivityData::ForThread(
111 const PlatformThreadHandle& handle) {
112 // Header already has a conversion union; reuse that.
113 ThreadActivityTracker::Header header;
114 header.thread_ref.as_id = 0; // Zero the union in case other is smaller.
115 #if defined(OS_WIN)
116 header.thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
117 #elif defined(OS_POSIX)
118 header.thread_ref.as_handle = handle.platform_handle();
119 #endif
120 return ForThread(header.thread_ref.as_id);
121 }
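ForThread() reuses the Header union to widen a platform-specific handle into a fixed 64-bit id; zeroing |as_id| first makes the bytes not covered by the narrower member deterministic. A reduced illustration of the same idea, using stand-in types rather than this change's API:

#include <cstdint>

union ThreadRef {
  int64_t as_id;
  int32_t as_handle;  // Stand-in for a narrower platform handle type.
};

int64_t HandleToId(int32_t handle) {
  ThreadRef ref;
  ref.as_id = 0;           // Zero the whole union first...
  ref.as_handle = handle;  // ...so the unused high bytes are always zero.
  return ref.as_id;
}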
122
123 ThreadActivityTracker::ActivitySnapshot::ActivitySnapshot() {}
124 ThreadActivityTracker::ActivitySnapshot::~ActivitySnapshot() {}
125
126
127 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
128 : header_(static_cast<Header*>(base)),
129 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
130 sizeof(Header))),
131 stack_slots_(
132 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
133 DCHECK(thread_checker_.CalledOnValidThread());
134
135 // Verify the parameters but fail gracefully if they're not valid so that
136 // production code based on external inputs will not crash. IsValid() will
137 // return false in this case.
138 if (!base ||
139 // Ensure there is enough space for the header and at least a few records.
140 size < sizeof(Header) + kMinStackDepth * sizeof(Activity) ||
141 // Ensure that the |stack_slots_| calculation didn't overflow.
142 (size - sizeof(Header)) / sizeof(Activity) >
143 std::numeric_limits<uint32_t>::max()) {
144 NOTREACHED();
145 return;
146 }
147
148 // Ensure that the thread reference doesn't exceed the size of the ID number.
149 // This won't compile at the global scope because Header is a private struct.
150 static_assert(
151 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
152 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
153
154 // Ensure that Activity.data is aligned to a 64-bit boundary so there are
155 // no interoperability issues when the memory is read on a different CPU
156 // architecture.
157 static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
158 "ActivityData.data is not 64-bit aligned");
159
160 // Provided memory should either be completely initialized or all zeros.
161 if (header_->cookie == 0) {
162 // This is a new file. Double-check other fields and then initialize.
163 DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed));
164 DCHECK_EQ(0, header_->thread_ref.as_id);
165 DCHECK_EQ(0, header_->start_time);
166 DCHECK_EQ(0, header_->start_ticks);
167 DCHECK_EQ(0U, header_->stack_slots);
168 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
169 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
170 DCHECK_EQ(0, stack_[0].time_internal);
171 DCHECK_EQ(0U, stack_[0].origin_address);
172 DCHECK_EQ(0U, stack_[0].call_stack[0]);
173 DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
174
175 #if defined(OS_WIN)
176 header_->thread_ref.as_tid = PlatformThread::CurrentId();
177 #elif defined(OS_POSIX)
178 header_->thread_ref.as_handle =
179 PlatformThread::CurrentHandle().platform_handle();
180 #endif
181 header_->start_time = base::Time::Now().ToInternalValue();
182 header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
183 header_->stack_slots = stack_slots_;
184 strlcpy(header_->thread_name, PlatformThread::GetName(),
185 sizeof(header_->thread_name));
186 header_->cookie = kHeaderCookie;
187
188 // This is done last so as to guarantee that everything above is "released"
189 // by the time this value gets written.
190 header_->process_id.store(GetCurrentProcId(), std::memory_order_release);
191
192 valid_ = true;
193 DCHECK(IsValid());
194 } else {
195 // This is a file with existing data. Perform basic consistency checks.
196 valid_ = true;
197 valid_ = IsValid();
198 }
199 }
200
201 ThreadActivityTracker::~ThreadActivityTracker() {}
202
203 void ThreadActivityTracker::PushActivity(const void* origin,
204 ActivityType type,
205 const ActivityData& data) {
206 // A thread-checker takes a lock to check the thread-id, which would cause
207 // re-entry into this code if lock acquisitions are being tracked.
208 DCHECK(type == ACT_LOCK_ACQUIRE || thread_checker_.CalledOnValidThread());
209
210 // Get the current depth of the stack. No access to other memory guarded
211 // by this variable is done here so a "relaxed" load is acceptable.
212 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
213
214 // Handle the case where the stack depth has exceeded the storage capacity.
215 // Extra entries will be lost leaving only the base of the stack.
216 if (depth >= stack_slots_) {
217 // Since no other threads modify the data, no compare/exchange is needed.
218 // Since no other memory is being modified, a "relaxed" store is acceptable.
219 header_->current_depth.store(depth + 1, std::memory_order_relaxed);
220 return;
221 }
222
223 // Get a pointer to the next activity and load it. No atomicity is required
224 // here because the memory is known only to this thread. It will be made
225 // known to other threads once the depth is incremented.
226 Activity* activity = &stack_[depth];
227 activity->time_internal = base::TimeTicks::Now().ToInternalValue();
228 activity->origin_address = reinterpret_cast<uintptr_t>(origin);
229 activity->activity_type = type;
230 activity->data = data;
231
232 #if defined(SYZYASAN)
233 // Create a stacktrace from the current location and get the addresses.
234 StackTrace stack_trace;
235 size_t stack_depth;
236 const void* const* stack_addrs = stack_trace.Addresses(&stack_depth);
237 // Copy the stack addresses, ignoring the first one (here).
238 size_t i;
239 for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) {
240 activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]);
241 }
242 activity->call_stack[i - 1] = 0;
243 #else
244 // Since the memory was initially zero and nothing ever overwrites it in
245 // this "else" case, there is no need to write even the null terminator.
246 //activity->call_stack[0] = 0;
247 #endif
248
249 // Save the incremented depth. Because this guards |activity| memory filled
250 // above that may be read by another thread once the recorded depth changes,
251 // a "release" store is required.
252 header_->current_depth.store(depth + 1, std::memory_order_release);
253 }
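PushActivity() keeps incrementing |current_depth| even after the fixed-size storage is exhausted, so the true depth is still known while only the base of the stack is preserved; the final store uses "release" so a reader that observes the new depth also sees the filled-in Activity. A toy version of that policy, for illustration only:

#include <atomic>
#include <cstdint>

constexpr uint32_t kSlots = 4;
uint64_t slots[kSlots];
std::atomic<uint32_t> depth{0};

void Push(uint64_t value) {
  uint32_t d = depth.load(std::memory_order_relaxed);
  if (d >= kSlots) {
    // Storage is full: count the entry but drop its contents.
    depth.store(d + 1, std::memory_order_relaxed);
    return;
  }
  slots[d] = value;
  // "release" so the slot write above is visible before the new depth is.
  depth.store(d + 1, std::memory_order_release);
}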
254
255 void ThreadActivityTracker::ChangeActivity(ActivityType type,
256 const ActivityData& data) {
257 DCHECK(thread_checker_.CalledOnValidThread());
258 DCHECK(type != ACT_NULL || &data != &kNullActivityData);
259
260 // Get the current depth of the stack and acquire the data held there.
261 uint32_t depth = header_->current_depth.load(std::memory_order_acquire);
262 DCHECK_LT(0U, depth);
263
264 // Update the information if it is being recorded (i.e. within slot limit).
265 if (depth <= stack_slots_) {
266 Activity* activity = &stack_[depth - 1];
267
268 if (type != ACT_NULL) {
269 DCHECK_EQ(activity->activity_type & ACT_CATEGORY_MASK,
270 type & ACT_CATEGORY_MASK);
271 activity->activity_type = type;
272 }
273
274 if (&data != &kNullActivityData)
275 activity->data = data;
276 }
277 }
278
279 void ThreadActivityTracker::PopActivity() {
280 // Do an atomic decrement of the depth. No changes to stack entries guarded
281 // by this variable are done here so a "relaxed" operation is acceptable.
282 // |depth| will receive the value BEFORE it was modified.
283 uint32_t depth =
284 header_->current_depth.fetch_sub(1, std::memory_order_relaxed);
285
286 // Validate that everything is running correctly.
287 DCHECK_LT(0U, depth);
288
289 // A thread-checker takes a lock to check the thread-id, which would cause
290 // re-entry into this code if lock acquisitions are being tracked.
291 DCHECK(stack_[depth - 1].activity_type == ACT_LOCK_ACQUIRE ||
292 thread_checker_.CalledOnValidThread());
293
294 // The stack has shrunk meaning that some other thread trying to copy the
295 // contents for reporting purposes could get bad data. That thread would
296 // have written a non-zero value into |stack_unchanged|; clearing it here
297 // will let that thread detect that something did change. This needs to
298 // happen after the atomic |depth| operation above so a "release" store
299 // is required.
300 header_->stack_unchanged.store(0, std::memory_order_release);
301 }
302
303 bool ThreadActivityTracker::IsValid() const {
304 if (header_->cookie != kHeaderCookie ||
305 header_->process_id.load(std::memory_order_relaxed) == 0 ||
306 header_->thread_ref.as_id == 0 ||
307 header_->start_time == 0 ||
308 header_->start_ticks == 0 ||
309 header_->stack_slots != stack_slots_ ||
310 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
311 return false;
312 }
313
314 return valid_;
315 }
316
317 bool ThreadActivityTracker::Snapshot(ActivitySnapshot* output_snapshot) const {
318 DCHECK(output_snapshot);
319
320 // There is no "called on valid thread" check for this method as it can be
321 // called from other threads or even other processes. It is also the reason
322 // why atomic operations must be used in certain places above.
323
324 // It's possible for the data to change while reading it in such a way that it
325 // invalidates the read. Make several attempts but don't try forever.
326 const int kMaxAttempts = 10;
327 uint32_t depth;
328
329 // Stop here if the data isn't valid.
330 if (!IsValid())
331 return false;
332
333 // Allocate the maximum size for the stack so it doesn't have to be done
334 // during the time-sensitive snapshot operation. It is shrunk once the
335 // actual size is known.
336 output_snapshot->activity_stack.reserve(stack_slots_);
337
338 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
339 // Remember the process and thread IDs to ensure they aren't replaced
340 // during the snapshot operation. Use "acquire" to ensure that all the
341 // non-atomic fields of the structure are valid (at least at the current
342 // moment in time).
343 const int64_t starting_process_id =
344 header_->process_id.load(std::memory_order_acquire);
345 const int64_t starting_thread_id = header_->thread_ref.as_id;
346
347 // Write a non-zero value to |stack_unchanged| so it's possible to detect
348 // at the end that nothing has changed since copying the data began. A
349 // "cst" operation is required to ensure it occurs before everything else.
350 // Using "cst" memory ordering is relatively expensive but this is only
351 // done during analysis so doesn't directly affect the worker threads.
352 header_->stack_unchanged.store(1, std::memory_order_seq_cst);
353
354 // Fetching the current depth also "acquires" the contents of the stack.
355 depth = header_->current_depth.load(std::memory_order_acquire);
356 uint32_t count = std::min(depth, stack_slots_);
357 output_snapshot->activity_stack.resize(count);
358 if (count > 0) {
359 // Copy the existing contents. Memcpy is used for speed.
360 memcpy(&output_snapshot->activity_stack[0], stack_,
361 count * sizeof(Activity));
362 }
363
364 // Retry if something changed during the copy. A "cst" operation ensures
365 // it must happen after all the above operations.
366 if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
367 continue;
368
369 // Stack copied. Record its full depth.
370 output_snapshot->activity_stack_depth = depth;
371
372 // TODO(bcwhite): Snapshot other things here.
373
374 // Get the general thread information. Loading of "process_id" is guaranteed
375 // to be last so that it's possible to detect below if any content has
376 // changed while reading it. It's technically possible for a thread to end,
377 // have its data cleared, and for a new thread with the same IDs to start
378 // tracking, all in the time since the ID reads above, but the chance is so
379 // remote that it's not worth the effort and complexity of protecting
380 // against it (perhaps with an "unchanged" field like is done for the
381 // stack).
382 output_snapshot->thread_name =
383 std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
384 output_snapshot->thread_id = header_->thread_ref.as_id;
385 output_snapshot->process_id =
386 header_->process_id.load(std::memory_order_seq_cst);
387
388 // All characters of the thread-name buffer were copied so as to not break
389 // if the trailing NUL were missing. Now limit the length if the actual
390 // name is shorter.
391 output_snapshot->thread_name.resize(
392 strlen(output_snapshot->thread_name.c_str()));
393
394 // If the process or thread ID has changed then the tracker has exited and
395 // the memory has been reused by a new one. Try again.
396 if (output_snapshot->process_id != starting_process_id ||
397 output_snapshot->thread_id != starting_thread_id) {
398 continue;
399 }
400
401 // Only successful if the data is still valid once everything is done since
402 // it's possible for the thread to end somewhere in the middle and all its
403 // values become garbage.
404 if (!IsValid())
405 return false;
406
407 // Change all the timestamps in the activities from "ticks" to "wall" time.
408 const Time start_time = Time::FromInternalValue(header_->start_time);
409 const int64_t start_ticks = header_->start_ticks;
410 for (Activity& activity : output_snapshot->activity_stack) {
411 activity.time_internal =
412 (start_time +
413 TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
414 .ToInternalValue();
415 }
416
417 // Success!
418 return true;
419 }
420
421 // Too many attempts.
422 return false;
423 }
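The timestamp conversion at the end of Snapshot() rebases each activity's monotonic tick reading onto wall-clock time: wall = start_time + (activity_ticks - start_ticks), all in the same internal unit (microseconds). A minimal stand-alone equivalent:

#include <cstdint>

// Rebase a monotonic tick reading onto wall-clock time, mirroring the loop
// at the end of Snapshot(). All values share the same internal unit.
int64_t TicksToWallTime(int64_t activity_ticks,
                        int64_t start_ticks,
                        int64_t start_time) {
  return start_time + (activity_ticks - start_ticks);
}

// Example: start_time = 1000000, start_ticks = 50, activity_ticks = 75
// yields a wall-clock value of 1000025.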
424
425 // static
426 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
427 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
428 }
429
430
431 GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr;
432
433 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
434 PersistentMemoryAllocator::Reference mem_reference,
435 void* base,
436 size_t size)
437 : ThreadActivityTracker(base, size),
438 mem_reference_(mem_reference),
439 mem_base_(base) {}
440
441 GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
442 // The global |g_tracker_| must point to the owner of this class since all
443 // objects of this type must be destructed before |g_tracker_| can be changed
444 // (something that only occurs in tests).
445 DCHECK(g_tracker_);
446 g_tracker_->ReturnTrackerMemory(this);
447 }
448
449 void GlobalActivityTracker::CreateWithAllocator(
450 std::unique_ptr<PersistentMemoryAllocator> allocator,
451 int stack_depth) {
452 // There's no need to do anything with the result. It is self-managing.
453 GlobalActivityTracker* global_tracker =
454 new GlobalActivityTracker(std::move(allocator), stack_depth);
455 // Create a tracker for this thread since it is known.
456 global_tracker->CreateTrackerForCurrentThread();
457 }
458
459 #if !defined(OS_NACL)
460 // static
461 void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
462 size_t size,
463 uint64_t id,
464 StringPiece name,
465 int stack_depth) {
466 DCHECK(!file_path.empty());
467 DCHECK_GE(static_cast<uint64_t>(std::numeric_limits<int64_t>::max()), size);
468
469 // Create and map the file into memory and make it globally available.
470 std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
471 bool success =
472 mapped_file->Initialize(File(file_path,
473 File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
474 File::FLAG_WRITE | File::FLAG_SHARE_DELETE),
475 {0, static_cast<int64_t>(size)},
476 MemoryMappedFile::READ_WRITE_EXTEND);
477 DCHECK(success);
478 CreateWithAllocator(WrapUnique(new FilePersistentMemoryAllocator(
479 std::move(mapped_file), size, id, name, false)),
480 stack_depth);
481 }
482 #endif // !defined(OS_NACL)
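A caller-side sketch of enabling file-backed tracking at process startup; the path, size, id, name, and depth here are illustrative values, not ones taken from this change:

#include "base/debug/activity_tracker.h"
#include "base/files/file_path.h"

void EnableActivityTracking() {
  // One memory-mapped file per process; each thread gets a block inside it.
  base::debug::GlobalActivityTracker::CreateWithFile(
      base::FilePath(FILE_PATH_LITERAL("activity.dat")),
      1 << 20 /* size */, 0 /* id */, "ActivityTracking" /* name */,
      10 /* stack_depth */);
}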
483
484 // static
485 void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
486 uint64_t id,
487 StringPiece name,
488 int stack_depth) {
489 CreateWithAllocator(
490 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)),
491 stack_depth);
492 }
493
494 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
495 DCHECK(!this_thread_tracker_.Get());
496
497 PersistentMemoryAllocator::Reference mem_reference = 0;
498 void* mem_base = nullptr;
499
500 // Get the current count of available memories, acquiring the array values.
501 int count = available_memories_count_.load(std::memory_order_acquire);
502 while (count > 0) {
503 // There is a memory block that was previously released (and zeroed) so
504 // just re-use that rather than allocating a new one. Use "relaxed" because
505 // the value is guarded by the |count| "acquire". A zero reference replaces
506 // the existing value so that it can't be used by another thread that
507 // manages to interrupt this one before the count can be decremented.
508 // A zero reference is also required for the "push" operation to work
509 // once the count finally does get decremented.
510 mem_reference =
511 available_memories_[count - 1].exchange(0, std::memory_order_relaxed);
512
513 // If the reference is zero, it's already been taken but count hasn't yet
514 // been decremented. Give that other thread a chance to finish then reload
515 // the "count" value and try again.
516 if (!mem_reference) {
517 PlatformThread::YieldCurrentThread();
518 count = available_memories_count_.load(std::memory_order_acquire);
519 continue;
520 }
521
522 // Decrement the count indicating that the value has been taken. If this
523 // fails then another thread has pushed something new and incremented the
524 // count.
525 // NOTE: |oldcount| will be loaded with the existing value.
526 int oldcount = count;
527 if (!available_memories_count_.compare_exchange_strong(
528 oldcount, count - 1, std::memory_order_acquire,
529 std::memory_order_acquire)) {
530 DCHECK_LT(count, oldcount);
531
532 // Restore the reference that was zeroed above and try again.
533 available_memories_[count - 1].store(mem_reference,
534 std::memory_order_relaxed);
535 count = oldcount;
536 continue;
537 }
538
539 // Turn the reference back into one of the activity-tracker type.
540 mem_base = allocator_->GetAsObject<char>(mem_reference,
541 kTypeIdActivityTrackerFree);
542 DCHECK(mem_base);
543 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
544 bool changed = allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
545 kTypeIdActivityTrackerFree);
546 DCHECK(changed);
547
548 // Success.
549 break;
550 }
551
552 // Handle the case where no previously-used memories are available.
553 if (count == 0) {
554 // Allocate a block of memory from the persistent segment.
555 mem_reference =
556 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker);
557 if (mem_reference) {
558 // Success. Convert the reference to an actual memory address.
559 mem_base =
560 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
561 // Make the allocation iterable so it can be found by other processes.
562 allocator_->MakeIterable(mem_reference);
563 } else {
564 // Failure. This shouldn't happen.
565 NOTREACHED();
566 // But if it does, probably because the allocator wasn't given enough
567 // memory to satisfy all possible requests, handle it gracefully by
568 // allocating the required memory from the heap.
569 mem_base = new char[stack_memory_size_];
570 memset(mem_base, 0, stack_memory_size_);
571 // Report the thread-count at which the allocator was full so that the
572 // failure can be seen and underlying memory resized appropriately.
573 UMA_HISTOGRAM_COUNTS_1000(
574 "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
575 thread_tracker_count_.load(std::memory_order_relaxed));
576 }
577 }
578
579 // Create a tracker with the acquired memory and set it as the tracker
580 // for this particular thread in thread-local-storage.
581 DCHECK(mem_base);
582 ManagedActivityTracker* tracker =
583 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
584 DCHECK(tracker->IsValid());
585 this_thread_tracker_.Set(tracker);
586 int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
587
588 UMA_HISTOGRAM_ENUMERATION("ActivityTracker.ThreadTrackers.Count",
589 old_count + 1, kMaxThreadCount);
590 return tracker;
591 }
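The reuse path above is a small lock-free stack pop: a slot is claimed by exchanging its reference to zero, then the count is decremented with a compare-and-swap; if either step loses a race, the state is restored and the loop retries. A condensed sketch of that protocol over stand-in storage (not the persistent-allocator types used here):

#include <atomic>
#include <cstdint>

constexpr int kMaxSlots = 100;
std::atomic<uint32_t> free_slots[kMaxSlots];  // Zero means "empty slot".
std::atomic<int> free_count{0};

// Returns a previously released reference, or 0 if none are available.
uint32_t PopFreeReference() {
  int count = free_count.load(std::memory_order_acquire);
  while (count > 0) {
    // Claim the top slot; writing zero marks it as taken.
    uint32_t ref =
        free_slots[count - 1].exchange(0, std::memory_order_relaxed);
    if (!ref) {
      // Another thread claimed it but hasn't decremented the count yet.
      count = free_count.load(std::memory_order_acquire);
      continue;
    }
    int expected = count;
    if (free_count.compare_exchange_strong(expected, count - 1,
                                           std::memory_order_acquire,
                                           std::memory_order_acquire)) {
      return ref;  // Claimed and accounted for.
    }
    // Lost a race on the count: put the reference back and retry.
    free_slots[count - 1].store(ref, std::memory_order_relaxed);
    count = expected;
  }
  return 0;  // Nothing available; caller allocates fresh memory.
}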
592
593 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
594 ThreadActivityTracker* tracker =
595 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
596 if (tracker) {
597 this_thread_tracker_.Free();
598 delete tracker;
599 }
600 }
601
602 GlobalActivityTracker::GlobalActivityTracker(
603 std::unique_ptr<PersistentMemoryAllocator> allocator,
604 int stack_depth)
605 : allocator_(std::move(allocator)),
606 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
607 this_thread_tracker_(&OnTLSDestroy),
608 thread_tracker_count_(0),
609 available_memories_count_(0) {
610 // Clear the available-memories array.
611 memset(available_memories_, 0, sizeof(available_memories_));
612
613 // Ensure the passed memory is valid and empty (iterator finds nothing).
614 uint32_t type;
615 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
616
617 // Ensure that there is no other global object and then make this one such.
618 DCHECK(!g_tracker_);
619 g_tracker_ = this;
620 }
621
622 GlobalActivityTracker::~GlobalActivityTracker() {
623 DCHECK_EQ(g_tracker_, this);
624 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
625 g_tracker_ = nullptr;
626 }
627
628 void GlobalActivityTracker::ReturnTrackerMemory(
629 ManagedActivityTracker* tracker) {
630 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
631 void* mem_base = tracker->mem_base_;
632
633 // Zero the memory so that it is ready for use if needed again later. It's
634 // better to clear the memory now, when a thread is exiting, than to do it
635 // when it is first needed by a thread doing actual work.
636 memset(mem_base, 0, stack_memory_size_);
637
638 // Remove the destructed tracker from the set of known ones.
639 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
640 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
641
642 // Deal with the memory that was used by the tracker.
643 if (mem_reference) {
644 // The memory was within the persistent memory allocator. Change its type
645 // so that iteration won't find it.
646 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree,
647 kTypeIdActivityTracker);
648 // There is no way to free memory from a persistent allocator so instead
649 // push it on the internal list of available memory blocks.
650 while (true) {
651 // Get the existing count of available memories and ensure we won't
652 // overrun the array. Acquire the values in the array.
653 int count = available_memories_count_.load(std::memory_order_acquire);
654 if (count >= kMaxThreadCount) {
655 NOTREACHED();
656 // Storage is full. Just forget about this memory. It won't be re-used
657 // but there's no real loss.
658 break;
659 }
660
661 // Write the reference of the memory being returned to this slot in the
662 // array. Empty slots have a value of zero so do an atomic compare-and-
663 // exchange to ensure that a race condition doesn't exist with another
664 // thread doing the same.
665 PersistentMemoryAllocator::Reference mem_expected = 0;
666 if (!available_memories_[count].compare_exchange_strong(
667 mem_expected, mem_reference, std::memory_order_release,
668 std::memory_order_relaxed)) {
669 PlatformThread::YieldCurrentThread();
670 continue; // Try again.
671 }
672
673 // Increment the count, releasing the value written to the array. This
674 // could fail if a simultaneous "pop" operation decremented the counter.
675 // If that happens, clear the array slot and start over. Do a "strong"
676 // exchange to avoid spurious retries that can occur with a "weak" one.
677 int expected = count; // Updated by compare/exchange.
678 if (!available_memories_count_.compare_exchange_strong(
679 expected, count + 1, std::memory_order_release,
680 std::memory_order_relaxed)) {
681 available_memories_[count].store(0, std::memory_order_relaxed);
682 continue;
683 }
684
685 // Count was successfully incremented to reflect the newly added value.
686 break;
687 }
688 } else {
689 // The memory was allocated from the process heap. This shouldn't happen
690 // because the persistent memory segment should be big enough for all
691 // thread stacks but it's better to support falling back to allocation
692 // from the heap rather than crash. Everything will work as normal but
693 // the data won't be persisted.
694 delete[] reinterpret_cast<char*>(mem_base);
695 }
696 }
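ReturnTrackerMemory() is the matching lock-free push: the reference is written into the next slot with a compare-and-swap, and only then is the count incremented with "release" ordering so a concurrent pop can never observe the slot before its contents are valid. Continuing the stand-in sketch from the pop example above (same free_slots, free_count, and kMaxSlots):

// Returns false only if the free-list is full (the reference is dropped).
bool PushFreeReference(uint32_t ref) {
  while (true) {
    int count = free_count.load(std::memory_order_acquire);
    if (count >= kMaxSlots)
      return false;

    // Claim the next empty slot; another returning thread may race for it.
    uint32_t expected_slot = 0;
    if (!free_slots[count].compare_exchange_strong(
            expected_slot, ref, std::memory_order_release,
            std::memory_order_relaxed)) {
      continue;  // Slot already taken; reload the count and retry.
    }

    // Publish the new count. A concurrent pop may have changed it; if so,
    // undo the slot write and start over.
    int expected_count = count;
    if (!free_count.compare_exchange_strong(expected_count, count + 1,
                                            std::memory_order_release,
                                            std::memory_order_relaxed)) {
      free_slots[count].store(0, std::memory_order_relaxed);
      continue;
    }
    return true;
  }
}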
697
698 // static
699 void GlobalActivityTracker::OnTLSDestroy(void* value) {
700 delete reinterpret_cast<ManagedActivityTracker*>(value);
701 }
702
703
704 ScopedActivity::ScopedActivity(const tracked_objects::Location& location,
705 uint8_t action,
706 uint32_t id,
707 int32_t info)
708 : GlobalActivityTracker::ScopedThreadActivity(
709 location.program_counter(),
710 static_cast<ThreadActivityTracker::ActivityType>(
711 ThreadActivityTracker::ACT_GENERIC | action),
712 ThreadActivityTracker::ActivityData::ForGeneric(id, info),
713 /*lock_allowed=*/true),
714 id_(id) {
715 // The action must not affect the category bits of the activity type.
716 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK);
717 }
718
719 void ScopedActivity::ChangeAction(uint8_t action) {
720 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK);
721 ChangeTypeAndData(static_cast<ThreadActivityTracker::ActivityType>(
722 ThreadActivityTracker::ACT_GENERIC | action),
723 ThreadActivityTracker::kNullActivityData);
724 }
725
726 void ScopedActivity::ChangeInfo(int32_t info) {
727 ChangeTypeAndData(ThreadActivityTracker::ACT_NULL,
728 ThreadActivityTracker::ActivityData::ForGeneric(id_, info));
729 }
730
731 void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) {
732 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK);
733 ChangeTypeAndData(static_cast<ThreadActivityTracker::ActivityType>(
734 ThreadActivityTracker::ACT_GENERIC | action),
735 ThreadActivityTracker::ActivityData::ForGeneric(id_, info));
736 }
737
738 ScopedTaskRunActivity::ScopedTaskRunActivity(const base::PendingTask& task)
739 : GlobalActivityTracker::ScopedThreadActivity(
740 task.posted_from.program_counter(),
741 ThreadActivityTracker::ACT_TASK_RUN,
742 ThreadActivityTracker::ActivityData::ForTask(task.sequence_num),
743 /*lock_allowed=*/true) {}
744
745 ScopedLockAcquireActivity::ScopedLockAcquireActivity(
746 const base::internal::LockImpl* lock)
747 : GlobalActivityTracker::ScopedThreadActivity(
748 nullptr,
749 ThreadActivityTracker::ACT_LOCK_ACQUIRE,
750 ThreadActivityTracker::ActivityData::ForLock(lock),
751 /*lock_allowed=*/false) {}
752
753 ScopedEventWaitActivity::ScopedEventWaitActivity(
754 const base::WaitableEvent* event)
755 : GlobalActivityTracker::ScopedThreadActivity(
756 nullptr,
757 ThreadActivityTracker::ACT_EVENT_WAIT,
758 ThreadActivityTracker::ActivityData::ForEvent(event),
759 /*lock_allowed=*/true) {}
760
761 ScopedThreadJoinActivity::ScopedThreadJoinActivity(
762 const base::PlatformThreadHandle* thread)
763 : GlobalActivityTracker::ScopedThreadActivity(
764 nullptr,
765 ThreadActivityTracker::ACT_THREAD_JOIN,
766 ThreadActivityTracker::ActivityData::ForThread(*thread),
767 /*lock_allowed=*/true) {}
768
769 #if !defined(OS_NACL) && !defined(OS_IOS)
770 ScopedProcessWaitActivity::ScopedProcessWaitActivity(
771 const base::Process* process)
772 : GlobalActivityTracker::ScopedThreadActivity(
773 nullptr,
774 ThreadActivityTracker::ACT_PROCESS_WAIT,
775 ThreadActivityTracker::ActivityData::ForProcess(process->Pid()),
776 /*lock_allowed=*/true) {}
777 #endif
778
779 } // namespace debug
780 } // namespace base
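For completeness, a caller-side sketch of the scoped helpers defined above; the function body and values are illustrative, and FROM_HERE is assumed to yield the tracked_objects::Location that the ScopedActivity constructor expects:

#include "base/debug/activity_tracker.h"
#include "base/location.h"

void DoWorkThatMightHang() {
  // Records a generic activity for the duration of this scope so an
  // external analyzer can see where the thread was if it hangs.
  base::debug::ScopedActivity activity(FROM_HERE, 0 /* action */,
                                       42 /* id */, 0 /* info */);
  // ... long-running work ...
  activity.ChangeInfo(1);  // Update the recorded state as work progresses.
}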