Chromium Code Reviews

Side by Side Diff: base/debug/activity_tracker.cc

Issue 1980743002: Track thread activities in order to diagnose hangs. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@readwrite-mmf
Patch Set: addressed review comments | Created 4 years, 4 months ago
1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/debug/activity_tracker.h"
6
7 #include "base/debug/stack_trace.h"
8 #include "base/feature_list.h"
9 #include "base/files/file.h"
10 #include "base/files/file_path.h"
11 #include "base/files/memory_mapped_file.h"
12 #include "base/logging.h"
13 #include "base/memory/ptr_util.h"
14 #include "base/metrics/field_trial.h"
15 #include "base/metrics/histogram_macros.h"
16 #include "base/pending_task.h"
17 #include "base/process/process.h"
18 #include "base/process/process_handle.h"
19 #include "base/stl_util.h"
20 #include "base/strings/string_util.h"
21 #include "base/threading/platform_thread.h"
22
23 namespace base {
24 namespace debug {
25
26 namespace {
27
28 // A number that identifies the memory as having been initialized. It's
29 // arbitrary but happens to be the first 8 bytes of SHA1(ThreadActivityTracker).
30 // A version number is added so that code using a changed structure won't
31 // try to read data written by an older version (since the cookie won't match).
32 const uint64_t kHeaderCookie = 0xC0029B240D4A3092ULL + 1; // v1
33
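The "+ 1" is the version component: a future change to the structures below would bump it, so data written under one layout fails the cookie check rather than being misread by another. A hypothetical future bump (not part of this patch) would look like:

  // v2 -- same SHA1 prefix, next version; v1 blocks then fail the cookie check.
  // const uint64_t kHeaderCookie = 0xC0029B240D4A3092ULL + 2;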
34 // The minimum depth a stack should support.
35 const int kMinStackDepth = 2;
36
37 #if !defined(OS_NACL)
38 const Feature kActivityTrackerFeature{
39 "HangDebugging", FEATURE_DISABLED_BY_DEFAULT
Alexei Svitkine (slow) 2016/08/04 14:27:05 Make the feature name match the constant name (i.e
bcwhite 2016/08/04 14:55:36 Done.
Alexei Svitkine (slow) 2016/08/04 18:16:45 Not done?
bcwhite 2016/08/04 19:15:07 Done, just not uploaded because it became a discus
40 };
41 #endif
42
43 } // namespace
44
45
46 #if !defined(OS_NACL) // NaCl doesn't support any kind of file access in builds.
47 void SetupGlobalActivityTrackerFieldTrial(const FilePath& file) {
48 if (!base::FeatureList::IsEnabled(kActivityTrackerFeature))
49 return;
50
51 // TODO(bcwhite): Adjust these numbers once there is real data to show
52 // just how much of an arena is necessary.
53 const size_t kMemorySize = 1 << 20; // 1 MiB
54 const int kStackDepth = 4;
55 const uint64_t kAllocatorId = 0;
56
57 GlobalActivityTracker::CreateWithFile(
58 file.AddExtension(PersistentMemoryAllocator::kFileExtension),
59 kMemorySize, kAllocatorId, kActivityTrackerFeature.name, kStackDepth);
60 }
61 #endif // !defined(OS_NACL)
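As a usage sketch (the call site and |user_data_dir| below are hypothetical, not part of this patch), a startup hook only needs to supply a base path; the feature check, file extension, and sizing are all handled here:

  // Hypothetical call site; SetupGlobalActivityTrackerFieldTrial() is a no-op
  // unless kActivityTrackerFeature is enabled via a field trial.
  void MaybeStartHangDebugging(const base::FilePath& user_data_dir) {
    base::debug::SetupGlobalActivityTrackerFieldTrial(
        user_data_dir.AppendASCII("ActivityTracker"));
  }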
62
63
64 // This information is kept for every thread that is tracked. It is filled
65 // the very first time the thread is seen. All fields must be of exact sizes
66 // so there is no issue moving between 32 and 64-bit builds.
67 struct ThreadActivityTracker::Header {
68 // This unique number indicates a valid initialization of the memory.
69 uint64_t cookie;
70
71 // The process-id and thread-id to which this data belongs. These identifiers
72 // are not guaranteed to mean anything but are unique, in combination, among
73 // all active trackers. It would be nice to always have the process_id be a
74 // 64-bit value but the necessity of having it atomic (for the memory barriers
75 // it provides) limits it to the natural word size of the machine.
76 #ifdef ARCH_CPU_64_BITS
77 std::atomic<int64_t> process_id;
78 #else
79 std::atomic<int32_t> process_id;
80 int32_t process_id_padding;
81 #endif
82
83 union {
84 int64_t as_id;
85 #if defined(OS_WIN)
86 // On Windows, the handle itself is often a pseudo-handle with a common
87 // value meaning "this thread", so the thread-id is stored instead. The
88 // handle can be converted to a thread-id with a system call.
89 PlatformThreadId as_tid;
90 #elif defined(OS_POSIX)
91 // On Posix, the handle is always a unique identifier so no conversion
92 // needs to be done. However, its value is officially opaque so there
93 // is no one correct way to convert it to a numerical identifier.
94 PlatformThreadHandle::Handle as_handle;
95 #endif
96 } thread_ref;
97
98 // The start-time and start-ticks when the data was created. Each activity
99 // record has a |time_internal| value that can be converted to a "wall time"
100 // with these two values.
101 int64_t start_time;
102 int64_t start_ticks;
103
104 // The number of Activity slots in the data.
105 uint32_t stack_slots;
106
107 // The current depth of the stack. This may be greater than the number of
108 // slots. If the depth exceeds the number of slots, the newest entries
109 // won't be recorded.
110 std::atomic<uint32_t> current_depth;
111
112 // A memory location used to indicate if changes have been made to the stack
113 // that would invalidate an in-progress read of its contents. The active
114 // tracker will zero the value whenever something gets popped from the
115 // stack. A monitoring tracker can write a non-zero value here, copy the
116 // stack contents, and then re-read the value: if it is still non-zero, the
117 // contents didn't change while being copied. This can handle concurrent
118 // snapshot operations only if each snapshot writes a different bit (which
119 // is not the current implementation, so parallel snapshots are not allowed).
120 std::atomic<uint32_t> stack_unchanged;
121
122 // The name of the thread (up to a maximum length). Dynamic-length names
123 // are not practical since the memory has to come from the same persistent
124 // allocator that holds this structure and to which this object has no
125 // reference.
126 char thread_name[32];
127 };
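For orientation, the per-thread block that the constructor below carves up is just this Header followed by a run of Activity slots; a layout sketch inferred from that code, not a declaration in it:

  // [ Header ][ Activity[0] ][ Activity[1] ] ... [ Activity[stack_slots - 1] ]
  // where stack_slots = (block_size - sizeof(Header)) / sizeof(Activity)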
128
129 // It doesn't matter what is contained in this (though it will be all zeros)
130 // because only its address is important.
131 const ThreadActivityTracker::ActivityData
132 ThreadActivityTracker::kNullActivityData = {};
133
134 ThreadActivityTracker::ActivityData
135 ThreadActivityTracker::ActivityData::ForThread(
136 const PlatformThreadHandle& handle) {
137 // Header already has a conversion union; reuse that.
138 ThreadActivityTracker::Header header;
139 header.thread_ref.as_id = 0; // Zero the union in case the other member is smaller.
140 #if defined(OS_WIN)
141 header.thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
142 #elif defined(OS_POSIX)
143 header.thread_ref.as_handle = handle.platform_handle();
144 #endif
145 return ForThread(header.thread_ref.as_id);
146 }
147
148 ThreadActivityTracker::ActivitySnapshot::ActivitySnapshot() {}
149 ThreadActivityTracker::ActivitySnapshot::~ActivitySnapshot() {}
150
151
152 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
153 : header_(static_cast<Header*>(base)),
154 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
155 sizeof(Header))),
156 stack_slots_(
157 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
158 DCHECK(thread_checker_.CalledOnValidThread());
159
160 // Verify the parameters but fail gracefully if they're not valid so that
161 // production code based on external inputs will not crash. IsValid() will
162 // return false in this case.
163 if (!base ||
164 // Ensure there is enough space for the header and at least a few records.
165 size < sizeof(Header) + kMinStackDepth * sizeof(Activity) ||
166 // Ensure that the |stack_slots_| calculation didn't overflow.
167 (size - sizeof(Header)) / sizeof(Activity) >
168 std::numeric_limits<uint32_t>::max()) {
169 NOTREACHED();
170 return;
171 }
172
173 // Ensure that the thread reference doesn't exceed the size of the ID number.
174 // This won't compile at the global scope because Header is a private struct.
175 static_assert(
176 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
177 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
178
179 // Ensure that Activity.data is properly aligned to a 64-bit boundary
180 // so there are no interoperability issues across CPU
181 // architectures.
182 static_assert(offsetof(Activity, data) % sizeof(uint64_t) == 0,
183 "ActivityData.data is not 64-bit aligned");
184
185 // Provided memory should either be completely initialized or all zeros.
186 if (header_->cookie == 0) {
187 // This is a new file. Double-check other fields and then initialize.
188 DCHECK_EQ(0, header_->process_id.load(std::memory_order_relaxed));
189 DCHECK_EQ(0, header_->thread_ref.as_id);
190 DCHECK_EQ(0, header_->start_time);
191 DCHECK_EQ(0, header_->start_ticks);
192 DCHECK_EQ(0U, header_->stack_slots);
193 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
194 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
195 DCHECK_EQ(0, stack_[0].time_internal);
196 DCHECK_EQ(0U, stack_[0].origin_address);
197 DCHECK_EQ(0U, stack_[0].call_stack[0]);
198 DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
199
200 #if defined(OS_WIN)
201 header_->thread_ref.as_tid = PlatformThread::CurrentId();
202 #elif defined(OS_POSIX)
203 header_->thread_ref.as_handle =
204 PlatformThread::CurrentHandle().platform_handle();
205 #endif
206 header_->start_time = base::Time::Now().ToInternalValue();
207 header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
208 header_->stack_slots = stack_slots_;
209 strlcpy(header_->thread_name, PlatformThread::GetName(),
210 sizeof(header_->thread_name));
211 header_->cookie = kHeaderCookie;
212
213 // This is done last so as to guarantee that everything above is "released"
214 // by the time this value gets written.
215 header_->process_id.store(GetCurrentProcId(), std::memory_order_release);
216
217 valid_ = true;
218 DCHECK(IsValid());
219 } else {
220 // This is a file with existing data. Perform basic consistency checks.
221 valid_ = true;
222 valid_ = IsValid();
223 }
224 }
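A minimal construction sketch, assuming the constructor and SizeForStackDepth() are callable directly (as in tests) and that the caller owns a zero-filled block; names are illustrative:

  // Sketch only: construct a tracker over caller-owned, zero-filled memory.
  void TrackerOverLocalBuffer() {
    const size_t size =
        base::debug::ThreadActivityTracker::SizeForStackDepth(4);
    std::unique_ptr<char[]> memory(new char[size]());  // "()" zero-initializes.
    base::debug::ThreadActivityTracker tracker(memory.get(), size);
    DCHECK(tracker.IsValid());  // False only if the parameters were invalid.
  }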
225
226 ThreadActivityTracker::~ThreadActivityTracker() {}
227
228 void ThreadActivityTracker::PushActivity(const void* origin,
229 ActivityType type,
230 const ActivityData& data) {
231 // A thread-checker creates a lock to check the thread-id, which would
232 // mean re-entry into this code if lock acquisitions are being tracked.
233 DCHECK(type == ACT_LOCK_ACQUIRE || thread_checker_.CalledOnValidThread());
234
235 // Get the current depth of the stack. No access to other memory guarded
236 // by this variable is done here so a "relaxed" load is acceptable.
237 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
238
239 // Handle the case where the stack depth has exceeded the storage capacity.
240 // Extra entries will be lost leaving only the base of the stack.
241 if (depth >= stack_slots_) {
242 // Since no other threads modify the data, no compare/exchange is needed.
243 // Since no other memory is being modified, a "relaxed" store is acceptable.
244 header_->current_depth.store(depth + 1, std::memory_order_relaxed);
245 return;
246 }
247
248 // Get a pointer to the next activity and fill it in. No atomicity is required
249 // here because the memory is known only to this thread. It will be made
250 // known to other threads once the depth is incremented.
251 Activity* activity = &stack_[depth];
252 activity->time_internal = base::TimeTicks::Now().ToInternalValue();
253 activity->origin_address = reinterpret_cast<uintptr_t>(origin);
254 activity->activity_type = type;
255 activity->data = data;
256
257 #if defined(SYZYASAN)
258 // Create a stacktrace from the current location and get the addresses.
259 StackTrace stack_trace;
260 size_t stack_depth;
261 const void* const* stack_addrs = stack_trace.Addresses(&stack_depth);
262 // Copy the stack addresses, ignoring the first one (here).
263 size_t i;
264 for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) {
265 activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]);
266 }
267 activity->call_stack[i - 1] = 0;
268 #else
269 // Since the memory was initially zero and nothing ever overwrites it in
270 // this "else" case, there is no need to write even the null terminator.
271 //activity->call_stack[0] = 0;
272 #endif
273
274 // Save the incremented depth. Because this guards |activity| memory filled
275 // above that may be read by another thread once the recorded depth changes,
276 // a "release" store is required.
277 header_->current_depth.store(depth + 1, std::memory_order_release);
278 }
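The push relies on a single-writer publication pattern: fill the slot first, then release-store the new depth, so a reader that acquire-loads the depth sees fully written slots. A standalone sketch of just that pattern (simplified types, not the production code):

  #include <atomic>
  #include <cstdint>

  struct Slot { int64_t time_internal; uintptr_t origin_address; };
  Slot slots[16];                     // Written only by the owning thread.
  std::atomic<uint32_t> depth{0};

  void Push(int64_t now, uintptr_t origin) {
    uint32_t d = depth.load(std::memory_order_relaxed);
    if (d >= 16) {                    // Overflow: count it but don't record it.
      depth.store(d + 1, std::memory_order_relaxed);
      return;
    }
    slots[d] = {now, origin};                       // Fill before publishing.
    depth.store(d + 1, std::memory_order_release);  // Publish to readers.
  }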
279
280 void ThreadActivityTracker::ChangeActivity(ActivityType type,
281 const ActivityData& data) {
282 DCHECK(thread_checker_.CalledOnValidThread());
283 DCHECK(type != ACT_NULL || &data != &kNullActivityData);
284
285 // Get the current depth of the stack and acquire the data held there.
286 uint32_t depth = header_->current_depth.load(std::memory_order_acquire);
287 DCHECK_LT(0U, depth);
288
289 // Update the information if it is being recorded (i.e. within slot limit).
290 if (depth <= stack_slots_) {
291 Activity* activity = &stack_[depth - 1];
292
293 if (type != ACT_NULL) {
294 DCHECK_EQ(activity->activity_type & ACT_CATEGORY_MASK,
295 type & ACT_CATEGORY_MASK);
296 activity->activity_type = type;
297 }
298
299 if (&data != &kNullActivityData)
300 activity->data = data;
301 }
302 }
303
304 void ThreadActivityTracker::PopActivity() {
305 // Do an atomic decrement of the depth. No changes to stack entries guarded
306 // by this variable are done here so a "relaxed" operation is acceptable.
307 // |depth| will receive the value BEFORE it was modified.
308 uint32_t depth =
309 header_->current_depth.fetch_sub(1, std::memory_order_relaxed);
310
311 // Validate that everything is running correctly.
312 DCHECK_LT(0U, depth);
313
314 // A thread-checker creates a lock to check the thread-id, which would
315 // mean re-entry into this code if lock acquisitions are being tracked.
316 DCHECK(stack_[depth - 1].activity_type == ACT_LOCK_ACQUIRE ||
317 thread_checker_.CalledOnValidThread());
318
319 // The stack has shrunk, meaning that some other thread trying to copy the
320 // contents for reporting purposes could get bad data. That thread would
321 // have written a non-zero value into |stack_unchanged|; clearing it here
322 // will let that thread detect that something did change. This needs to
323 // happen after the atomic |depth| operation above so a "release" store
324 // is required.
325 header_->stack_unchanged.store(0, std::memory_order_release);
326 }
327
328 bool ThreadActivityTracker::IsValid() const {
329 if (header_->cookie != kHeaderCookie ||
330 header_->process_id.load(std::memory_order_relaxed) == 0 ||
331 header_->thread_ref.as_id == 0 ||
332 header_->start_time == 0 ||
333 header_->start_ticks == 0 ||
334 header_->stack_slots != stack_slots_ ||
335 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
336 return false;
337 }
338
339 return valid_;
340 }
341
342 bool ThreadActivityTracker::Snapshot(ActivitySnapshot* output_snapshot) const {
343 DCHECK(output_snapshot);
344
345 // There is no "called on valid thread" check for this method as it can be
346 // called from other threads or even other processes. It is also the reason
347 // why atomic operations must be used in certain places above.
348
349 // It's possible for the data to change while reading it in such a way that it
350 // invalidates the read. Make several attempts but don't try forever.
351 const int kMaxAttempts = 10;
352 uint32_t depth;
353
354 // Stop here if the data isn't valid.
355 if (!IsValid())
356 return false;
357
358 // Allocate the maximum size for the stack so it doesn't have to be done
359 // during the time-sensitive snapshot operation. It is shrunk once the
360 // actual size is known.
361 output_snapshot->activity_stack.reserve(stack_slots_);
362
363 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
364 // Remember the process and thread IDs to ensure they aren't replaced
365 // during the snapshot operation. Use "acquire" to ensure that all the
366 // non-atomic fields of the structure are valid (at least at the current
367 // moment in time).
368 const int64_t starting_process_id =
369 header_->process_id.load(std::memory_order_acquire);
370 const int64_t starting_thread_id = header_->thread_ref.as_id;
371
372 // Write a non-zero value to |stack_unchanged| so it's possible to detect
373 // at the end that nothing has changed since copying the data began. A
374 // "cst" operation is required to ensure it occurs before everything else.
375 // Using "cst" memory ordering is relatively expensive but this is only
376 // done during analysis so doesn't directly affect the worker threads.
377 header_->stack_unchanged.store(1, std::memory_order_seq_cst);
378
379 // Fetching the current depth also "acquires" the contents of the stack.
380 depth = header_->current_depth.load(std::memory_order_acquire);
381 uint32_t count = std::min(depth, stack_slots_);
382 output_snapshot->activity_stack.resize(count);
383 if (count > 0) {
384 // Copy the existing contents. Memcpy is used for speed.
385 memcpy(&output_snapshot->activity_stack[0], stack_,
386 count * sizeof(Activity));
387 }
388
389 // Retry if something changed during the copy. A "cst" operation ensures
390 // it must happen after all the above operations.
391 if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
392 continue;
393
394 // Stack copied. Record its full depth.
395 output_snapshot->activity_stack_depth = depth;
396
397 // TODO(bcwhite): Snapshot other things here.
398
399 // Get the general thread information. Loading of "process_id" is guaranteed
400 // to be last so that it's possible to detect below if any content has
401 // changed while reading it. It's technically possible for a thread to end,
402 // have its data cleared, and a new thread with the same IDs to start
403 // tracking an action, all in the time since the ID reads above, but the
404 // chance is so unlikely that it's not worth the effort and complexity of
405 // protecting against it (perhaps with an "unchanged" field like the one
406 // used for the stack).
407 output_snapshot->thread_name =
408 std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
409 output_snapshot->thread_id = header_->thread_ref.as_id;
410 output_snapshot->process_id =
411 header_->process_id.load(std::memory_order_seq_cst);
412
413 // All characters of the thread-name buffer were copied so as to not break
414 // if the trailing NUL were missing. Now limit the length if the actual
415 // name is shorter.
416 output_snapshot->thread_name.resize(
417 strlen(output_snapshot->thread_name.c_str()));
418
419 // If the process or thread ID has changed then the tracker has exited and
420 // the memory has been reused by a new one. Try again.
421 if (output_snapshot->process_id != starting_process_id ||
422 output_snapshot->thread_id != starting_thread_id) {
423 continue;
424 }
425
426 // The snapshot is only successful if the data is still valid once everything
427 // is done, since it's possible for the thread to end somewhere in the middle
428 // and have all its values become garbage.
429 if (!IsValid())
430 return false;
431
432 // Change all the timestamps in the activities from "ticks" to "wall" time.
433 const Time start_time = Time::FromInternalValue(header_->start_time);
434 const int64_t start_ticks = header_->start_ticks;
435 for (Activity& activity : output_snapshot->activity_stack) {
436 activity.time_internal =
437 (start_time +
438 TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
439 .ToInternalValue();
440 }
441
442 // Success!
443 return true;
444 }
445
446 // Too many attempts.
447 return false;
448 }
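The retry loop amounts to a one-bit, single-reader variant of a sequence lock: the reader arms |stack_unchanged|, copies, then re-checks it, while every pop disarms it. A simplified sketch of that handshake, reusing the slots/depth declarations from the sketch after PushActivity():

  std::atomic<uint32_t> stack_unchanged{0};

  void OnPop() {                       // Writer side, after the depth update.
    stack_unchanged.store(0, std::memory_order_release);
  }

  bool TrySnapshot(Slot* out, uint32_t* out_depth) {  // Reader side.
    for (int attempt = 0; attempt < 10; ++attempt) {
      stack_unchanged.store(1, std::memory_order_seq_cst);  // Arm the detector.
      uint32_t d = depth.load(std::memory_order_acquire);
      uint32_t count = d < 16 ? d : 16;
      for (uint32_t i = 0; i < count; ++i)
        out[i] = slots[i];                                  // Copy the stack.
      if (!stack_unchanged.load(std::memory_order_seq_cst))
        continue;                                           // A pop raced us.
      *out_depth = d;
      return true;
    }
    return false;  // Too much churn; give up, as Snapshot() does.
  }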
449
450 // static
451 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
452 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
453 }
454
455
456 GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr;
457
458 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
459 PersistentMemoryAllocator::Reference mem_reference,
460 void* base,
461 size_t size)
462 : ThreadActivityTracker(base, size),
463 mem_reference_(mem_reference),
464 mem_base_(base) {}
465
466 GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
467 // The global |g_tracker_| must point to the owner of this class since all
468 // objects of this type must be destructed before |g_tracker_| can be changed
469 // (something that only occurs in tests).
470 DCHECK(g_tracker_);
471 g_tracker_->ReturnTrackerMemory(this);
472 }
473
474 void GlobalActivityTracker::CreateWithAllocator(
475 std::unique_ptr<PersistentMemoryAllocator> allocator,
476 int stack_depth) {
477 // There's no need to do anything with the result. It is self-managing.
478 GlobalActivityTracker* global_tracker =
479 new GlobalActivityTracker(std::move(allocator), stack_depth);
480 // Create a tracker for this thread since it is known.
481 global_tracker->CreateTrackerForCurrentThread();
482 }
483
484 #if !defined(OS_NACL)
485 // static
486 void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
487 size_t size,
488 uint64_t id,
489 StringPiece name,
490 int stack_depth) {
491 DCHECK(!file_path.empty());
492 DCHECK_GE(static_cast<uint64_t>(std::numeric_limits<int64_t>::max()), size);
493
494 // Create and map the file into memory and make it globally available.
495 std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
496 bool success =
497 mapped_file->Initialize(File(file_path,
498 File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
499 File::FLAG_WRITE | File::FLAG_SHARE_DELETE),
500 {0, static_cast<int64_t>(size)},
501 MemoryMappedFile::READ_WRITE_EXTEND);
502 DCHECK(success);
503 CreateWithAllocator(WrapUnique(new FilePersistentMemoryAllocator(
504 std::move(mapped_file), size, id, name, false)),
505 stack_depth);
506 }
507 #endif // !defined(OS_NACL)
508
509 // static
510 void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
511 uint64_t id,
512 StringPiece name,
513 int stack_depth) {
514 CreateWithAllocator(
515 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)),
516 stack_depth);
517 }
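CreateWithLocalMemory() is the variant without file backing, which makes it convenient for tests; a hedged sketch of such a call (size, id, and name are illustrative):

  // e.g. in a test fixture's SetUp(): 64 KiB arena, default id, 4-deep stacks.
  base::debug::GlobalActivityTracker::CreateWithLocalMemory(
      64 << 10, 0, "ActivityTrackerTest", 4);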
518
519 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
520 DCHECK(!this_thread_tracker_.Get());
521
522 PersistentMemoryAllocator::Reference mem_reference = 0;
523 void* mem_base = nullptr;
524
525 // Get the current count of available memories, acquiring the array values.
526 int count = available_memories_count_.load(std::memory_order_acquire);
527 while (count > 0) {
528 // There is a memory block that was previously released (and zeroed) so
529 // just re-use that rather than allocating a new one. Use "relaxed" because
530 // the value is guarded by the |count| "acquire". A zero reference replaces
531 // the existing value so that it can't be used by another thread that
532 // manages to interrupt this one before the count can be decremented.
533 // A zero reference is also required for the "push" operation to work
534 // once the count finally does get decremented.
535 mem_reference =
536 available_memories_[count - 1].exchange(0, std::memory_order_relaxed);
537
538 // If the reference is zero, it's already been taken but count hasn't yet
539 // been decremented. Give that other thread a chance to finish then reload
540 // the "count" value and try again.
541 if (!mem_reference) {
542 PlatformThread::YieldCurrentThread();
543 count = available_memories_count_.load(std::memory_order_acquire);
544 continue;
545 }
546
547 // Decrement the count indicating that the value has been taken. If this
548 // fails then another thread has pushed something new and incremented the
549 // count.
550 // NOTE: |oldcount| will be loaded with the existing value.
551 int oldcount = count;
552 if (!available_memories_count_.compare_exchange_strong(
553 oldcount, count - 1, std::memory_order_acquire,
554 std::memory_order_acquire)) {
555 DCHECK_LT(count, oldcount);
556
557 // Restore the reference that was zeroed above and try again.
558 available_memories_[count - 1].store(mem_reference,
559 std::memory_order_relaxed);
560 count = oldcount;
561 continue;
562 }
563
564 // Turn the reference back into one of the activity-tracker type.
565 mem_base = allocator_->GetAsObject<char>(mem_reference,
566 kTypeIdActivityTrackerFree);
567 DCHECK(mem_base);
568 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
569 bool changed = allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
570 kTypeIdActivityTrackerFree);
571 DCHECK(changed);
572
573 // Success.
574 break;
575 }
576
577 // Handle the case where no previously-used memories are available.
578 if (count == 0) {
579 // Allocate a block of memory from the persistent segment.
580 mem_reference =
581 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker);
582 if (mem_reference) {
583 // Success. Convert the reference to an actual memory address.
584 mem_base =
585 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
586 // Make the allocation iterable so it can be found by other processes.
587 allocator_->MakeIterable(mem_reference);
588 } else {
589 // Failure. This shouldn't happen.
590 NOTREACHED();
591 // But if it does, probably because the allocator wasn't given enough
592 // memory to satisfy all possible requests, handle it gracefully by
593 // allocating the required memory from the heap.
594 mem_base = new char[stack_memory_size_];
595 memset(mem_base, 0, stack_memory_size_);
596 // Report the thread-count at which the allocator was full so that the
597 // failure can be seen and the underlying memory resized appropriately.
598 UMA_HISTOGRAM_COUNTS_1000(
599 "Debug.ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
600 thread_tracker_count_.load(std::memory_order_relaxed));
601 }
602 }
603
604 // Create a tracker with the acquired memory and set it as the tracker
605 // for this particular thread in thread-local-storage.
606 DCHECK(mem_base);
607 ManagedActivityTracker* tracker =
608 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
609 DCHECK(tracker->IsValid());
610 this_thread_tracker_.Set(tracker);
611 int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
612
613 UMA_HISTOGRAM_ENUMERATION("Debug.ActivityTracker.ThreadTrackers.Count",
Alexei Svitkine (slow) 2016/08/04 14:27:05 Maybe HangDebugging.? Or HangWatcher. or something
manzagop (departed) 2016/08/04 14:35:46 The plan is to use this "internal state representa
bcwhite 2016/08/04 14:55:36 The UMA name should match the class, should it not
614 old_count + 1, kMaxThreadCount);
615 return tracker;
616 }
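The reuse path above is a small lock-free stack of free block references: a slot is claimed by exchanging in zero, and the claim is committed by decrementing the count. A simplified standalone sketch of that pop (fixed-size array, names hypothetical):

  std::atomic<int> free_count{0};
  std::atomic<uint32_t> free_slots[8];  // Zero means "empty slot".

  uint32_t PopFreeBlock() {
    int count = free_count.load(std::memory_order_acquire);
    while (count > 0) {
      uint32_t ref =
          free_slots[count - 1].exchange(0, std::memory_order_relaxed);
      if (!ref) {                       // Claimed but not yet committed; retry.
        count = free_count.load(std::memory_order_acquire);
        continue;
      }
      int expected = count;
      if (free_count.compare_exchange_strong(expected, count - 1,
                                             std::memory_order_acquire))
        return ref;                     // Claim committed.
      free_slots[count - 1].store(ref, std::memory_order_relaxed);  // Undo.
      count = expected;
    }
    return 0;                           // Nothing available; allocate fresh.
  }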
617
618 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
619 ThreadActivityTracker* tracker =
620 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
621 if (tracker) {
622 this_thread_tracker_.Free();
623 delete tracker;
624 }
625 }
626
627 GlobalActivityTracker::GlobalActivityTracker(
628 std::unique_ptr<PersistentMemoryAllocator> allocator,
629 int stack_depth)
630 : allocator_(std::move(allocator)),
631 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
632 this_thread_tracker_(&OnTLSDestroy),
633 thread_tracker_count_(0),
634 available_memories_count_(0) {
635 // Clear the available-memories array.
636 memset(available_memories_, 0, sizeof(available_memories_));
637
638 // Ensure the passed memory is valid and empty (iterator finds nothing).
639 uint32_t type;
640 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
641
642 // Ensure that there is no other global object and then make this one such.
643 DCHECK(!g_tracker_);
644 g_tracker_ = this;
645 }
646
647 GlobalActivityTracker::~GlobalActivityTracker() {
648 DCHECK_EQ(g_tracker_, this);
649 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
650 g_tracker_ = nullptr;
651 }
652
653 void GlobalActivityTracker::ReturnTrackerMemory(
654 ManagedActivityTracker* tracker) {
655 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
656 void* mem_base = tracker->mem_base_;
657
658 // Zero the memory so that it is ready for use if needed again later. It's
659 // better to clear the memory now, when a thread is exiting, than to do it
660 // when it is first needed by a thread doing actual work.
661 memset(mem_base, 0, stack_memory_size_);
662
663 // Remove the destructed tracker from the set of known ones.
664 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
665 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
666
667 // Deal with the memory that was used by the tracker.
668 if (mem_reference) {
669 // The memory was within the persistent memory allocator. Change its type
670 // so that iteration won't find it.
671 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree,
672 kTypeIdActivityTracker);
673 // There is no way to free memory from a persistent allocator so instead
674 // push it on the internal list of available memory blocks.
675 while (true) {
676 // Get the existing count of available memories and ensure we won't
677 // overrun the array. Acquire the values in the array.
678 int count = available_memories_count_.load(std::memory_order_acquire);
679 if (count >= kMaxThreadCount) {
680 NOTREACHED();
681 // Storage is full. Just forget about this memory. It won't be re-used
682 // but there's no real loss.
683 break;
684 }
685
686 // Write the reference of the memory being returned to this slot in the
687 // array. Empty slots have a value of zero so do an atomic compare-and-
688 // exchange to ensure that a race condition doesn't exist with another
689 // thread doing the same.
690 PersistentMemoryAllocator::Reference mem_expected = 0;
691 if (!available_memories_[count].compare_exchange_strong(
692 mem_expected, mem_reference, std::memory_order_release,
693 std::memory_order_relaxed)) {
694 PlatformThread::YieldCurrentThread();
695 continue; // Try again.
696 }
697
698 // Increment the count, releasing the value written to the array. This
699 // could fail if a simultaneous "pop" operation decremented the counter.
700 // If that happens, clear the array slot and start over. Do a "strong"
701 // exchange to avoid spurious retries that can occur with a "weak" one.
702 int expected = count; // Updated by compare/exchange.
703 if (!available_memories_count_.compare_exchange_strong(
704 expected, count + 1, std::memory_order_release,
705 std::memory_order_relaxed)) {
706 available_memories_[count].store(0, std::memory_order_relaxed);
707 continue;
708 }
709
710 // Count was successfully incremented to reflect the newly added value.
711 break;
712 }
713 } else {
714 // The memory was allocated from the process heap. This shouldn't happen
715 // because the persistent memory segment should be big enough for all
716 // thread stacks but it's better to support falling back to allocation
717 // from the heap rather than crash. Everything will work as normal but
718 // the data won't be persisted.
719 delete[] reinterpret_cast<char*>(mem_base);
720 }
721 }
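And the matching push, continuing the PopFreeBlock() sketch after CreateTrackerForCurrentThread(): claim an empty slot with a compare-and-swap, then commit by incrementing the count, undoing the slot write if the increment loses a race:

  void PushFreeBlock(uint32_t ref) {
    while (true) {
      int count = free_count.load(std::memory_order_acquire);
      if (count >= 8)
        return;                         // Full; just drop the reference.
      uint32_t expected_ref = 0;
      if (!free_slots[count].compare_exchange_strong(
              expected_ref, ref, std::memory_order_release,
              std::memory_order_relaxed))
        continue;                       // Lost the slot race; try again.
      int expected_count = count;
      if (free_count.compare_exchange_strong(expected_count, count + 1,
                                             std::memory_order_release,
                                             std::memory_order_relaxed))
        return;                         // Commit succeeded.
      free_slots[count].store(0, std::memory_order_relaxed);  // Undo and retry.
    }
  }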
722
723 // static
724 void GlobalActivityTracker::OnTLSDestroy(void* value) {
725 delete reinterpret_cast<ManagedActivityTracker*>(value);
726 }
727
728
729 ScopedActivity::ScopedActivity(const tracked_objects::Location& location,
730 uint8_t action,
731 uint32_t id,
732 int32_t info)
733 : GlobalActivityTracker::ScopedThreadActivity(
734 location.program_counter(),
735 static_cast<ThreadActivityTracker::ActivityType>(
736 ThreadActivityTracker::ACT_GENERIC | action),
737 ThreadActivityTracker::ActivityData::ForGeneric(id, info),
738 /*lock_allowed=*/true),
739 id_(id) {
740 // The action must not affect the category bits of the activity type.
741 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK);
742 }
743
744 void ScopedActivity::ChangeAction(uint8_t action) {
745 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK);
746 ChangeTypeAndData(static_cast<ThreadActivityTracker::ActivityType>(
747 ThreadActivityTracker::ACT_GENERIC | action),
748 ThreadActivityTracker::kNullActivityData);
749 }
750
751 void ScopedActivity::ChangeInfo(int32_t info) {
752 ChangeTypeAndData(ThreadActivityTracker::ACT_NULL,
753 ThreadActivityTracker::ActivityData::ForGeneric(id_, info));
754 }
755
756 void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) {
757 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK);
758 ChangeTypeAndData(static_cast<ThreadActivityTracker::ActivityType>(
759 ThreadActivityTracker::ACT_GENERIC | action),
760 ThreadActivityTracker::ActivityData::ForGeneric(id_, info));
761 }
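A hypothetical call site for the generic scope (identifiers illustrative): the constructor pushes an activity, ChangeInfo() annotates it in place, and the destructor pops it.

  void DoLongRunningStep() {
    const uint32_t kStepId = 42;        // Illustrative identifier.
    base::debug::ScopedActivity activity(FROM_HERE, /*action=*/0, kStepId,
                                         /*info=*/0);
    // ... phase one ...
    activity.ChangeInfo(1);  // Record progress without a new stack entry.
    // ... phase two ...
  }                          // Popped here, even on early return.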
762
763 ScopedTaskRunActivity::ScopedTaskRunActivity(const base::PendingTask& task)
764 : GlobalActivityTracker::ScopedThreadActivity(
765 task.posted_from.program_counter(),
766 ThreadActivityTracker::ACT_TASK_RUN,
767 ThreadActivityTracker::ActivityData::ForTask(task.sequence_num),
768 /*lock_allowed=*/true) {}
769
770 ScopedLockAcquireActivity::ScopedLockAcquireActivity(
771 const base::internal::LockImpl* lock)
772 : GlobalActivityTracker::ScopedThreadActivity(
773 nullptr,
774 ThreadActivityTracker::ACT_LOCK_ACQUIRE,
775 ThreadActivityTracker::ActivityData::ForLock(lock),
776 /*lock_allowed=*/false) {}
777
778 ScopedEventWaitActivity::ScopedEventWaitActivity(
779 const base::WaitableEvent* event)
780 : GlobalActivityTracker::ScopedThreadActivity(
781 nullptr,
782 ThreadActivityTracker::ACT_EVENT_WAIT,
783 ThreadActivityTracker::ActivityData::ForEvent(event),
784 /*lock_allowed=*/true) {}
785
786 ScopedThreadJoinActivity::ScopedThreadJoinActivity(
787 const base::PlatformThreadHandle* thread)
788 : GlobalActivityTracker::ScopedThreadActivity(
789 nullptr,
790 ThreadActivityTracker::ACT_THREAD_JOIN,
791 ThreadActivityTracker::ActivityData::ForThread(*thread),
792 /*lock_allowed=*/true) {}
793
794 #if !defined(OS_NACL) && !defined(OS_IOS)
795 ScopedProcessWaitActivity::ScopedProcessWaitActivity(
796 const base::Process* process)
797 : GlobalActivityTracker::ScopedThreadActivity(
798 nullptr,
799 ThreadActivityTracker::ACT_PROCESS_WAIT,
800 ThreadActivityTracker::ActivityData::ForProcess(process->Pid()),
801 /*lock_allowed=*/true) {}
802 #endif
803
804 } // namespace debug
805 } // namespace base
