Chromium Code Reviews

Side by Side Diff: base/debug/activity_tracker.cc

Issue 1980743002: Track thread activities in order to diagnose hangs. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@readwrite-mmf
Patch Set: more clean-up and addressed review comments Created 4 years, 6 months ago
1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "base/debug/activity_tracker.h"
6
7 #include <atomic>
8
9 #include "base/feature_list.h"
10 #include "base/files/file.h"
11 #include "base/files/file_path.h"
12 #include "base/files/memory_mapped_file.h"
13 #include "base/logging.h"
14 #include "base/memory/ptr_util.h"
15 #include "base/metrics/field_trial.h"
16 #include "base/metrics/histogram_macros.h"
17 #include "base/pending_task.h"
18 #include "base/process/process.h"
19 #include "base/process/process_handle.h"
20 #include "base/stl_util.h"
21 #include "base/strings/string_util.h"
22 #include "base/threading/platform_thread.h"
23
24 namespace base {
25 namespace debug {
26
27 namespace {
28
29 // A number that identifies the memory as having been initialized. It's
30 // arbitrary but happens to be the first 8 bytes of SHA1(ThreadActivityTracker).
31 // A version number is added on so that major structure changes won't try to
32 // read an older version (since the cookie won't match).
33 const uint64_t kHeaderCookie = 0xC0029B240D4A3092ULL + 1; // v1
34
35 // The minimum depth a stack should support.
36 const int kMinStackDepth = 2;
37
38 } // namespace
39
40
41 #if !defined(OS_NACL) // NaCl builds don't support any kind of file access.
42 void SetupGlobalActivityTrackerFieldTrial(const FilePath& file) {
43 const Feature kActivityTrackerFeature{
44 "ActivityTracking", FEATURE_DISABLED_BY_DEFAULT
45 };
46
47 if (!base::FeatureList::IsEnabled(kActivityTrackerFeature))
48 return;
49
50 // TODO(bcwhite): Adjust these numbers once there is real data to show
51 // just how much of an arena is necessary.
52 const size_t kMemorySize = 1 << 20; // 1 MiB
53 const int kStackDepth = 4;
54 const uint64_t kAllocatorId = 0;
55 const char kAllocatorName[] = "ActivityTracker";
56
57 GlobalActivityTracker::CreateWithFile(
58 file.AddExtension(PersistentMemoryAllocator::kFileExtension),
59 kMemorySize, kAllocatorId, kAllocatorName, kStackDepth);
60 }
61 #endif // !defined(OS_NACL)
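
For callers that want activity tracking without going through the field trial, the allocator-backed setup above can also be driven directly. A minimal sketch using the CreateWithLocalMemory() entry point defined later in this file; the size, id, name, and depth values here are illustrative, not values taken from the patch:

    #include "base/debug/activity_tracker.h"

    void EnableActivityTrackingForThisProcess() {
      // Illustrative parameters: a 1 MiB in-process arena, allocator id 0, a
      // made-up allocator name, and 4 activity slots per thread. Per
      // CreateWithAllocator() below, this also creates a tracker for the
      // calling thread.
      base::debug::GlobalActivityTracker::CreateWithLocalMemory(
          1 << 20, 0, "ExampleActivityTracking", 4);
    }
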
62
63
64 // This information is kept for every thread that is tracked. It is filled
65 // the very first time the thread is seen. All fields must be of exact sizes
66 // so there is no issue moving between 32 and 64-bit builds.
67 struct ThreadActivityTracker::Header {
68 // This unique number indicates a valid initialization of the memory.
69 uint64_t cookie;
70
71 // The process-id and thread-id to which this data belongs. These identifiers
72 // are not guaranteed to mean anything but are unique, in combination, among
73 // all active trackers.
74 int64_t process_id;
75 union {
76 int64_t as_id;
77 PlatformThreadHandle::Handle as_handle;
78 } thread_ref;
79
80 // The start-time and start-ticks when the data was created. Each activity
81 // record has a |time_internal| value that can be converted to a "wall time"
82 // with these two values.
83 int64_t start_time;
84 int64_t start_ticks;
85
86 // The number of Activity slots in the data.
87 uint32_t stack_slots;
88
89 // The current depth of the stack. This may be greater than the number of
90 // slots. If the depth exceeds the number of slots, the newest entries
91 // won't be recorded.
92 std::atomic<uint32_t> current_depth;
93
94 // A memory location used to indicate if changes have been made to the stack
95 // that would invalidate an in-progress read of its contents. The active
96 // tracker will zero the value whenever something gets popped from the
97 // stack. A monitoring tracker can write a non-zero value here, copy the
98 // stack contents, and read the value to know, if it is still non-zero, that
99 // the contents didn't change while being copied.
100 std::atomic<uint32_t> stack_unchanged;
101
102 // The name of the thread (up to a maximum length). Dynamic-length names
103 // are not practical since the memory has to come from the same persistent
104 // allocator that holds this structure and to which this object has no
105 // reference.
106 char thread_name[32];
107 };
108
109 // It doesn't matter what is contained in this (though it will be all zeros)
110 // as only the address of it is important.
111 const ThreadActivityTracker::ActivityData
112 ThreadActivityTracker::kNullActivityData = {};
113
114 ThreadActivityTracker::ActivityData
115 ThreadActivityTracker::ActivityData::ForThread(
116 const PlatformThreadHandle& handle) {
117 // Header already has a conversion union; reuse that.
118 ThreadActivityTracker::Header header;
119 header.thread_ref.as_id = 0; // Zero the union in case as_handle is smaller.
120 header.thread_ref.as_handle = handle.platform_handle();
121 return ForThread(header.thread_ref.as_id);
122 }
123
124 ThreadActivityTracker::ActivitySnapshot::ActivitySnapshot() {}
125 ThreadActivityTracker::ActivitySnapshot::~ActivitySnapshot() {}
126
127
128 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
129 : header_(static_cast<Header*>(base)),
130 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
131 sizeof(Header))),
132 stack_slots_(
133 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
134 DCHECK(thread_checker_.CalledOnValidThread());
135
136 // Verify the parameters but fail gracefully if they're not valid so that
137 // production code based on external inputs will not crash. IsValid() will
138 // return false in this case.
139 if (!base ||
140 // Ensure there is enough space for the header and at least a few records.
141 size < sizeof(Header) + kMinStackDepth * sizeof(Activity) ||
142 // Ensure that the |stack_slots_| calculation didn't overflow.
143 (size - sizeof(Header)) / sizeof(Activity) >
144 std::numeric_limits<uint32_t>::max()) {
145 NOTREACHED();
146 return;
147 }
148
149 // Ensure that the thread reference doesn't exceed the size of the ID number.
150 // This won't compile at the global scope because Header is a private struct.
151 static_assert(
152 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
153 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");
154
155 // Provided memory should either be completely initialized or all zeros.
156 if (header_->cookie == 0) {
157 // This is a new file. Double-check other fields and then initialize.
158 DCHECK_EQ(0, header_->process_id);
159 DCHECK_EQ(0, header_->thread_ref.as_id);
160 DCHECK_EQ(0, header_->start_time);
161 DCHECK_EQ(0, header_->start_ticks);
162 DCHECK_EQ(0U, header_->stack_slots);
163 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
164 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
165 DCHECK_EQ(0, stack_[0].time_internal);
166 DCHECK_EQ(0U, stack_[0].source_address);
167 DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
168
169 header_->process_id = GetCurrentProcId();
170 header_->thread_ref.as_handle =
171 PlatformThread::CurrentHandle().platform_handle();
172 header_->start_time = base::Time::Now().ToInternalValue();
173 header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
174 header_->stack_slots = stack_slots_;
175 strlcpy(header_->thread_name, PlatformThread::GetName(),
176 sizeof(header_->thread_name));
177 header_->cookie = kHeaderCookie;
178 valid_ = true;
179 DCHECK(IsValid());
180 } else {
181 // This is a file with existing data. Perform basic consistency checks.
182 valid_ = true;
183 valid_ = IsValid();
184 }
185 }
186
187 ThreadActivityTracker::~ThreadActivityTracker() {}
188
189 void ThreadActivityTracker::PushActivity(const void* source,
190 ActivityType type,
191 const ActivityData& data) {
192 // A thread-checker creates a lock to check the thread-id, which would cause
193 // re-entry into this code when lock acquisitions are being tracked.
194 DCHECK(type == ACT_LOCK_ACQUIRE || thread_checker_.CalledOnValidThread());
195
196 // Get the current depth of the stack. No access to other memory guarded
197 // by this variable is done here so a "relaxed" load is acceptable.
198 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
199
200 // Handle the case where the stack depth has exceeded the storage capacity.
201 // Extra entries will be lost leaving only the base of the stack.
202 if (depth >= stack_slots_) {
203 // Since no other threads modify the data, no compare/exchange is needed.
204 // Since no other memory is being modified, a "relaxed" store is acceptable.
205 header_->current_depth.store(depth + 1, std::memory_order_relaxed);
206 return;
207 }
208
209 // Get a pointer to the next activity and load it. No atomicity is required
210 // here because the memory is known only to this thread. It will be made
211 // known to other threads once the depth is incremented.
212 Activity* activity = &stack_[depth];
213 activity->time_internal = base::TimeTicks::Now().ToInternalValue();
214 activity->source_address = reinterpret_cast<uintptr_t>(source);
215 activity->activity_type = type;
216 activity->data = data;
217
218 // Save the incremented depth. Because this guards |activity| memory filled
219 // above that may be read by another thread once the recorded depth changes,
220 // a "release" store is required.
221 header_->current_depth.store(depth + 1, std::memory_order_release);
222 }
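
The correctness of PushActivity() hinges on the single release store of |current_depth|: the new Activity is written with plain stores and becomes visible to a snapshotting thread only once the depth is published. A stripped-down sketch of that publication pattern; names such as Slot, g_slots, and ReadSlots are invented for illustration, and the |stack_unchanged| revalidation is omitted:

    #include <algorithm>
    #include <atomic>
    #include <cstdint>

    struct Slot { uint64_t time; uintptr_t source; };

    Slot g_slots[16];                  // the per-thread activity slots
    std::atomic<uint32_t> g_depth{0};  // current stack depth

    // Writer (the tracked thread): fill the slot, then publish it.
    void Push(uint64_t time, uintptr_t source) {
      uint32_t depth = g_depth.load(std::memory_order_relaxed);
      if (depth >= 16) {
        // Out of slots: still count the push so the matching pop balances out.
        g_depth.store(depth + 1, std::memory_order_relaxed);
        return;
      }
      g_slots[depth].time = time;    // plain stores...
      g_slots[depth].source = source;
      g_depth.store(depth + 1, std::memory_order_release);  // ...then publish.
    }

    // Reader (another thread): an "acquire" load of the depth guarantees that
    // the slot contents written before the matching release store are visible.
    uint32_t ReadSlots(Slot* out, uint32_t max_out) {
      uint32_t depth = g_depth.load(std::memory_order_acquire);
      uint32_t count = std::min({depth, 16u, max_out});
      for (uint32_t i = 0; i < count; ++i)
        out[i] = g_slots[i];
      return count;
    }
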
223
224 void ThreadActivityTracker::ChangeActivity(const void* source,
225 ActivityType type,
226 const ActivityData& data) {
227 DCHECK(thread_checker_.CalledOnValidThread());
228 DCHECK(type != ACT_NULL || &data != &kNullActivityData);
229
230 // Get the current depth of the stack.
231 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
232 DCHECK_LT(0U, depth);
233
234 // Update the information if it is being recorded (i.e. within slot limit).
235 if (depth <= stack_slots_) {
236 Activity* activity = &stack_[depth - 1];
237 DCHECK_EQ(reinterpret_cast<uintptr_t>(source), activity->source_address);
238
239 if (type != ACT_NULL) {
240 DCHECK_EQ(activity->activity_type & ACT_CATEGORY_MASK,
241 type & ACT_CATEGORY_MASK);
242 activity->activity_type = type;
243 }
244
245 if (&data != &kNullActivityData)
246 activity->data = data;
247 }
248 }
249
250 void ThreadActivityTracker::PopActivity(const void* source) {
251 // Do an atomic decrement of the depth. No changes to stack entries guarded
252 // by this variable are done here so a "relaxed" operation is acceptable.
253 // |depth| will receive the value BEFORE it was modified.
254 uint32_t depth =
255 header_->current_depth.fetch_sub(1, std::memory_order_relaxed);
256
257 // Validate that everything is running correctly.
258 DCHECK_LT(0U, depth);
259 if (depth <= stack_slots_) {
260 DCHECK_EQ(reinterpret_cast<uintptr_t>(source),
261 stack_[depth - 1].source_address);
262 DCHECK(stack_[depth - 1].activity_type == ACT_LOCK_ACQUIRE ||
263 thread_checker_.CalledOnValidThread());
264 }
265
266 // The stack has shrunk meaning that some other thread trying to copy the
267 // contents for reporting purposes could get bad data. That thread would
268 // have written a non-zero value into |stack_unchanged|; clearing it here
269 // will let that thread detect that something did change. This needs to
270 // happen after the atomic |depth| operation above so a "release" store
271 // is required.
272 header_->stack_unchanged.store(0, std::memory_order_release);
273 }
274
275 bool ThreadActivityTracker::IsValid() const {
276 if (header_->cookie != kHeaderCookie ||
277 header_->process_id == 0 ||
278 header_->thread_ref.as_id == 0 ||
279 header_->start_time == 0 ||
280 header_->start_ticks == 0 ||
281 header_->stack_slots != stack_slots_ ||
282 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
283 return false;
284 }
285
286 return valid_;
287 }
288
289 bool ThreadActivityTracker::Snapshot(ActivitySnapshot* output_snapshot) const {
290 DCHECK(output_snapshot);
291
292 // There is no "called on valid thread" check for this method as it can be
293 // called from other threads or even other processes. It is also the reason
294 // why atomic operations must be used in certain places above.
295
296 // It's possible for the data to change while reading it in such a way that it
297 // invalidates the read. Make several attempts but don't try forever.
298 const int kMaxAttempts = 10;
299 uint32_t depth;
300
301 // Stop here if the data isn't valid.
302 if (!IsValid())
303 return false;
304
305 // Start with an empty return stack.
306 output_snapshot->activity_stack.clear();
307
308 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
309 // Remember the process and thread IDs to ensure they aren't replaced
310 // during the snapshot operation.
311 const int64_t starting_process_id = header_->process_id;
312 const int64_t starting_thread_id = header_->thread_ref.as_id;
313
314 // Write a non-zero value to |stack_unchanged| so it's possible to detect
315 // at the end that nothing has changed since copying the data began. A
316 // "cst" operation is required to ensure it occurs before everything else.
317 header_->stack_unchanged.store(1, std::memory_order_seq_cst);
318
319 // Fetching the current depth also "acquires" the contents of the stack.
320 depth = header_->current_depth.load(std::memory_order_acquire);
321 if (depth > 0) {
322 // Copy the existing contents. Memcpy is used for speed.
323 uint32_t count = std::min(depth, stack_slots_);
324 output_snapshot->activity_stack.resize(count);
325 memcpy(&output_snapshot->activity_stack[0], stack_,
326 count * sizeof(Activity));
327 }
328
329 // Retry if something changed during the copy. A "cst" operation ensures
330 // it must happen after all the above operations.
331 if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
332 continue;
333
334 // Stack copied. Record its full depth.
335 output_snapshot->activity_stack_depth = depth;
336
337 // TODO(bcwhite): Snapshot other things here.
338
339 // Get the general thread information.
340 output_snapshot->process_id = header_->process_id;
341 output_snapshot->thread_id = header_->thread_ref.as_id;
342 output_snapshot->thread_name =
343 std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
344
345 // All characters of the thread-name buffer were copied so as to not break
346 // if the trailing NUL were missing. Now limit the length if the actual
347 // name is shorter.
348 output_snapshot->thread_name.resize(
349 strlen(output_snapshot->thread_name.c_str()));
350
351 // If the process or thread ID has changed then the tracker has exited and
352 // the memory has been reused by a new one. Try again.
353 if (output_snapshot->process_id != starting_process_id ||
354 output_snapshot->thread_id != starting_thread_id) {
355 continue;
356 }
357
358 // Only successful if the data is still valid once everything is done since
359 // it's possible for the thread to end somewhere in the middle and all its
360 // values become garbage.
361 if (!IsValid())
362 return false;
363
364 // Change all the timestamps in the activities from "ticks" to "wall" time.
365 const Time start_time = Time::FromInternalValue(header_->start_time);
366 const int64_t start_ticks = header_->start_ticks;
367 for (Activity& activity : output_snapshot->activity_stack) {
368 activity.time_internal =
369 (start_time +
370 TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
371 .ToInternalValue();
372 }
373
374 // Success!
375 return true;
376 }
377
378 // Too many attempts.
379 return false;
380 }
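
Snapshot() is essentially the reader side of a seqlock-like protocol: it writes a sentinel into |stack_unchanged|, copies the stack, and only accepts the copy if the sentinel is still intact, since PopActivity() clears it whenever the stack shrinks. A self-contained sketch of just that retry loop; Entry, g_stack, and CopyStack are invented names, and the process/thread-id re-check and the tick-to-wall-time conversion are left out:

    #include <atomic>
    #include <cstring>

    struct Entry { long value; };

    Entry g_stack[16];
    std::atomic<uint32_t> g_depth{0};
    std::atomic<uint32_t> g_unchanged{0};

    // Tracked thread: popping may invalidate a copy that a reader has in
    // flight, so clear the flag the reader is watching.
    void Pop() {
      g_depth.fetch_sub(1, std::memory_order_relaxed);
      g_unchanged.store(0, std::memory_order_release);
    }

    // Reader: set the flag, copy the data, then confirm the flag is still set.
    bool CopyStack(Entry* out, uint32_t* out_depth) {
      for (int attempt = 0; attempt < 10; ++attempt) {
        g_unchanged.store(1, std::memory_order_seq_cst);
        uint32_t depth = g_depth.load(std::memory_order_acquire);
        uint32_t count = depth < 16 ? depth : 16;
        memcpy(out, g_stack, count * sizeof(Entry));
        if (!g_unchanged.load(std::memory_order_seq_cst))
          continue;  // A pop raced with the copy; try again.
        *out_depth = depth;
        return true;
      }
      return false;  // Too many attempts; give up.
    }
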
381
382 // static
383 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
384 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
385 }
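
SizeForStackDepth() just accounts for the fixed header followed by |stack_depth| contiguous Activity records. A worked instance with illustrative sizes; the real sizeof values depend on the platform and on activity_tracker.h, which is not part of this file:

    #include <cstddef>

    // Illustrative stand-ins, not the real structure sizes.
    constexpr size_t kHeaderSizeExample = 64;
    constexpr size_t kActivitySizeExample = 32;

    constexpr size_t SizeForStackDepthExample(int stack_depth) {
      return static_cast<size_t>(stack_depth) * kActivitySizeExample +
             kHeaderSizeExample;
    }

    // With these example sizes, a 4-deep stack needs 4 * 32 + 64 = 192 bytes.
    static_assert(SizeForStackDepthExample(4) == 192, "header plus four slots");
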
386
387
388 GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr;
389
390 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
391 PersistentMemoryAllocator::Reference mem_reference,
392 void* base,
393 size_t size)
394 : ThreadActivityTracker(base, size),
395 mem_reference_(mem_reference),
396 mem_base_(base) {}
397
398 GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
399 // The global |g_tracker_| must point to the owner of this class since all
400 // objects of this type must be destructed before |g_tracker_| can be changed
401 // (something that only occurs in tests).
402 DCHECK(g_tracker_);
403 g_tracker_->ReturnTrackerMemory(this);
404 }
405
406 void GlobalActivityTracker::CreateWithAllocator(
407 std::unique_ptr<PersistentMemoryAllocator> allocator,
408 int stack_depth) {
409 // There's no need to do anything with the result. It is self-managing.
410 GlobalActivityTracker* global_tracker =
411 new GlobalActivityTracker(std::move(allocator), stack_depth);
412 // Create a tracker for this thread since it is known.
413 global_tracker->CreateTrackerForCurrentThread();
414 }
415
416 #if !defined(OS_NACL)
417 // static
418 void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
419 size_t size,
420 uint64_t id,
421 StringPiece name,
422 int stack_depth) {
423 DCHECK(!file_path.empty());
424 DCHECK_GE(std::numeric_limits<int64_t>::max(), size);
425
426 // Create and map the file into memory and make it globally available.
427 std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
428 bool success =
429 mapped_file->Initialize(File(file_path,
430 File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
431 File::FLAG_WRITE | File::FLAG_SHARE_DELETE),
432 {0, static_cast<int64_t>(size)},
433 MemoryMappedFile::READ_WRITE_EXTEND);
434 DCHECK(success);
435 CreateWithAllocator(WrapUnique(new FilePersistentMemoryAllocator(
436 std::move(mapped_file), size, id, name, false)),
437 stack_depth);
438 }
439 #endif // !defined(OS_NACL)
440
441 // static
442 void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
443 uint64_t id,
444 StringPiece name,
445 int stack_depth) {
446 CreateWithAllocator(
447 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)),
448 stack_depth);
449 }
450
451 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
452 DCHECK(!this_thread_tracker_.Get());
453
454 PersistentMemoryAllocator::Reference mem_reference = 0;
455 void* mem_base = nullptr;
456
457 // Get the current count of available memories, acquiring the array values.
458 int count = available_memories_count_.load(std::memory_order_acquire);
459 while (count > 0) {
460 // There is a memory block that was previously released (and zeroed) so
461 // just re-use that rather than allocating a new one. Use "acquire" so
462 // the operations below cannot be re-ordered above this load.
463 mem_reference =
464 available_memories_[count - 1].load(std::memory_order_acquire);
465 DCHECK(mem_reference);
466
467 // Decrement the count indicating that the value has been taken. If this
468 // fails then something else, another thread doing push or pop, has changed
469 // the stack; retry if so.
470 // NOTE: |count| will be loaded with the existing value and affect the
471 // "while" condition.
472 if (!available_memories_count_.compare_exchange_weak(
473 count, count - 1, std::memory_order_acquire,
474 std::memory_order_acquire)) {
475 continue;
476 }
477
478 // Clear the value just read from the array so that the "push" operation
479 // knows there is no value there and will work correctly.
480 available_memories_[count - 1].store(0, std::memory_order_relaxed);
481
482 // Turn the reference back into one of the activity-tracker type.
483 mem_base = allocator_->GetAsObject<char>(mem_reference,
484 kTypeIdActivityTrackerFree);
485 DCHECK(mem_base);
486 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
487 allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
488 kTypeIdActivityTrackerFree);
489
490 // Success.
491 break;
492 }
493
494 // Handle the case where no previously-used memories are available.
495 if (count == 0) {
496 // Allocate a block of memory from the persistent segment.
497 mem_reference =
498 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker);
499 if (mem_reference) {
500 // Success. Convert the reference to an actual memory address.
501 mem_base =
502 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
503 // Make the allocation iterable so it can be found by other processes.
504 allocator_->MakeIterable(mem_reference);
505 } else {
506 // Failure. This shouldn't happen.
507 NOTREACHED();
508 // But if it does, probably because the allocator wasn't given enough
509 // memory to satisfy all possible requests, handle it gracefully by
510 // allocating the required memory from the heap.
511 mem_base = new char[stack_memory_size_];
512 memset(mem_base, 0, stack_memory_size_);
513 // Report the thread-count at which the allocator was full so that the
514 // failure can be seen and underlying memory resized appropriately.
515 UMA_HISTOGRAM_COUNTS_1000(
516 "UMA.ActivityTracker.ThreadTrackers.MemLimit",
517 thread_tracker_count_.load(std::memory_order_relaxed));
518 }
519 }
520
521 // Create a tracker with the acquired memory and set it as the tracker
522 // for this particular thread in thread-local-storage.
523 DCHECK(mem_base);
524 ManagedActivityTracker* tracker =
525 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
526 DCHECK(tracker->IsValid());
527 this_thread_tracker_.Set(tracker);
528 int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
529
530 UMA_HISTOGRAM_ENUMERATION("UMA.ActivityTracker.ThreadTrackers.Count",
531 old_count + 1, kMaxThreadCount);
532 return tracker;
533 }
534
535 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
536 ThreadActivityTracker* tracker =
537 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
538 if (tracker) {
539 this_thread_tracker_.Free();
540 delete tracker;
541 }
542 }
543
544 GlobalActivityTracker::GlobalActivityTracker(
545 std::unique_ptr<PersistentMemoryAllocator> allocator,
546 int stack_depth)
547 : allocator_(std::move(allocator)),
548 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
549 this_thread_tracker_(&OnTLSDestroy),
550 thread_tracker_count_(0),
551 available_memories_count_(0) {
552 // Clear the available-memories array.
553 memset(available_memories_, 0, sizeof(available_memories_));
554
555 // Ensure the passed memory is valid and empty (iterator finds nothing).
556 uint32_t type;
557 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
558
559 // Ensure that there is no other global object and then make this one such.
560 DCHECK(!g_tracker_);
561 g_tracker_ = this;
562 }
563
564 GlobalActivityTracker::~GlobalActivityTracker() {
565 DCHECK_EQ(g_tracker_, this);
566 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
567 g_tracker_ = nullptr;
568 }
569
570 void GlobalActivityTracker::ReturnTrackerMemory(
571 ManagedActivityTracker* tracker) {
572 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
573 void* mem_base = tracker->mem_base_;
574
575 // Zero the memory so that it is ready for use if needed again later. It's
576 // better to clear the memory now, when a thread is exiting, than to do it
577 // when it is first needed by a thread doing actual work.
578 memset(mem_base, 0, stack_memory_size_);
579
580 // Remove the destructed tracker from the set of known ones.
581 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
582 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
583
584 // Deal with the memory that was used by the tracker.
585 if (mem_reference) {
586 // The memory was within the persistent memory allocator. Change its type
587 // so that iteration won't find it.
588 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree,
589 kTypeIdActivityTracker);
590 // There is no way to free memory from a persistent allocator so instead
591 // push it on the internal list of available memory blocks.
592 while (true) {
593 // Get the existing count of available memories and ensure we won't
594 // overflow the array. Acquire the values in the array.
595 int count = available_memories_count_.load(std::memory_order_acquire);
596 if (count >= kMaxThreadCount) {
597 NOTREACHED();
598 // Storage is full. Just forget about this memory. It won't be re-used
599 // but there's no real loss.
600 break;
601 }
602
603 // Write the reference of the memory being returned to this slot in the
604 // array. Empty slots have a value of zero so do an atomic compare-and-
605 // exchange to ensure that a race condition doesn't exist with another
606 // thread doing the same.
607 PersistentMemoryAllocator::Reference mem_expected = 0;
608 if (!available_memories_[count].compare_exchange_weak(
609 mem_expected, mem_reference, std::memory_order_release,
610 std::memory_order_relaxed)) {
611 continue; // Try again.
612 }
613
614 // Increment the count, releasing the value written to the array. This
615 // could fail if a simultaneous "pop" operation decremented the counter.
616 // If that happens, clear the array slot and start over. Do a "strong"
617 // exchange to avoid spurious retries that can occur with a "weak" one.
618 int expected = count; // Updated by compare/exchange.
619 if (!available_memories_count_.compare_exchange_strong(
620 expected, count + 1, std::memory_order_release,
621 std::memory_order_relaxed)) {
622 available_memories_[count].store(0, std::memory_order_relaxed);
623 continue;
624 }
625
626 // Count was successfully incremented to reflect the newly added value.
627 break;
628 }
629 } else {
630 // The memory was allocated from the process heap. This shouldn't happen
631 // because the persistent memory segment should be big enough for all
632 // thread stacks but it's better to support falling back to allocation
633 // from the heap rather than crash. Everything will work as normal but
634 // the data won't be persisted.
635 delete[] reinterpret_cast<char*>(mem_base);
636 }
637 }
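
Taken together, ReturnTrackerMemory() and CreateTrackerForCurrentThread() maintain a small lock-free free-list: a fixed array of references plus an atomic count, where a push claims the empty slot at |count| before publishing the new count, and a pop reserves an entry by decrementing the count before clearing the slot. A miniature of that scheme; kMaxSlots, g_free, PushFree, and PopFree are invented names, and zero stands for an empty slot as in the code above:

    #include <atomic>
    #include <cstdint>

    constexpr int kMaxSlots = 8;
    // Static storage, so both the slots and the count start out zeroed.
    std::atomic<uint32_t> g_free[kMaxSlots];  // zero means "slot empty"
    std::atomic<int> g_free_count{0};

    // Push a non-zero reference onto the free-list; drop it if it is full.
    void PushFree(uint32_t ref) {
      while (true) {
        int count = g_free_count.load(std::memory_order_acquire);
        if (count >= kMaxSlots)
          return;  // Full: forget the reference, as the code above does.
        uint32_t expected_ref = 0;
        if (!g_free[count].compare_exchange_weak(expected_ref, ref,
                                                 std::memory_order_release,
                                                 std::memory_order_relaxed)) {
          continue;  // Another thread owns this slot; re-read the count.
        }
        int expected_count = count;
        if (!g_free_count.compare_exchange_strong(expected_count, count + 1,
                                                  std::memory_order_release,
                                                  std::memory_order_relaxed)) {
          g_free[count].store(0, std::memory_order_relaxed);  // Undo, retry.
          continue;
        }
        return;  // Published.
      }
    }

    // Pop a reference, or return zero if none is available.
    uint32_t PopFree() {
      int count = g_free_count.load(std::memory_order_acquire);
      while (count > 0) {
        uint32_t ref = g_free[count - 1].load(std::memory_order_acquire);
        // On failure |count| is reloaded, which re-drives the while condition.
        if (!g_free_count.compare_exchange_weak(count, count - 1,
                                                std::memory_order_acquire,
                                                std::memory_order_acquire)) {
          continue;
        }
        g_free[count - 1].store(0, std::memory_order_relaxed);
        return ref;
      }
      return 0;
    }
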
638
639 // static
640 void GlobalActivityTracker::OnTLSDestroy(void* value) {
641 delete reinterpret_cast<ManagedActivityTracker*>(value);
642 }
643
644
645 ScopedActivity::ScopedActivity(const tracked_objects::Location& location,
646 uint8_t action,
647 uint32_t id,
648 uint32_t info)
649 : GlobalActivityTracker::ScopedThreadActivity(
650 location.program_counter(),
651 static_cast<ThreadActivityTracker::ActivityType>(
652 ThreadActivityTracker::ACT_GENERIC | action),
653 ThreadActivityTracker::ActivityData::ForGeneric(id, info),
654 /*lock_allowed=*/true),
655 id_(id) {
656 // The action must not affect the category bits of the activity type.
657 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK);
658 }
659
660 void ScopedActivity::ChangeAction(uint8_t action) {
661 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK);
662 ChangeTypeAndData(static_cast<ThreadActivityTracker::ActivityType>(
663 ThreadActivityTracker::ACT_GENERIC | action),
664 ThreadActivityTracker::kNullActivityData);
665 }
666
667 void ScopedActivity::ChangeInfo(uint32_t info) {
668 ChangeTypeAndData(ThreadActivityTracker::ACT_NULL,
669 ThreadActivityTracker::ActivityData::ForGeneric(id_, info));
670 }
671
672 void ScopedActivity::ChangeActionAndInfo(uint8_t action, uint32_t info) {
673 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK);
674 ChangeTypeAndData(static_cast<ThreadActivityTracker::ActivityType>(
675 ThreadActivityTracker::ACT_GENERIC | action),
676 ThreadActivityTracker::ActivityData::ForGeneric(id_, info));
677 }
678
679 ScopedTaskRunActivity::ScopedTaskRunActivity(const base::PendingTask& task)
680 : GlobalActivityTracker::ScopedThreadActivity(
681 task.posted_from.program_counter(),
682 ThreadActivityTracker::ACT_TASK_RUN,
683 ThreadActivityTracker::ActivityData::ForTask(task.sequence_num),
684 /*lock_allowed=*/true) {}
685
686 ScopedLockAcquireActivity::ScopedLockAcquireActivity(
687 const base::internal::LockImpl* lock)
688 : GlobalActivityTracker::ScopedThreadActivity(
689 nullptr, // TODO(bcwhite): Find a real address.
690 ThreadActivityTracker::ACT_LOCK_ACQUIRE,
691 ThreadActivityTracker::ActivityData::ForLock(lock),
692 /*lock_allowed=*/false) {}
693
694 ScopedEventWaitActivity::ScopedEventWaitActivity(
695 const base::WaitableEvent* event)
696 : GlobalActivityTracker::ScopedThreadActivity(
697 nullptr, // TODO(bcwhite): Find a real address.
698 ThreadActivityTracker::ACT_EVENT_WAIT,
699 ThreadActivityTracker::ActivityData::ForEvent(event),
700 /*lock_allowed=*/true) {}
701
702 ScopedThreadJoinActivity::ScopedThreadJoinActivity(
703 const base::PlatformThreadHandle* thread)
704 : GlobalActivityTracker::ScopedThreadActivity(
705 nullptr, // TODO(bcwhite): Find a real address.
706 ThreadActivityTracker::ACT_THREAD_JOIN,
707 ThreadActivityTracker::ActivityData::ForThread(*thread),
708 /*lock_allowed=*/true) {}
709
710 #if !defined(OS_NACL) && !defined(OS_IOS)
711 ScopedProcessWaitActivity::ScopedProcessWaitActivity(
712 const base::Process* process)
713 : GlobalActivityTracker::ScopedThreadActivity(
714 nullptr, // TODO(bcwhite): Find a real address.
715 ThreadActivityTracker::ACT_PROCESS_WAIT,
716 ThreadActivityTracker::ActivityData::ForProcess(process->Pid()),
717 /*lock_allowed=*/true) {}
718 #endif
719
720 } // namespace debug
721 } // namespace base