OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/debug/activity_tracker.h" | |
6 | |
7 #include <atomic> | |
8 | |
9 #include "base/files/file.h" | |
10 #include "base/files/file_path.h" | |
11 #include "base/files/memory_mapped_file.h" | |
12 #include "base/logging.h" | |
13 #include "base/memory/ptr_util.h" | |
14 #include "base/metrics/field_trial.h" | |
15 #include "base/pending_task.h" | |
16 #include "base/stl_util.h" | |
17 #include "base/strings/string_util.h" | |
18 | |
19 namespace base { | |
20 namespace debug { | |
21 | |
namespace {

// A number that identifies the memory as having been initialized. It's
// arbitrary but happens to be the first 8 bytes of SHA1(ThreadActivityTracker).
// A version number is added on so that major structure changes won't try to
// read an older version (since the cookie won't match).
const uint64_t kHeaderCookie = 0xC0029B240D4A3092ULL + 1;  // v1

// The minimum depth a stack should support.
const int kMinStackDepth = 2;

}  // namespace
34 | |
35 | |
36 #if !defined(OS_NACL) // NACL doesn't support any kind of file access in build. | |
37 void SetupGlobalActivityTrackerFieldTrial(const FilePath& file) { | |
38 const Feature kActivityTrackerFeature{ | |
39 "ActivityTracking", FEATURE_DISABLED_BY_DEFAULT | |
40 }; | |
41 | |
42 if (!base::FeatureList::IsEnabled(kActivityTrackerFeature)) | |
43 return; | |
44 | |
45 // TODO(bcwhite): Adjust these numbers once there is real data to show | |
46 // just how much of an arena is necessary. | |
47 const size_t kMemorySize = 1 << 20; // 1 MiB | |
48 const int kStackDepth = 3; | |
49 const uint64_t kAllocatorId = 0; | |
50 const char kAllocatorName[] = "ActivityTracker"; | |
51 | |
52 GlobalActivityTracker::CreateWithFile( | |
53 file.AddExtension(PersistentMemoryAllocator::kFileExtension), | |
54 kMemorySize, kAllocatorId, kAllocatorName, kStackDepth); | |
55 } | |
56 #endif // !defined(OS_NACL) | |
57 | |
// This information is kept for every thread that is tracked. It is filled
// the very first time the thread is seen. All fields must be of exact sizes
// so there is no issue moving between 32 and 64-bit builds. Because this
// layout is persisted (the cookie carries a version number), fields must not
// be reordered, resized, or removed without bumping that version.
struct ThreadActivityTracker::Header {
  // This unique number indicates a valid initialization of the memory.
  uint64_t cookie;

  // The thread-id to which this data belongs. This identifier is not
  // guaranteed to mean anything, just to be unique among all active
  // trackers.
  uint64_t thread_id;

  // The start-time and start-ticks when the data was created. Each activity
  // record has a |time_ticks| value that can be converted to a "wall time"
  // with these two values.
  int64_t start_time;
  int64_t start_ticks;

  // The number of Activity slots in the data.
  uint32_t slots;

  // The current depth of the stack. This may be greater than the number of
  // slots. If the depth exceeds the number of slots, the newest entries
  // won't be recorded.
  std::atomic<uint32_t> depth;

  // A memory location used to indicate if changes have been made to the stack
  // that would invalidate an in-progress read of its contents. The active
  // tracker will zero the value whenever something gets popped from the
  // stack. A monitoring tracker can write a non-zero value here, copy the
  // stack contents, and read the value to know, if it is still non-zero, that
  // the contents didn't change while being copied.
  std::atomic<uint32_t> unchanged;

  // The name of the thread (up to a maximum length). Dynamic-length names
  // are not practical since the memory has to come from the same persistent
  // allocator that holds this structure and to which this object has no
  // reference.
  char name[32];
};
98 | |
// Wraps caller-provided memory: the Header lives at the start of |base| and
// the activity stack immediately follows it. The memory must be either
// all-zero (fresh) or hold a previously-initialized tracker; anything else
// leaves the tracker invalid (|valid_| stays false).
ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
    : header_(static_cast<Header*>(base)),
      stack_(reinterpret_cast<StackEntry*>(reinterpret_cast<char*>(base) +
                                           sizeof(Header))),
      stack_slots_((size - sizeof(Header)) / sizeof(StackEntry)) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(base);

  // Ensure there is enough space for the header and at least a few records.
  DCHECK_LE(sizeof(Header) + kMinStackDepth * sizeof(StackEntry), size);

  // Ensure that the |stack_slots_| calculation didn't overflow.
  DCHECK_GE(std::numeric_limits<uint32_t>::max(),
            (size - sizeof(Header)) / sizeof(StackEntry));

  // Provided memory should either be completely initialized or all zeros.
  if (header_->cookie == 0) {
    // This is a new file. Double-check other fields and then initialize.
    DCHECK_EQ(0U, header_->thread_id);
    DCHECK_EQ(0, header_->start_time);
    DCHECK_EQ(0, header_->start_ticks);
    DCHECK_EQ(0U, header_->slots);
    DCHECK_EQ(0U, header_->depth.load(std::memory_order_relaxed));
    DCHECK_EQ(0U, header_->unchanged.load(std::memory_order_relaxed));
    DCHECK_EQ(0, stack_[0].time_ticks);
    DCHECK_EQ(0U, stack_[0].source_address);
    DCHECK_EQ(0U, stack_[0].data.task.sequence_id);

    header_->cookie = kHeaderCookie;
    header_->thread_id = static_cast<uint64_t>(PlatformThread::CurrentId());
    header_->start_time = base::Time::Now().ToInternalValue();
    header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
    header_->slots = stack_slots_;
    strlcpy(header_->name, PlatformThread::GetName(), sizeof(header_->name));
    valid_ = true;
  } else {
    // This is a file with existing data. Perform basic consistency checks:
    // the cookie (with embedded version) must match, the slot count must
    // agree with the provided size, and the recorded start time can't be in
    // the future. Failing any check leaves the tracker invalid.
    if (header_->cookie != kHeaderCookie ||
        header_->slots != stack_slots_ ||
        header_->start_time > base::Time::Now().ToInternalValue())
      return;
    valid_ = true;
  }
}
143 | |
144 ThreadActivityTracker::~ThreadActivityTracker() {} | |
145 | |
// Records the start of an activity by appending an entry to this thread's
// activity stack. Must be called on the tracked thread, except for ACT_LOCK
// activities which may re-enter via the thread-checker's own lock.
void ThreadActivityTracker::PushActivity(const void* source,
                                         ActivityType activity,
                                         const StackEntryData& data) {
  // A thread-checker creates a lock to check the thread-id which means
  // re-entry into this code when locks are being tracked.
  DCHECK(activity == ACT_LOCK || thread_checker_.CalledOnValidThread());

  // Get the current depth of the stack. No access to other memory guarded
  // by this variable is done here so a "relaxed" load is acceptable.
  uint32_t depth = header_->depth.load(std::memory_order_relaxed);

  // Handle the case where the stack depth has exceeded the storage capacity.
  // Extra entries will be lost leaving only the base of the stack.
  if (depth >= stack_slots_) {
    // Since no other memory is being modified, a "relaxed" store is acceptable.
    header_->depth.store(depth + 1, std::memory_order_relaxed);
    return;
  }

  // Get a pointer to the next entry and load it. No atomicity is required
  // here because the memory is known only to this thread. It will be made
  // known to other threads once the depth is incremented.
  StackEntry* entry = &stack_[depth];
  entry->time_ticks = base::TimeTicks::Now().ToInternalValue();
  entry->source_address = reinterpret_cast<uintptr_t>(source);
  entry->activity_type = activity;
  entry->data = data;

  // Save the incremented depth. Because this guards |entry| memory filled
  // above that may be read by another thread once the recorded depth changes,
  // a "release" store is required.
  header_->depth.store(depth + 1, std::memory_order_release);
}
179 | |
// Records the end of the most recent activity. |source| must match the
// address passed to the corresponding PushActivity (DCHECKed when the entry
// was actually stored, i.e. depth did not exceed capacity).
void ThreadActivityTracker::PopActivity(const void* source) {
  // Do an atomic decrement of the depth. No changes to stack entries guarded
  // by this variable is done here so a "relaxed" operation is acceptable.
  // |depth| will receive the value before it was modified.
  uint32_t depth = header_->depth.fetch_sub(1, std::memory_order_relaxed);

  // Validate that everything is running correctly. Entries beyond
  // |stack_slots_| were never written, so only check recorded ones.
  DCHECK_LT(0U, depth);
  if (depth <= stack_slots_) {
    DCHECK_EQ(reinterpret_cast<uintptr_t>(source),
              stack_[depth - 1].source_address);
    DCHECK(stack_[depth - 1].activity_type == ACT_LOCK ||
           thread_checker_.CalledOnValidThread());
  }

  // The stack has shrunk meaning that some other thread trying to copy the
  // contents for reporting purposes could get bad data. That thread would
  // have written a non-zero value into |unchanged|; clearing it here will
  // let that thread detect that something did change. It doesn't matter
  // when this is done relative to the atomic |depth| operation above so a
  // "relaxed" access is acceptable.
  header_->unchanged.store(0, std::memory_order_relaxed);
}
203 | |
204 // static | |
205 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { | |
206 return static_cast<size_t>(stack_depth) * sizeof(StackEntry) + sizeof(Header); | |
207 } | |
208 | |
// Creates an analyzer that aliases the memory of an existing tracker; that
// memory must stay valid for the analyzer's lifetime.
ThreadActivityAnalyzer::ThreadActivityAnalyzer(ThreadActivityTracker* tracker)
    : ThreadActivityAnalyzer(
          tracker->header_,
          ThreadActivityTracker::SizeForStackDepth(tracker->stack_slots_)) {}
213 | |
// Creates an analyzer over a tracker record stored inside a persistent
// memory allocator, identified by |reference|.
ThreadActivityAnalyzer::ThreadActivityAnalyzer(
    PersistentMemoryAllocator* allocator,
    PersistentMemoryAllocator::Reference reference)
    : ThreadActivityAnalyzer(allocator->GetAsObject<char>(
                                 reference,
                                 GlobalActivityTracker::kTypeIdActivityTracker),
                             allocator->GetAllocSize(reference)) {}
221 | |
// Creates an analyzer over raw tracker memory; all other constructors
// delegate here. The internal tracker validates the memory itself.
ThreadActivityAnalyzer::ThreadActivityAnalyzer(void* base, size_t size)
    : tracker_(base, size) {}
224 | |
225 ThreadActivityAnalyzer::~ThreadActivityAnalyzer() {} | |
226 | |
// Copies the tracked thread's activity stack into |snapshot|, retrying if a
// concurrent pop invalidates the copy (detected via the |unchanged| flag).
// Returns the stack depth, which may be larger than the number of copied
// entries if the stack overflowed its storage, or 0 if the data is invalid
// or the stack is empty. After too many failed attempts the depth is still
// returned but |snapshot| is left empty.
uint32_t ThreadActivityAnalyzer::SnapshotStack(
    std::vector<StackEntry>* snapshot) {
  // It's possible for the data to change while reading it in such a way that it
  // invalidates the read. Make several attempts but don't try forever.
  const int kMaxAttempts = 10;
  uint32_t depth;

  // Start with an empty return stack.
  snapshot->clear();

  // Stop here if the data isn't valid.
  if (!tracker_.is_valid())
    return 0;

  ThreadActivityTracker::Header* header = tracker_.header_;
  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
    // Write a non-zero value to |unchanged| so it's possible to detect at
    // the end that nothing has changed since copying the data began.
    header->unchanged.store(1, std::memory_order_relaxed);

    // Fetching the current depth also "acquires" the contents of the stack.
    depth = header->depth.load(std::memory_order_acquire);
    if (depth == 0)
      return 0;

    // Copy the existing contents. Memcpy is used for speed.
    uint32_t count = std::min(depth, tracker_.stack_slots_);
    snapshot->resize(count);
    memcpy(&(*snapshot)[0], tracker_.stack_, count * sizeof(StackEntry));

    // Check to make sure everything was unchanged during the copy. A pop on
    // the tracked thread would have zeroed |unchanged|, forcing a retry.
    if (header->unchanged.load(std::memory_order_relaxed))
      return depth;
  }

  // If all attempts failed, just return the depth with no content.
  snapshot->clear();
  return depth;
}
266 | |
267 | |
// The process-wide singleton, set by the GlobalActivityTracker constructor
// and cleared only by its destructor (which, per the comments there, only
// runs in tests).
GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr;
269 | |
// A per-thread tracker whose backing memory is owned by the global tracker.
// |mem_reference| is the persistent-allocator reference for |base| (zero if
// the memory came from the heap instead); both are remembered so they can be
// handed back to the global tracker on destruction.
GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
    PersistentMemoryAllocator::Reference mem_reference,
    void* base,
    size_t size)
    : ThreadActivityTracker(base, size),
      mem_reference_(mem_reference),
      mem_base_(base) {}
277 | |
GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
  // The global |g_tracker_| must point to the owner of this class since all
  // objects of this type must be destructed before |g_tracker_| can be changed
  // (something that only occurs in tests).
  DCHECK(g_tracker_);
  // Hand the backing memory back so it can be re-used by a future thread.
  g_tracker_->ReturnTrackerMemory(this, mem_reference_, mem_base_);
}
285 | |
// static
void GlobalActivityTracker::CreateWithAllocator(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    int stack_depth) {
  // There's no need to do anything with the result. It is self-managing:
  // the constructor installs the object as |g_tracker_|.
  new GlobalActivityTracker(std::move(allocator), stack_depth);
}
292 | |
// static
// Creates the global tracker backed by process-local (non-persisted) memory
// of |size| bytes; |id| and |name| identify the allocator segment.
void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
                                                  uint64_t id,
                                                  StringPiece name,
                                                  int stack_depth) {
  CreateWithAllocator(
      WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)),
      stack_depth);
}
302 | |
#if !defined(OS_NACL)
// static
// Creates the global tracker backed by a memory-mapped file so activity data
// survives a crash of this process.
void GlobalActivityTracker::CreateWithFile(const FilePath& file_path,
                                           size_t size,
                                           uint64_t id,
                                           StringPiece name,
                                           int stack_depth) {
  DCHECK(!file_path.empty());

  // Create the file, overwriting anything that was there previously, and set
  // the length. This will create a space that is zero-filled, a requirement
  // for operation.
  File file(file_path, File::FLAG_CREATE_ALWAYS | File::FLAG_READ |
                           File::FLAG_WRITE | File::FLAG_SHARE_DELETE);
  DCHECK(file.IsValid());
  file.SetLength(size);

  // Map the file into memory and make it globally available.
  // NOTE(review): file creation and mapping failures are only DCHECKed;
  // a release build would continue with an unusable mapping — confirm this
  // is acceptable for the diagnostic nature of this code.
  std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile());
  bool success =
      mapped_file->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  DCHECK(success);
  CreateWithAllocator(WrapUnique(new FilePersistentMemoryAllocator(
                          std::move(mapped_file), size, id, name, false)),
                      stack_depth);
}
#endif  // !defined(OS_NACL)
330 | |
// Creates (and registers in TLS) the activity tracker for the calling
// thread, preferring to recycle a memory block released by an exited thread
// before allocating a new one from the persistent segment. Falls back to
// heap memory (not persisted) if the segment is exhausted.
ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
  // Only one tracker may exist per thread.
  DCHECK(!this_thread_tracker_.Get());

  PersistentMemoryAllocator::Reference mem_reference = 0;
  void* mem_base = nullptr;

  // Get the current count of available memories, acquiring the array values.
  int count = available_memories_count_.load(std::memory_order_acquire);
  while (count > 0) {
    // There is a memory block that was previously released (and zero'd) so
    // just re-use that rather than allocating a new one.
    mem_reference =
        available_memories_[count - 1].load(std::memory_order_relaxed);
    DCHECK(mem_reference);

    // Decrement the count indicating that the value has been taken. If this
    // fails then something else, another thread doing push or pop, has changed
    // the stack; retry if so. |count| will receive the existing value.
    if (!available_memories_count_.compare_exchange_weak(
            count, count - 1,
            std::memory_order_acquire, std::memory_order_acquire)) {
      continue;
    }

    // Clear the value just read from the array so that the "push" operation
    // knows there is no value there and will work correctly.
    available_memories_[count - 1].store(0, std::memory_order_relaxed);

    // Turn the reference back into one of the activity-tracker type.
    mem_base = allocator_->GetAsObject<char>(mem_reference,
                                             kTypeIdActivityTrackerFree);
    DCHECK(mem_base);
    DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
    allocator_->SetType(mem_reference, kTypeIdActivityTracker);

    // Success.
    break;
  }

  if (count == 0) {
    // Allocate a block of memory from the persistent segment.
    mem_reference =
        allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker);
    if (mem_reference) {
      // Success. Convert the reference to an actual memory address.
      mem_base =
          allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
      // Make the allocation iterable so it can be found by other processes.
      allocator_->MakeIterable(mem_reference);
    } else {
      // Failure. This shouldn't happen but can if the persistent segment
      // wasn't sized for the number of threads actually created.
      NOTREACHED();
      // Handle it gracefully by allocating the required memory from the
      // heap; tracking still works but this thread's data isn't persisted.
      mem_base = new char[stack_memory_size_];
      memset(mem_base, 0, stack_memory_size_);
    }
  }

  // Create a tracker with the acquired memory and set it as the tracker
  // for this particular thread in thread-local-storage.
  DCHECK(mem_base);
  ManagedActivityTracker* tracker =
      new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
  DCHECK(tracker->is_valid());
  this_thread_tracker_.Set(tracker);
  thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);

  return tracker;
}
401 | |
402 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { | |
403 ThreadActivityTracker* tracker = | |
404 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); | |
405 if (tracker) { | |
406 this_thread_tracker_.Free(); | |
407 delete tracker; | |
408 } | |
409 } | |
410 | |
// Takes ownership of |allocator| (which must be empty), installs this object
// as the process-wide singleton, and immediately creates a tracker for the
// current thread.
GlobalActivityTracker::GlobalActivityTracker(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    int stack_depth)
    : allocator_(std::move(allocator)),
      stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
      this_thread_tracker_(&OnTLSDestroy),
      thread_tracker_count_(0),
      available_memories_count_(0) {
  // Clear the available-memories array.
  memset(available_memories_, 0, sizeof(available_memories_));

  // Ensure the passed memory is valid and empty (iterator finds nothing).
  uint32_t type;
  DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));

  // Ensure that there is no other global object and then make this one such.
  DCHECK(!g_tracker_);
  g_tracker_ = this;

  // Create a tracker for this thread since it is known.
  CreateTrackerForCurrentThread();
}
433 | |
GlobalActivityTracker::~GlobalActivityTracker() {
  // All per-thread trackers must already be gone: their destructors call
  // ReturnTrackerMemory on this object, so the count must have reached zero.
  DCHECK_EQ(g_tracker_, this);
  DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
  g_tracker_ = nullptr;
}
439 | |
// Called from ManagedActivityTracker's destructor to hand back a tracker's
// backing memory. Persistent-segment memory is zeroed and pushed onto the
// lock-free |available_memories_| stack for re-use; heap fallback memory is
// simply freed.
void GlobalActivityTracker::ReturnTrackerMemory(
    ManagedActivityTracker* tracker,
    PersistentMemoryAllocator::Reference mem_reference,
    void* mem_base) {
  // Zero the memory so that it is ready for use if needed again later. It's
  // better to clear the memory now, when a thread is exiting, than to do it
  // when it is first needed by a thread doing actual work.
  memset(mem_base, 0, stack_memory_size_);

  // Remove the destructed tracker from the set of known ones.
  DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
  thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);

  // Deal with the memory that was used by the tracker.
  if (mem_reference) {
    // The memory was within the persistent memory allocator. Change its type
    // so that iteration won't find it.
    allocator_->SetType(mem_reference, kTypeIdActivityTrackerFree);
    // There is no way to free memory from a persistent allocator so instead
    // push it on the internal list of available memory blocks.
    while (true) {
      // Get the existing count of available memories and ensure we won't
      // burst the array. Acquire the values in the array.
      int count = available_memories_count_.load(std::memory_order_acquire);
      if (count >= kMaxThreadCount) {
        NOTREACHED();
        // Storage is full. Just forget about this memory. It won't be re-used
        // but there's no real loss.
        break;
      }

      // Write the reference of the memory being returned to this slot in the
      // array. Empty slots have a value of zero so do an atomic compare-and-
      // exchange to ensure that a race condition doesn't exist with another
      // thread doing the same.
      PersistentMemoryAllocator::Reference mem_expected = 0;
      if (!available_memories_[count].compare_exchange_weak(
              mem_expected, mem_reference,
              std::memory_order_release, std::memory_order_relaxed)) {
        continue;  // Try again.
      }

      // Increment the count, releasing the value written to the array. This
      // could fail if a simultaneous "pop" operation decremented the counter.
      // If that happens, clear the array slot and start over. Do a "strong"
      // exchange to avoid spurious retries that can occur with a "weak" one.
      int expected = count;  // Updated by compare/exchange.
      if (!available_memories_count_.compare_exchange_strong(
              expected, count + 1,
              std::memory_order_release, std::memory_order_relaxed)) {
        available_memories_[count].store(0, std::memory_order_relaxed);
        continue;
      }

      // Count was successfully incremented to reflect the new value added.
      break;
    }
  } else {
    // The memory was allocated from the process heap. This shouldn't happen
    // because the persistent memory segment should be big enough for all
    // thread stacks but it's better to support falling back to allocation
    // from the heap rather than crash. Everything will work as normal but
    // the data won't be persisted.
    delete[] reinterpret_cast<char*>(mem_base);
  }
}
506 | |
507 // static | |
508 void GlobalActivityTracker::OnTLSDestroy(void* value) { | |
509 delete reinterpret_cast<ManagedActivityTracker*>(value); | |
510 } | |
511 | |
512 | |
// Scoped recorder for task execution: pushes an ACT_TASK entry identified by
// the task's posting location and sequence number; popped on destruction.
ScopedTaskActivity::ScopedTaskActivity(const base::PendingTask& task)
    : GlobalActivityTracker::ScopedThreadActivity(
          task.posted_from.program_counter(),
          ThreadActivityTracker::ACT_TASK,
          ThreadActivityTracker::StackEntryData::ForTask(task.sequence_num),
          /*lock_allowed=*/true) {}
519 | |
// Scoped recorder for lock acquisition. |lock_allowed| is false because
// taking another lock here could recurse into lock tracking.
ScopedLockActivity::ScopedLockActivity(const base::internal::LockImpl* lock)
    : GlobalActivityTracker::ScopedThreadActivity(
          nullptr,
          ThreadActivityTracker::ACT_LOCK,
          ThreadActivityTracker::StackEntryData::ForLock(lock),
          /*lock_allowed=*/false) {}
526 | |
// Scoped recorder for waiting on a WaitableEvent; popped on destruction.
ScopedEventActivity::ScopedEventActivity(const base::WaitableEvent* event)
    : GlobalActivityTracker::ScopedThreadActivity(
          nullptr,
          ThreadActivityTracker::ACT_EVENT,
          ThreadActivityTracker::StackEntryData::ForEvent(event),
          /*lock_allowed=*/true) {}
533 | |
534 } // namespace debug | |
535 } // namespace base | |
OLD | NEW |