// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/activity_tracker.h"

#include <atomic>

#include "base/feature_list.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram_macros.h"
#include "base/pending_task.h"
#include "base/process/process.h"
#include "base/process/process_handle.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/threading/platform_thread.h"

namespace base {
namespace debug {

namespace {

// A number that identifies the memory as having been initialized. It's
// arbitrary but happens to be the first 8 bytes of SHA1(ThreadActivityTracker).
// A version number is mixed in so that, after a major structural change, a
// reader won't try to interpret data written by an older version (the
// cookies won't match).
const uint64_t kHeaderCookie = 0xC0029B240D4A3092ULL + 1;  // v1

// The minimum depth a stack should support.
const int kMinStackDepth = 2;

}  // namespace


#if !defined(OS_NACL)  // NaCl builds don't support any kind of file access.
void SetupGlobalActivityTrackerFieldTrial(const FilePath& file) {
  const Feature kActivityTrackerFeature{
      "ActivityTracking", FEATURE_DISABLED_BY_DEFAULT
  };

  if (!base::FeatureList::IsEnabled(kActivityTrackerFeature))
    return;

  // TODO(bcwhite): Adjust these numbers once there is real data to show
  // just how much of an arena is necessary.
  const size_t kMemorySize = 1 << 20;  // 1 MiB
  const int kStackDepth = 4;
  const uint64_t kAllocatorId = 0;
  const char kAllocatorName[] = "ActivityTracker";

  GlobalActivityTracker::CreateWithFile(
      file.AddExtension(PersistentMemoryAllocator::kFileExtension),
      kMemorySize, kAllocatorId, kAllocatorName, kStackDepth);
}
#endif  // !defined(OS_NACL)
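As a usage sketch (not part of this change): an embedder would call the function above once during startup, after FeatureList initialization, passing a base path to which the allocator's standard file extension is appended internally. `GetStabilityDebugDir()` below is a hypothetical helper, not a real API:

    // Hypothetical wiring; only SetupGlobalActivityTrackerFieldTrial() is real.
    #include "base/debug/activity_tracker.h"
    #include "base/files/file_path.h"

    void InitDebugTracking() {
      // Assumed helper: returns a writable directory for debug files.
      base::FilePath dir = GetStabilityDebugDir();
      base::debug::SetupGlobalActivityTrackerFieldTrial(
          dir.AppendASCII("ActivityTracker"));
    }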


// This information is kept for every thread that is tracked. It is filled
// the very first time the thread is seen. All fields must be of exact sizes
// so there is no issue moving between 32 and 64-bit builds.
struct ThreadActivityTracker::Header {
  // This unique number indicates a valid initialization of the memory.
  uint64_t cookie;

  // The process-id and thread-id to which this data belongs. These identifiers
  // are not guaranteed to mean anything but are unique, in combination, among
  // all active trackers.
  int64_t process_id;
>> Sigurður Ásgeirsson (2016/06/14 15:28:13): if these things share a segment across multiple processes…
>> bcwhite (2016/06/14 19:48:45): Right. "Among all *active* trackers". Once a process…
  union {
    int64_t as_id;
    PlatformThreadHandle::Handle as_handle;
  } thread_ref;

  // The start-time and start-ticks when the data was created. Each activity
  // record has a |time_internal| value that can be converted to a "wall time"
  // with these two values.
  int64_t start_time;
  int64_t start_ticks;

  // The number of Activity slots in the data.
  uint32_t stack_slots;
>> Sigurður Ásgeirsson (2016/06/14 15:28:13): Does the analyzer guard against OOB reads if this…
>> bcwhite (2016/06/14 19:48:46): Yes. It validates the data structures during init…

  // The current depth of the stack. This may be greater than the number of
  // slots. If the depth exceeds the number of slots, the newest entries
  // won't be recorded.
  std::atomic<uint32_t> current_depth;
>> Sigurður Ásgeirsson (2016/06/14 15:28:13): if the underlying segment is shared, can I cause a…
>> bcwhite (2016/06/14 19:48:45): This field is only manipulated by the thread being tracked…

  // A memory location used to indicate if changes have been made to the stack
  // that would invalidate an in-progress read of its contents. The active
  // tracker will zero the value whenever something gets popped from the
  // stack. A monitoring tracker can write a non-zero value here, copy the
  // stack contents, and read the value to know, if it is still non-zero, that
  // the contents didn't change while being copied.
  std::atomic<uint32_t> stack_unchanged;

  // The name of the thread (up to a maximum length). Dynamic-length names
  // are not practical since the memory has to come from the same persistent
  // allocator that holds this structure and to which this object has no
  // reference.
  char thread_name[32];
};
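Because this header is written by one process and potentially read by another, possibly of a different bitness, its layout is a cross-process contract. A sketch of compile-time checks that could document that contract (they would have to live inside ThreadActivityTracker, since Header is a private struct, and they assume a lock-free std::atomic<uint32_t> plus <type_traits>):

    // Sketch only: compile-time documentation of the shared-memory layout.
    static_assert(std::is_standard_layout<Header>::value,
                  "Header must be standard-layout to be shared as raw memory");
    static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
                  "atomic<uint32_t> must not add storage (no internal lock)");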

// It doesn't matter what is contained in this (though it will be all zeros)
// as only the address of it is important.
const ThreadActivityTracker::ActivityData
    ThreadActivityTracker::kNullActivityData = {};

ThreadActivityTracker::ActivityData
ThreadActivityTracker::ActivityData::ForThread(
    const PlatformThreadHandle& handle) {
  // Header already has a conversion union; reuse that.
  ThreadActivityTracker::Header header;
  header.thread_ref.as_id = 0;  // Zero the union in case as_handle is smaller.
  header.thread_ref.as_handle = handle.platform_handle();
  return ForThread(header.thread_ref.as_id);
}
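For illustration, the widening trick above can be shown in isolation: zeroing the full 64-bit member first makes the unused high bytes deterministic when the handle type is narrower. The types here are stand-ins, not the real PlatformThreadHandle:

    #include <cstdint>

    using FakeHandle = uint32_t;  // Stand-in for a 32-bit platform handle.

    int64_t WidenHandle(FakeHandle h) {
      union {
        int64_t as_id;
        FakeHandle as_handle;
      } ref;
      ref.as_id = 0;      // Clear all 8 bytes first...
      ref.as_handle = h;  // ...then overwrite only sizeof(FakeHandle) of them.
      return ref.as_id;   // Same type-punning read the code above relies on.
    }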

ThreadActivityTracker::ActivitySnapshot::ActivitySnapshot() {}
ThreadActivityTracker::ActivitySnapshot::~ActivitySnapshot() {}


ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
    : header_(static_cast<Header*>(base)),
      stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
                                         sizeof(Header))),
      stack_slots_(
          static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(base);

  // Ensure that the thread reference doesn't exceed the size of the ID number.
  // This won't compile at the global scope because Header is a private struct.
  static_assert(
      sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id),
      "PlatformThreadHandle::Handle is too big to hold in 64-bit ID");

  // Ensure there is enough space for the header and at least a few records.
  DCHECK_LE(sizeof(Header) + kMinStackDepth * sizeof(Activity), size);

  // Ensure that the |stack_slots_| calculation didn't overflow.
  DCHECK_GE(std::numeric_limits<uint32_t>::max(),
            (size - sizeof(Header)) / sizeof(Activity));

  // Provided memory should either be completely initialized or all zeros.
  if (header_->cookie == 0) {
    // This is a new file. Double-check other fields and then initialize.
    DCHECK_EQ(0, header_->process_id);
    DCHECK_EQ(0, header_->thread_ref.as_id);
    DCHECK_EQ(0, header_->start_time);
    DCHECK_EQ(0, header_->start_ticks);
    DCHECK_EQ(0U, header_->stack_slots);
    DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
    DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
    DCHECK_EQ(0, stack_[0].time_internal);
    DCHECK_EQ(0U, stack_[0].source_address);
    DCHECK_EQ(0U, stack_[0].data.task.sequence_id);

    header_->process_id = GetCurrentProcId();
    header_->thread_ref.as_handle =
        PlatformThread::CurrentHandle().platform_handle();
    header_->start_time = base::Time::Now().ToInternalValue();
    header_->start_ticks = base::TimeTicks::Now().ToInternalValue();
    header_->stack_slots = stack_slots_;
    strlcpy(header_->thread_name, PlatformThread::GetName(),
            sizeof(header_->thread_name));
    header_->cookie = kHeaderCookie;
    valid_ = true;
    DCHECK(IsValid());
  } else {
    // This is a file with existing data. Perform basic consistency checks.
    valid_ = true;
    valid_ = IsValid();
  }
}

ThreadActivityTracker::~ThreadActivityTracker() {}

void ThreadActivityTracker::PushActivity(const void* source,
                                         ActivityType type,
                                         const ActivityData& data) {
  // A thread-checker creates a lock to check the thread-id, which means
  // re-entry into this code if lock acquisitions are being tracked.
  DCHECK(type == ACT_LOCK_ACQUIRE || thread_checker_.CalledOnValidThread());

  // Get the current depth of the stack. No access to other memory guarded
  // by this variable is done here so a "relaxed" load is acceptable.
  uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
>> Sigurður Ásgeirsson (2016/06/14 15:28:13): It seems you should be able to maintain the current…
>> bcwhite (2016/06/14 19:48:45): I suppose it could... but I really don't like that…

  // Handle the case where the stack depth has exceeded the storage capacity.
  // Extra entries will be lost, leaving only the base of the stack.
  if (depth >= stack_slots_) {
    // Since no other threads modify the data, no compare/exchange is needed.
    // Since no other memory is being modified, a "relaxed" store is acceptable.
    header_->current_depth.store(depth + 1, std::memory_order_relaxed);
    return;
  }

  // Get a pointer to the next activity slot and fill it in. No atomicity is
  // required here because the memory is known only to this thread. It will be
  // made known to other threads once the depth is incremented.
  Activity* activity = &stack_[depth];
  activity->time_internal = base::TimeTicks::Now().ToInternalValue();
  activity->source_address = reinterpret_cast<uintptr_t>(source);
  activity->activity_type = type;
  activity->data = data;

  // Save the incremented depth. Because this guards |activity| memory filled
  // above that may be read by another thread once the recorded depth changes,
  // a "release" store is required.
  header_->current_depth.store(depth + 1, std::memory_order_release);
}
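The push path is the classic single-writer publication idiom: fill the slot with plain stores, then advance the index with a release store so that a reader's paired acquire load of the index also observes the slot contents. Distilled to its essentials (all names hypothetical):

    #include <atomic>

    struct Slot { int payload; };
    Slot g_slot;                       // Written only by the owning thread.
    std::atomic<uint32_t> g_depth{0};  // The published index.

    void Producer() {                  // Owning thread, cf. PushActivity().
      g_slot.payload = 42;             // Plain store; not yet visible.
      g_depth.store(1, std::memory_order_release);  // Publishes the payload.
    }

    bool Consumer(int* out) {          // Any other thread, cf. Snapshot().
      if (g_depth.load(std::memory_order_acquire) == 0)  // Pairs with release.
        return false;
      *out = g_slot.payload;           // Guaranteed to observe 42.
      return true;
    }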

>> Sigurður Ásgeirsson (2016/06/14 15:28:13): Siggi: Continue from here.

void ThreadActivityTracker::ChangeActivity(const void* source,
                                           ActivityType type,
                                           const ActivityData& data) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(type != ACT_NULL || &data != &kNullActivityData);

  // Get the current depth of the stack.
  uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
  DCHECK_LT(0U, depth);

  // Update the information if it is being recorded (i.e. within the slot limit).
  if (depth <= stack_slots_) {
    Activity* activity = &stack_[depth - 1];
    DCHECK_EQ(reinterpret_cast<uintptr_t>(source), activity->source_address);

    if (type != ACT_NULL) {
      DCHECK_EQ(activity->activity_type & ACT_CATEGORY_MASK,
                type & ACT_CATEGORY_MASK);
      activity->activity_type = type;
    }

    if (&data != &kNullActivityData)
      activity->data = data;
  }
}

void ThreadActivityTracker::PopActivity(const void* source) {
  // Do an atomic decrement of the depth. No changes to stack entries guarded
  // by this variable are done here so a "relaxed" operation is acceptable.
  // |depth| will receive the value BEFORE it was modified.
  uint32_t depth =
      header_->current_depth.fetch_sub(1, std::memory_order_relaxed);

  // Validate that everything is running correctly.
  DCHECK_LT(0U, depth);
  if (depth <= stack_slots_) {
    DCHECK_EQ(reinterpret_cast<uintptr_t>(source),
              stack_[depth - 1].source_address);
    DCHECK(stack_[depth - 1].activity_type == ACT_LOCK_ACQUIRE ||
           thread_checker_.CalledOnValidThread());
  }

  // The stack has shrunk, meaning that some other thread trying to copy the
  // contents for reporting purposes could get bad data. That thread would
  // have written a non-zero value into |stack_unchanged|; clearing it here
  // will let that thread detect that something did change. This needs to
  // happen after the atomic |depth| operation above so a "release" store
  // is required.
  header_->stack_unchanged.store(0, std::memory_order_release);
}

bool ThreadActivityTracker::IsValid() const {
  if (header_->cookie != kHeaderCookie ||
      header_->process_id == 0 ||
      header_->thread_ref.as_id == 0 ||
      header_->start_time == 0 ||
      header_->start_ticks == 0 ||
      header_->stack_slots != stack_slots_) {
    return false;
  }

  return valid_;
}

bool ThreadActivityTracker::Snapshot(ActivitySnapshot* output_snapshot) const {
  DCHECK(output_snapshot);

  // There is no "called on valid thread" check for this method as it can be
  // called from other threads or even other processes. It is also the reason
  // why atomic operations must be used in certain places above.

  // It's possible for the data to change while reading it in such a way that
  // it invalidates the read. Make several attempts but don't try forever.
  const int kMaxAttempts = 10;
  uint32_t depth;

  // Stop here if the data isn't valid.
  if (!IsValid())
    return false;

  // Start with an empty return stack.
  output_snapshot->activity_stack.clear();

  for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
    // Remember the process and thread IDs to ensure they aren't replaced
    // during the snapshot operation.
    const int64_t starting_process_id = header_->process_id;
    const int64_t starting_thread_id = header_->thread_ref.as_id;

    // Write a non-zero value to |stack_unchanged| so it's possible to detect
    // at the end that nothing has changed since copying the data began. A
    // "cst" operation is required to ensure it occurs before everything else.
    header_->stack_unchanged.store(1, std::memory_order_seq_cst);

    // Fetching the current depth also "acquires" the contents of the stack.
    depth = header_->current_depth.load(std::memory_order_acquire);
    if (depth > 0) {
      // Copy the existing contents. Memcpy is used for speed.
      uint32_t count = std::min(depth, stack_slots_);
      output_snapshot->activity_stack.resize(count);
      memcpy(&output_snapshot->activity_stack[0], stack_,
             count * sizeof(Activity));
    }

    // Retry if something changed during the copy. A "cst" operation ensures
    // it must happen after all the above operations.
    if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
      continue;

    // Stack copied. Record its full depth.
    output_snapshot->activity_stack_depth = depth;

    // TODO(bcwhite): Snapshot other things here.

    // Get the general thread information.
    output_snapshot->thread_name = header_->thread_name;
>> Sigurður Ásgeirsson (2016/06/14 15:53:26): if thread_name is not zero-terminated (abused) this…
>> bcwhite (2016/06/14 19:48:46): Done.
    output_snapshot->process_id = header_->process_id;
    output_snapshot->thread_id = header_->thread_ref.as_id;

    // If the process or thread ID has changed then the tracker has exited and
    // the memory has been reused by a new one. Try again.
    if (output_snapshot->process_id != starting_process_id ||
        output_snapshot->thread_id != starting_thread_id) {
      continue;
    }

    // Only successful if the data is still valid once everything is done since
    // it's possible for the thread to end somewhere in the middle and all its
    // values become garbage.
    if (!IsValid())
      return false;

    // Change all the timestamps in the activities from "ticks" to "wall" time.
    const Time start_time = Time::FromInternalValue(header_->start_time);
    const int64_t start_ticks = header_->start_ticks;
    for (Activity& activity : output_snapshot->activity_stack) {
      activity.time_internal =
          (start_time +
           TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
              .ToInternalValue();
    }

    // Success!
    return true;
  }

  // Too many attempts.
  return false;
}
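Snapshot() is essentially an inverted seqlock: instead of the writer bumping a sequence number, the reader arms a flag that the writer (PopActivity) clears, and the copy is valid only if the flag survives. The same validation loop in miniature, under the same single-writer assumption (names hypothetical):

    #include <atomic>
    #include <cstring>

    int g_data[4];                         // Mutated only by the owner thread.
    std::atomic<uint32_t> g_unchanged{0};  // Cleared by the owner on changes.

    void OwnerPop() {                      // Writer side, cf. PopActivity().
      // ...shrink g_data's logical contents, then:
      g_unchanged.store(0, std::memory_order_release);
    }

    bool TryCopy(int (&out)[4]) {          // Reader side, cf. Snapshot().
      for (int attempt = 0; attempt < 10; ++attempt) {
        g_unchanged.store(1, std::memory_order_seq_cst);  // Arm the flag.
        std::memcpy(out, g_data, sizeof(out));            // Optimistic copy.
        if (g_unchanged.load(std::memory_order_seq_cst))  // Still armed?
          return true;                                    // No pop raced us.
      }
      return false;  // Too much churn; give up, as Snapshot() does.
    }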

// static
size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
  return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
}


GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr;

GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
    PersistentMemoryAllocator::Reference mem_reference,
    void* base,
    size_t size)
    : ThreadActivityTracker(base, size),
      mem_reference_(mem_reference),
      mem_base_(base) {}

GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() {
  // The global |g_tracker_| must point to the owner of this class since all
  // objects of this type must be destructed before |g_tracker_| can be changed
  // (something that only occurs in tests).
  DCHECK(g_tracker_);
  g_tracker_->ReturnTrackerMemory(this);
}
393 void GlobalActivityTracker::CreateWithAllocator( | |
394 std::unique_ptr<PersistentMemoryAllocator> allocator, | |
395 int stack_depth) { | |
396 // There's no need to do anything with the result. It is self-managing. | |
397 GlobalActivityTracker* global_tracker = | |
398 new GlobalActivityTracker(std::move(allocator), stack_depth); | |
399 // Create a tracker for this thread since it is known. | |
400 global_tracker->CreateTrackerForCurrentThread(); | |
401 } | |
402 | |
403 #if !defined(OS_NACL) | |
404 // static | |
405 void GlobalActivityTracker::CreateWithFile(const FilePath& file_path, | |
406 size_t size, | |
407 uint64_t id, | |
408 StringPiece name, | |
409 int stack_depth) { | |
410 DCHECK(!file_path.empty()); | |
411 | |
412 // Create and map the file into memory and make it globally available. | |
413 std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile()); | |
414 bool success = | |
415 mapped_file->Initialize(File(file_path, | |
416 File::FLAG_CREATE_ALWAYS | File::FLAG_READ | | |
417 File::FLAG_WRITE | File::FLAG_SHARE_DELETE), | |
418 {0, size}, MemoryMappedFile::READ_WRITE_EXTEND); | |
419 DCHECK(success); | |
420 CreateWithAllocator(WrapUnique(new FilePersistentMemoryAllocator( | |
421 std::move(mapped_file), size, id, name, false)), | |
422 stack_depth); | |
423 } | |
424 #endif // !defined(OS_NACL) | |

// static
void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
                                                  uint64_t id,
                                                  StringPiece name,
                                                  int stack_depth) {
  CreateWithAllocator(
      WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)),
      stack_depth);
}

ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
  DCHECK(!this_thread_tracker_.Get());

  PersistentMemoryAllocator::Reference mem_reference = 0;
  void* mem_base = nullptr;

  // Get the current count of available memories, acquiring the array values.
  int count = available_memories_count_.load(std::memory_order_acquire);
  while (count > 0) {
    // There is a memory block that was previously released (and zeroed) so
    // just re-use that rather than allocating a new one. Use "acquire" so
    // that the operations below cannot be re-ordered ahead of the load.
    mem_reference =
        available_memories_[count - 1].load(std::memory_order_acquire);
    DCHECK(mem_reference);

    // Decrement the count indicating that the value has been taken. If this
    // fails then something else, another thread doing push or pop, has changed
    // the stack; retry if so.
    // NOTE: |count| will be loaded with the existing value and affect the
    // "while" condition.
    if (!available_memories_count_.compare_exchange_weak(
            count, count - 1,
            std::memory_order_acquire, std::memory_order_acquire)) {
      continue;
    }

    // Clear the value just read from the array so that the "push" operation
    // knows there is no value there and will work correctly.
    available_memories_[count - 1].store(0, std::memory_order_relaxed);

    // Turn the reference back into one of the activity-tracker type.
    mem_base = allocator_->GetAsObject<char>(mem_reference,
                                             kTypeIdActivityTrackerFree);
    DCHECK(mem_base);
    DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
    allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
                           kTypeIdActivityTrackerFree);

    // Success.
    break;
  }

  // Handle the case where no previously-used memories are available.
  if (count == 0) {
    // Allocate a block of memory from the persistent segment.
    mem_reference =
        allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker);
    if (mem_reference) {
      // Success. Convert the reference to an actual memory address.
      mem_base =
          allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
      // Make the allocation iterable so it can be found by other processes.
      allocator_->MakeIterable(mem_reference);
    } else {
      // Failure. This shouldn't happen.
      NOTREACHED();
      // But if it does (probably because the allocator wasn't given enough
      // memory to satisfy all possible requests), handle it gracefully by
      // allocating the required memory from the heap.
      mem_base = new char[stack_memory_size_];
      memset(mem_base, 0, stack_memory_size_);
      // Report the thread-count at which the allocator was full so that the
      // failure can be seen and the underlying memory resized appropriately.
      UMA_HISTOGRAM_COUNTS_1000(
          "UMA.ActivityTracker.ThreadTrackers.MemLimit",
          thread_tracker_count_.load(std::memory_order_relaxed));
    }
  }

  // Create a tracker with the acquired memory and set it as the tracker
  // for this particular thread in thread-local-storage.
  DCHECK(mem_base);
  ManagedActivityTracker* tracker =
      new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_);
  DCHECK(tracker->IsValid());
  this_thread_tracker_.Set(tracker);
  int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);

  UMA_HISTOGRAM_ENUMERATION("UMA.ActivityTracker.ThreadTrackers.Count",
                            old_count + 1, kMaxThreadCount);
  return tracker;
}
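The re-use path above is the pop half of a small lock-free stack over a fixed array of references: read the top slot, then claim it by decrementing the count. A distilled, self-contained sketch of just that pop protocol (capacity and value type are stand-ins, not the real constants):

    #include <atomic>
    #include <cstdint>

    constexpr int kCapacity = 100;                 // Stand-in for kMaxThreadCount.
    std::atomic<int> g_count{0};                   // Stack pointer.
    std::atomic<uint32_t> g_refs[kCapacity] = {};  // 0 means "slot empty".

    // Returns 0 if the stack is empty.
    uint32_t PopRef() {
      int count = g_count.load(std::memory_order_acquire);
      while (count > 0) {
        uint32_t ref = g_refs[count - 1].load(std::memory_order_acquire);
        // On failure |count| is refreshed, so the loop retries with new state.
        if (!g_count.compare_exchange_weak(count, count - 1,
                                           std::memory_order_acquire,
                                           std::memory_order_acquire))
          continue;
        g_refs[count - 1].store(0, std::memory_order_relaxed);  // Now free.
        return ref;
      }
      return 0;
    }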

void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
  ThreadActivityTracker* tracker =
      reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
  if (tracker) {
    this_thread_tracker_.Free();
    delete tracker;
  }
}

GlobalActivityTracker::GlobalActivityTracker(
    std::unique_ptr<PersistentMemoryAllocator> allocator,
    int stack_depth)
    : allocator_(std::move(allocator)),
      stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
      this_thread_tracker_(&OnTLSDestroy),
      thread_tracker_count_(0),
      available_memories_count_(0) {
  // Clear the available-memories array.
  memset(available_memories_, 0, sizeof(available_memories_));

  // Ensure the passed memory is valid and empty (the iterator finds nothing).
  uint32_t type;
  DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));

  // Ensure that there is no other global object and then make this one such.
  DCHECK(!g_tracker_);
  g_tracker_ = this;
}

GlobalActivityTracker::~GlobalActivityTracker() {
  DCHECK_EQ(g_tracker_, this);
  DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed));
  g_tracker_ = nullptr;
}

void GlobalActivityTracker::ReturnTrackerMemory(
    ManagedActivityTracker* tracker) {
  PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_;
  void* mem_base = tracker->mem_base_;

  // Zero the memory so that it is ready for use if needed again later. It's
  // better to clear the memory now, when a thread is exiting, than to do it
  // when it is first needed by a thread doing actual work.
  memset(mem_base, 0, stack_memory_size_);

  // Remove the destructed tracker from the set of known ones.
  DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
  thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);

  // Deal with the memory that was used by the tracker.
  if (mem_reference) {
    // The memory was within the persistent memory allocator. Change its type
    // so that iteration won't find it.
    allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree,
                           kTypeIdActivityTracker);
    // There is no way to free memory from a persistent allocator so instead
    // push it on the internal list of available memory blocks.
    while (true) {
      // Get the existing count of available memories and ensure we won't
      // overrun the array. Acquire the values in the array.
      int count = available_memories_count_.load(std::memory_order_acquire);
      if (count >= kMaxThreadCount) {
        NOTREACHED();
        // Storage is full. Just forget about this memory. It won't be re-used
        // but there's no real loss.
        break;
      }

      // Write the reference of the memory being returned to this slot in the
      // array. Empty slots have a value of zero so do an atomic compare-and-
      // exchange to ensure that a race condition doesn't exist with another
      // thread doing the same.
      PersistentMemoryAllocator::Reference mem_expected = 0;
      if (!available_memories_[count].compare_exchange_weak(
              mem_expected, mem_reference,
              std::memory_order_release, std::memory_order_relaxed)) {
        continue;  // Try again.
      }

      // Increment the count, releasing the value written to the array. This
      // could fail if a simultaneous "pop" operation decremented the counter.
      // If that happens, clear the array slot and start over. Do a "strong"
      // exchange to avoid spurious retries that can occur with a "weak" one.
      int expected = count;  // Updated by compare/exchange.
      if (!available_memories_count_.compare_exchange_strong(
              expected, count + 1,
              std::memory_order_release, std::memory_order_relaxed)) {
        available_memories_[count].store(0, std::memory_order_relaxed);
        continue;
      }

      // Count was successfully incremented to reflect the newly added value.
      break;
    }
  } else {
    // The memory was allocated from the process heap. This shouldn't happen
    // because the persistent memory segment should be big enough for all
    // thread stacks, but it's better to support falling back to allocation
    // from the heap rather than crash. Everything will work as normal but
    // the data won't be persisted.
    delete[] reinterpret_cast<char*>(mem_base);
  }
}
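And this is the matching push half: claim the empty slot first, then publish it by incrementing the count, undoing the slot write if the increment loses a race with a pop. A self-contained sketch under the same assumptions as the pop sketch above:

    #include <atomic>
    #include <cstdint>

    constexpr int kCapacity = 100;                 // Stand-in for kMaxThreadCount.
    std::atomic<int> g_count{0};                   // Stack pointer.
    std::atomic<uint32_t> g_refs[kCapacity] = {};  // 0 means "slot empty".

    // Returns false if the stack is full (the caller just abandons the block).
    bool PushRef(uint32_t ref) {
      while (true) {
        int count = g_count.load(std::memory_order_acquire);
        if (count >= kCapacity)
          return false;
        uint32_t expected = 0;
        // Claim the slot; if another pusher got here first, retry from the top.
        if (!g_refs[count].compare_exchange_weak(expected, ref,
                                                 std::memory_order_release,
                                                 std::memory_order_relaxed))
          continue;
        int expected_count = count;
        // Publish. A "strong" exchange avoids spurious failures that would
        // needlessly clear and re-claim the slot.
        if (g_count.compare_exchange_strong(expected_count, count + 1,
                                            std::memory_order_release,
                                            std::memory_order_relaxed))
          return true;
        g_refs[count].store(0, std::memory_order_relaxed);  // Lost to a pop; undo.
      }
    }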

// static
void GlobalActivityTracker::OnTLSDestroy(void* value) {
  delete reinterpret_cast<ManagedActivityTracker*>(value);
}


ScopedActivity::ScopedActivity(const tracked_objects::Location& location,
                               uint8_t action,
                               uint32_t id,
                               uint32_t info)
    : GlobalActivityTracker::ScopedThreadActivity(
          location.program_counter(),
          static_cast<ThreadActivityTracker::ActivityType>(
              ThreadActivityTracker::ACT_GENERIC | action),
          ThreadActivityTracker::ActivityData::ForGeneric(id, info),
          /*lock_allowed=*/true),
      id_(id) {
  // The action must not affect the category bits of the activity type.
  DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK);
}
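The DCHECK documents a bit-packing contract: the low bits of the activity type carry a caller-supplied action while the high bits carry the category, and the OR may never spill across the boundary. A toy version with assumed mask values (the real ACT_* constants live in the header):

    #include <cassert>
    #include <cstdint>

    // Assumed layout for illustration: high nibble = category, low = action.
    constexpr uint32_t kCategoryMask = 0xF0;  // Stand-in for ACT_CATEGORY_MASK.
    constexpr uint32_t kActGeneric = 0x80;    // Stand-in for ACT_GENERIC.

    uint32_t PackGeneric(uint8_t action) {
      assert((action & kCategoryMask) == 0);  // Action must stay below category.
      return kActGeneric | action;            // e.g. PackGeneric(3) == 0x83.
    }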

void ScopedActivity::ChangeAction(uint8_t action) {
  DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK);
  ChangeTypeAndData(static_cast<ThreadActivityTracker::ActivityType>(
                        ThreadActivityTracker::ACT_GENERIC | action),
                    ThreadActivityTracker::kNullActivityData);
}

void ScopedActivity::ChangeInfo(uint32_t info) {
  ChangeTypeAndData(ThreadActivityTracker::ACT_NULL,
                    ThreadActivityTracker::ActivityData::ForGeneric(id_, info));
}

void ScopedActivity::ChangeActionAndInfo(uint8_t action, uint32_t info) {
  DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK);
  ChangeTypeAndData(static_cast<ThreadActivityTracker::ActivityType>(
                        ThreadActivityTracker::ACT_GENERIC | action),
                    ThreadActivityTracker::ActivityData::ForGeneric(id_, info));
}

ScopedTaskRunActivity::ScopedTaskRunActivity(const base::PendingTask& task)
    : GlobalActivityTracker::ScopedThreadActivity(
          task.posted_from.program_counter(),
          ThreadActivityTracker::ACT_TASK_RUN,
          ThreadActivityTracker::ActivityData::ForTask(task.sequence_num),
          /*lock_allowed=*/true) {}

ScopedLockAcquireActivity::ScopedLockAcquireActivity(
    const base::internal::LockImpl* lock)
    : GlobalActivityTracker::ScopedThreadActivity(
          nullptr,  // TODO(bcwhite): Find a real address.
          ThreadActivityTracker::ACT_LOCK_ACQUIRE,
          ThreadActivityTracker::ActivityData::ForLock(lock),
          /*lock_allowed=*/false) {}

ScopedEventWaitActivity::ScopedEventWaitActivity(
    const base::WaitableEvent* event)
    : GlobalActivityTracker::ScopedThreadActivity(
          nullptr,  // TODO(bcwhite): Find a real address.
          ThreadActivityTracker::ACT_EVENT_WAIT,
          ThreadActivityTracker::ActivityData::ForEvent(event),
          /*lock_allowed=*/true) {}

ScopedThreadJoinActivity::ScopedThreadJoinActivity(
    const base::PlatformThreadHandle* thread)
    : GlobalActivityTracker::ScopedThreadActivity(
          nullptr,  // TODO(bcwhite): Find a real address.
          ThreadActivityTracker::ACT_THREAD_JOIN,
          ThreadActivityTracker::ActivityData::ForThread(*thread),
          /*lock_allowed=*/true) {}

#if !defined(OS_NACL) && !defined(OS_IOS)
ScopedProcessWaitActivity::ScopedProcessWaitActivity(
    const base::Process* process)
    : GlobalActivityTracker::ScopedThreadActivity(
          nullptr,  // TODO(bcwhite): Find a real address.
          ThreadActivityTracker::ACT_PROCESS_WAIT,
          ThreadActivityTracker::ActivityData::ForProcess(process->Pid()),
          /*lock_allowed=*/true) {}
#endif

}  // namespace debug
}  // namespace base