1 // Copyright 2016 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/debug/activity_tracker.h" | |
6 | |
7 #include <atomic> | |
manzagop (departed)
2016/07/01 18:26:25
nit: already in .h
bcwhite
2016/07/11 22:03:29
Done.
| |
8 | |
9 #include "base/debug/stack_trace.h" | |
10 #include "base/feature_list.h" | |
11 #include "base/files/file.h" | |
12 #include "base/files/file_path.h" | |
13 #include "base/files/memory_mapped_file.h" | |
14 #include "base/logging.h" | |
15 #include "base/memory/ptr_util.h" | |
16 #include "base/metrics/field_trial.h" | |
17 #include "base/metrics/histogram_macros.h" | |
18 #include "base/pending_task.h" | |
19 #include "base/process/process.h" | |
20 #include "base/process/process_handle.h" | |
21 #include "base/stl_util.h" | |
22 #include "base/strings/string_util.h" | |
23 #include "base/threading/platform_thread.h" | |
24 | |
25 namespace base { | |
26 namespace debug { | |
27 | |
28 namespace { | |
29 | |
30 // A number that identifies the memory as having been initialized. It's | |
31 // arbitrary but happens to be the first 8 bytes of SHA1(ThreadActivityTracker). | |
32 // A version number is added so that code won't try to read data from an | |
33 // older, structurally-incompatible version (the cookie won't match). | |
34 const uint64_t kHeaderCookie = 0xC0029B240D4A3092ULL + 1; // v1 | |
manzagop (departed)
2016/07/01 18:26:25
Could we simply use kTypeIdActivityTracker? It see
bcwhite
2016/07/11 22:03:29
Not really. That identifier is part of the Global
| |
35 | |
36 // The minimum depth a stack should support. | |
37 const int kMinStackDepth = 2; | |
38 | |
39 } // namespace | |
40 | |
41 | |
42 #if !defined(OS_NACL) // NACL doesn't support any kind of file access in builds. | |
43 void SetupGlobalActivityTrackerFieldTrial(const FilePath& file) { | |
44 const Feature kActivityTrackerFeature{ | |
45 "ActivityTracking", FEATURE_DISABLED_BY_DEFAULT | |
46 }; | |
47 | |
48 if (!base::FeatureList::IsEnabled(kActivityTrackerFeature)) | |
49 return; | |
50 | |
51 // TODO(bcwhite): Adjust these numbers once there is real data to show | |
52 // just how much of an arena is necessary. | |
53 const size_t kMemorySize = 1 << 20; // 1 MiB | |
54 const int kStackDepth = 4; | |
55 const uint64_t kAllocatorId = 0; | |
56 const char kAllocatorName[] = "ActivityTracker"; | |
57 | |
58 GlobalActivityTracker::CreateWithFile( | |
59 file.AddExtension(PersistentMemoryAllocator::kFileExtension), | |
60 kMemorySize, kAllocatorId, kAllocatorName, kStackDepth); | |
61 } | |
62 #endif // !defined(OS_NACL) | |
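As a usage sketch: a process would call this once at startup, after its FeatureList has been initialized, with a base path under its data directory. The path below is hypothetical; only the call itself comes from this file.

    // Hypothetical caller; |user_data_dir| is an assumed base::FilePath.
    base::debug::SetupGlobalActivityTrackerFieldTrial(
        user_data_dir.AppendASCII("StabilityDebugging"));

Note the function appends PersistentMemoryAllocator::kFileExtension itself, so the caller passes the extension-less base name.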
63 | |
64 | |
65 // This information is kept for every thread that is tracked. It is filled | |
66 // the very first time the thread is seen. All fields must have explicit | |
67 // sizes so the layout does not change between 32- and 64-bit builds. | |
68 struct ThreadActivityTracker::Header { | |
69 // This unique number indicates a valid initialization of the memory. | |
manzagop (departed)
2016/07/01 18:26:25
Should this be atomic, so we never get a torn read
bcwhite
2016/07/11 22:03:29
Tearing is fine on a value that is only written on
| |
70 uint64_t cookie; | |
71 | |
72 // The process-id and thread-id to which this data belongs. These identifiers | |
73 // are not guaranteed to mean anything but are unique, in combination, among | |
74 // all active trackers. | |
75 int64_t process_id; | |
76 union { | |
77 int64_t as_id; | |
78 #if defined(OS_WIN) | |
79 // On Windows, the handle itself is often a pseudo-handle with a common | |
80 // value meaning "this thread" and so the thread-id is used. The former | |
81 // can be converted to a thread-id with a system call. | |
manzagop (departed)
2016/07/01 18:26:25
nit: is can
bcwhite
2016/07/11 22:03:29
Done.
| |
82 PlatformThreadId as_tid; | |
83 #elif defined(OS_POSIX) | |
84 // On Posix, the handle is always a unique identifier so no conversion | |
85 // needs to be done. However, its value is officially opaque so there | |
86 // is no one correct way to convert it to a numerical identifier. | |
87 PlatformThreadHandle::Handle as_handle; | |
88 #endif | |
89 } thread_ref; | |
90 | |
91 // The start-time and start-ticks when the data was created. Each activity | |
92 // record has a |time_internal| value that can be converted to a "wall time" | |
93 // with these two values. | |
94 int64_t start_time; | |
95 int64_t start_ticks; | |
96 | |
97 // The number of Activity slots in the data. | |
98 uint32_t stack_slots; | |
99 | |
100 // The current depth of the stack. This may be greater than the number of | |
101 // slots. If the depth exceeds the number of slots, the newest entries | |
102 // won't be recorded. | |
103 std::atomic<uint32_t> current_depth; | |
104 | |
105 // A memory location used to indicate if changes have been made to the stack | |
106 // that would invalidate an in-progress read of its contents. The active | |
107 // tracker will zero the value whenever something gets popped from the | |
108 // stack. A monitoring tracker can write a non-zero value here, copy the | |
109 // stack contents, and read the value to know, if it is still non-zero, that | |
110 // the contents didn't change while being copied. | |
manzagop (departed)
2016/07/01 18:26:25
Mention there can be only one reader to avoid ABA?
bcwhite
2016/07/11 22:03:29
It actually could support it if each snapshot oper
| |
111 std::atomic<uint32_t> stack_unchanged; | |
112 | |
113 // The name of the thread (up to a maximum length). Dynamic-length names | |
114 // are not practical since the memory has to come from the same persistent | |
115 // allocator that holds this structure and to which this object has no | |
116 // reference. | |
117 char thread_name[32]; | |
118 }; | |
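Taken together, |current_depth| and |stack_unchanged| form a single-reader validation scheme along the lines of a seqlock: the reader arms a flag, copies the stack, then verifies the flag is still set; the writer clears the flag whenever a pop invalidates an in-progress copy. A reduced, standalone sketch of the protocol (hypothetical names, not the tracker's API):

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> stack_unchanged{0};

    void CopyStackContents();  // Hypothetical: memcpy the activity slots.

    // Tracked thread, after every pop. "Release" orders the cleared flag
    // after the stack change it reports.
    void OnPop() {
      stack_unchanged.store(0, std::memory_order_release);
    }

    // Analyzer thread. Returns true if the copy is known-consistent;
    // callers retry a bounded number of times on false.
    bool TryCopy() {
      stack_unchanged.store(1, std::memory_order_seq_cst);  // Arm.
      CopyStackContents();
      // Still armed? Then no pop occurred during the copy.
      return stack_unchanged.load(std::memory_order_seq_cst) != 0;
    }

As the review thread notes, this assumes a single concurrent reader; two snapshotters would re-arm each other's flag and could each validate a torn copy.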
119 | |
120 // It doesn't matter what is contained in this (though it will be all zeros) | |
121 // as only the address of it is important. | |
122 const ThreadActivityTracker::ActivityData | |
123 ThreadActivityTracker::kNullActivityData = {}; | |
124 | |
125 ThreadActivityTracker::ActivityData | |
126 ThreadActivityTracker::ActivityData::ForThread( | |
127 const PlatformThreadHandle& handle) { | |
128 // Header already has a conversion union; reuse that. | |
129 ThreadActivityTracker::Header header; | |
130 header.thread_ref.as_id = 0; // Zero the union in case other is smaller. | |
manzagop (departed)
2016/07/01 18:26:25
Do you need a static_assert to ensure the other th
| |
131 #if defined(OS_WIN) | |
132 header.thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); | |
133 #elif defined(OS_POSIX) | |
134 header.thread_ref.as_handle = handle.platform_handle(); | |
135 #endif | |
136 return ForThread(header.thread_ref.as_id); | |
137 } | |
138 | |
139 ThreadActivityTracker::ActivitySnapshot::ActivitySnapshot() {} | |
140 ThreadActivityTracker::ActivitySnapshot::~ActivitySnapshot() {} | |
141 | |
142 | |
143 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size) | |
144 : header_(static_cast<Header*>(base)), | |
145 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) + | |
146 sizeof(Header))), | |
147 stack_slots_( | |
148 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) { | |
149 DCHECK(thread_checker_.CalledOnValidThread()); | |
150 | |
151 // Verify the parameters but fail gracefully if they're not valid so that | |
152 // production code based on external inputs will not crash. IsValid() will | |
153 // return false in this case. | |
154 if (!base || | |
155 // Ensure there is enough space for the header and at least a few records. | |
156 size < sizeof(Header) + kMinStackDepth * sizeof(Activity) || | |
157 // Ensure that the |stack_slots_| calculation didn't overflow. | |
158 (size - sizeof(Header)) / sizeof(Activity) > | |
159 std::numeric_limits<uint32_t>::max()) { | |
160 NOTREACHED(); | |
161 return; | |
162 } | |
163 | |
164 // Ensure that the thread reference doesn't exceed the size of the ID number. | |
165 // This won't compile at the global scope because Header is a private struct. | |
166 static_assert( | |
167 sizeof(header_->thread_ref) == sizeof(header_->thread_ref.as_id), | |
168 "PlatformThreadHandle::Handle is too big to hold in 64-bit ID"); | |
169 | |
170 // Provided memory should either be completely initialized or all zeros. | |
171 if (header_->cookie == 0) { | |
172 // This is a new file. Double-check other fields and then initialize. | |
173 DCHECK_EQ(0, header_->process_id); | |
174 DCHECK_EQ(0, header_->thread_ref.as_id); | |
175 DCHECK_EQ(0, header_->start_time); | |
176 DCHECK_EQ(0, header_->start_ticks); | |
177 DCHECK_EQ(0U, header_->stack_slots); | |
178 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed)); | |
179 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed)); | |
180 DCHECK_EQ(0, stack_[0].time_internal); | |
181 DCHECK_EQ(0U, stack_[0].origin_address); | |
182 DCHECK_EQ(0U, stack_[0].call_stack[0]); | |
183 DCHECK_EQ(0U, stack_[0].data.task.sequence_id); | |
184 | |
185 header_->process_id = GetCurrentProcId(); | |
186 #if defined(OS_WIN) | |
187 header_->thread_ref.as_tid = PlatformThread::CurrentId(); | |
188 #elif defined(OS_POSIX) | |
189 header_->thread_ref.as_handle = | |
190 PlatformThread::CurrentHandle().platform_handle(); | |
191 #endif | |
192 header_->start_time = base::Time::Now().ToInternalValue(); | |
193 header_->start_ticks = base::TimeTicks::Now().ToInternalValue(); | |
194 header_->stack_slots = stack_slots_; | |
195 strlcpy(header_->thread_name, PlatformThread::GetName(), | |
196 sizeof(header_->thread_name)); | |
197 header_->cookie = kHeaderCookie; | |
198 valid_ = true; | |
199 DCHECK(IsValid()); | |
200 } else { | |
201 // This is a file with existing data. Perform basic consistency checks. | |
manzagop (departed)
2016/07/01 18:26:25
Is this from when the analyzer shared code with th
| |
202 valid_ = true; | |
203 valid_ = IsValid(); | |
204 } | |
205 } | |
206 | |
207 ThreadActivityTracker::~ThreadActivityTracker() {} | |
208 | |
209 void ThreadActivityTracker::PushActivity(const void* origin, | |
210 ActivityType type, | |
211 const ActivityData& data) { | |
212 // A thread-checker creates a lock to check the thread-id which means | |
213 // re-entry into this code if lock acquisitions are being tracked. | |
214 DCHECK(type == ACT_LOCK_ACQUIRE || thread_checker_.CalledOnValidThread()); | |
215 | |
216 // Get the current depth of the stack. No access to other memory guarded | |
217 // by this variable is done here so a "relaxed" load is acceptable. | |
218 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed); | |
219 | |
220 // Handle the case where the stack depth has exceeded the storage capacity. | |
221 // Extra entries will be lost, leaving only the base of the stack. | |
222 if (depth >= stack_slots_) { | |
223 // Since no other threads modify the data, no compare/exchange is needed. | |
224 // Since no other memory is being modified, a "relaxed" store is acceptable. | |
225 header_->current_depth.store(depth + 1, std::memory_order_relaxed); | |
226 return; | |
227 } | |
228 | |
229 // Get a pointer to the next activity and fill it in. No atomicity is | |
230 // required here because the memory is known only to this thread. It will | |
231 // be made known to other threads once the depth is incremented. | |
232 Activity* activity = &stack_[depth]; | |
233 activity->time_internal = base::TimeTicks::Now().ToInternalValue(); | |
234 activity->origin_address = reinterpret_cast<uintptr_t>(origin); | |
235 activity->activity_type = type; | |
236 activity->data = data; | |
237 | |
238 #if defined(SYZYASAN) | |
239 // Create a stacktrace from the current location and get the addresses. | |
240 StackTrace stack_trace; | |
241 size_t stack_depth; | |
242 const void* const* stack_addrs = stack_trace.Addresses(&stack_depth); | |
243 // Copy the stack addresses, ignoring the first one (here). | |
244 size_t i; | |
245 for (i = 1; i < stack_depth && i < kActivityCallStackSize; ++i) { | |
246 activity->call_stack[i - 1] = reinterpret_cast<uintptr_t>(stack_addrs[i]); | |
247 } | |
248 activity->call_stack[i - 1] = 0; | |
249 #else | |
250 // Since the memory was initially zero and nothing ever overwrites it in | |
251 // this "else" case, there is no need to write even the null terminator. | |
252 // activity->call_stack[0] = 0; | |
253 #endif | |
254 | |
255 // Save the incremented depth. Because this guards |activity| memory filled | |
256 // above that may be read by another thread once the recorded depth changes, | |
257 // a "release" store is required. | |
258 header_->current_depth.store(depth + 1, std::memory_order_release); | |
259 } | |
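The release-store above pairs with the acquire-load of |current_depth| in Snapshot(): the plain writes to the activity slot are ordered before the store, so any thread whose acquire-load observes the new depth also observes the slot's contents. The pattern in miniature (standalone sketch, hypothetical names):

    #include <atomic>
    #include <cstdint>

    struct Entry { uint64_t payload; };
    Entry slots[16];
    std::atomic<uint32_t> current_depth{0};

    void Writer(uint32_t depth, Entry e) {
      slots[depth] = e;  // Plain write; not yet published.
      // "Release" publishes the write above along with the new depth.
      current_depth.store(depth + 1, std::memory_order_release);
    }

    bool Reader(Entry* out) {
      // Pairs with the release-store; "acquires" the slot contents.
      uint32_t d = current_depth.load(std::memory_order_acquire);
      if (d == 0)
        return false;
      *out = slots[d - 1];  // Guaranteed to see Writer's plain write.
      return true;
    }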
260 | |
261 void ThreadActivityTracker::ChangeActivity(ActivityType type, | |
262 const ActivityData& data) { | |
263 DCHECK(thread_checker_.CalledOnValidThread()); | |
264 DCHECK(type != ACT_NULL || &data != &kNullActivityData); | |
265 | |
266 // Get the current depth of the stack. This is acquiring the last-used stack | |
267 // slot but since that slot is determined as a result of this read, the | |
268 // memory-ordering constraint can be downgraded to "consume". | |
269 uint32_t depth = header_->current_depth.load(std::memory_order_consume); | |
270 DCHECK_LT(0U, depth); | |
271 | |
272 // Update the information if it is being recorded (i.e. within slot limit). | |
manzagop (departed)
2016/07/01 18:26:25
Hm, I'm not sure I follow this. Keep in mind I'm a
bcwhite
2016/07/11 22:03:29
Right. The update of the information is not atomi
| |
273 if (depth <= stack_slots_) { | |
274 Activity* activity = &stack_[depth - 1]; | |
275 | |
276 if (type != ACT_NULL) { | |
277 DCHECK_EQ(activity->activity_type & ACT_CATEGORY_MASK, | |
278 type & ACT_CATEGORY_MASK); | |
279 activity->activity_type = type; | |
280 } | |
281 | |
282 if (&data != &kNullActivityData) | |
283 activity->data = data; | |
284 } | |
285 } | |
286 | |
287 void ThreadActivityTracker::PopActivity() { | |
manzagop (departed)
2016/07/01 18:26:25
thread_checker?
bcwhite
2016/07/11 22:03:29
Done, though thread-checking is complicated due to
| |
288 // Do an atomic decrement of the depth. No changes to stack entries guarded | |
289 // by this variable are done here so a "relaxed" operation is acceptable. | |
290 // |depth| will receive the value BEFORE it was modified. | |
291 uint32_t depth = | |
292 header_->current_depth.fetch_sub(1, std::memory_order_relaxed); | |
293 | |
294 // Validate that everything is running correctly. | |
295 DCHECK_LT(0U, depth); | |
296 | |
297 // The stack has shrunk, meaning that some other thread trying to copy the | |
298 // contents for reporting purposes could get bad data. That thread would | |
299 // have written a non-zero value into |stack_unchanged|; clearing it here | |
300 // will let that thread detect that something did change. This needs to | |
301 // happen after the atomic |depth| operation above so a "release" store | |
302 // is required. | |
303 header_->stack_unchanged.store(0, std::memory_order_release); | |
304 } | |
305 | |
306 bool ThreadActivityTracker::IsValid() const { | |
307 if (header_->cookie != kHeaderCookie || | |
308 header_->process_id == 0 || | |
309 header_->thread_ref.as_id == 0 || | |
310 header_->start_time == 0 || | |
311 header_->start_ticks == 0 || | |
312 header_->stack_slots != stack_slots_ || | |
313 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { | |
314 return false; | |
315 } | |
316 | |
317 return valid_; | |
318 } | |
319 | |
320 bool ThreadActivityTracker::Snapshot(ActivitySnapshot* output_snapshot) const { | |
321 DCHECK(output_snapshot); | |
322 | |
323 // There is no "called on valid thread" check for this method as it can be | |
324 // called from other threads or even other processes. It is also the reason | |
325 // why atomic operations must be used in certain places above. | |
326 | |
327 // It's possible for the data to change while reading it in such a way that it | |
328 // invalidates the read. Make several attempts but don't try forever. | |
329 const int kMaxAttempts = 10; | |
330 uint32_t depth; | |
331 | |
332 // Stop here if the data isn't valid. | |
333 if (!IsValid()) | |
334 return false; | |
335 | |
336 // Start with an empty return stack. | |
337 output_snapshot->activity_stack.clear(); | |
338 | |
339 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { | |
340 // Remember the process and thread IDs to ensure they aren't replaced | |
341 // during the snapshot operation. | |
342 const int64_t starting_process_id = header_->process_id; | |
343 const int64_t starting_thread_id = header_->thread_ref.as_id; | |
344 | |
345 // Write a non-zero value to |stack_unchanged| so it's possible to detect | |
346 // at the end that nothing has changed since copying the data began. A | |
347 // "cst" operation is required to ensure it occurs before everything else. | |
348 // Using "cst" memory ordering is relatively expensive but this is only | |
349 // done during analysis so doesn't directly affect the worker threads. | |
350 header_->stack_unchanged.store(1, std::memory_order_seq_cst); | |
351 | |
352 // Fetching the current depth also "acquires" the contents of the stack. | |
353 depth = header_->current_depth.load(std::memory_order_acquire); | |
354 if (depth > 0) { | |
355 // Copy the existing contents. Memcpy is used for speed. | |
356 uint32_t count = std::min(depth, stack_slots_); | |
357 output_snapshot->activity_stack.resize(count); | |
manzagop (departed)
2016/07/01 18:26:25
Is it better to avoid the allocation while attempt
bcwhite
2016/07/11 22:03:30
Done.
| |
358 memcpy(&output_snapshot->activity_stack[0], stack_, | |
359 count * sizeof(Activity)); | |
360 } | |
361 | |
362 // Retry if something changed during the copy. A "cst" operation ensures | |
363 // it must happen after all the above operations. | |
364 if (!header_->stack_unchanged.load(std::memory_order_seq_cst)) | |
365 continue; | |
366 | |
367 // Stack copied. Record its full depth. | |
368 output_snapshot->activity_stack_depth = depth; | |
369 | |
370 // TODO(bcwhite): Snapshot other things here. | |
371 | |
372 // Get the general thread information. | |
373 output_snapshot->process_id = header_->process_id; | |
374 output_snapshot->thread_id = header_->thread_ref.as_id; | |
375 output_snapshot->thread_name = | |
376 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); | |
377 | |
378 // All characters of the thread-name buffer were copied so as to not break | |
379 // if the trailing NUL were missing. Now limit the length if the actual | |
380 // name is shorter. | |
381 output_snapshot->thread_name.resize( | |
382 strlen(output_snapshot->thread_name.c_str())); | |
383 | |
384 // If the process or thread ID has changed then the tracker has exited and | |
385 // the memory has been reused by a new one. Try again. | |
manzagop (departed)
2016/07/01 18:26:25
Doesn't the header need to be guarded as well? Oth
bcwhite
2016/07/11 22:03:29
Not sure what you mean. Memory gets zeroed when a
manzagop (departed)
2016/07/26 21:25:33
I *think* I meant that you can read a wrong/empty
bcwhite
2016/07/29 17:38:38
Yes but then the pid/tid comparison on 386 would f
manzagop (departed)
2016/07/29 18:44:33
No sure I understand. Here's what I meant:
- Analy
bcwhite
2016/08/01 14:51:37
Right. Which is why the latest code loads, atomic
| |
386 if (output_snapshot->process_id != starting_process_id || | |
387 output_snapshot->thread_id != starting_thread_id) { | |
388 continue; | |
389 } | |
390 | |
391 // Only successful if the data is still valid once everything is done since | |
392 // it's possible for the thread to end somewhere in the middle and all its | |
393 // values become garbage. | |
394 if (!IsValid()) | |
395 return false; | |
396 | |
397 // Change all the timestamps in the activities from "ticks" to "wall" time. | |
398 const Time start_time = Time::FromInternalValue(header_->start_time); | |
399 const int64_t start_ticks = header_->start_ticks; | |
400 for (Activity& activity : output_snapshot->activity_stack) { | |
401 activity.time_internal = | |
402 (start_time + | |
403 TimeDelta::FromInternalValue(activity.time_internal - start_ticks)) | |
404 .ToInternalValue(); | |
405 } | |
406 | |
407 // Success! | |
408 return true; | |
409 } | |
410 | |
411 // Too many attempts. | |
412 return false; | |
413 } | |
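A usage sketch of both sides of this API, with buffer size and values chosen arbitrarily: the owning thread lays a tracker over zeroed memory and pushes activities; any other thread (or a process that maps the same memory) takes snapshots.

    // Owning thread. std::vector value-initializes, so the buffer is zeroed.
    std::vector<char> memory(ThreadActivityTracker::SizeForStackDepth(4));
    ThreadActivityTracker tracker(memory.data(), memory.size());
    DCHECK(tracker.IsValid());
    tracker.PushActivity(nullptr, ThreadActivityTracker::ACT_GENERIC,
                         ThreadActivityTracker::kNullActivityData);

    // Any thread. Snapshot() retries internally, up to kMaxAttempts times.
    ThreadActivityTracker::ActivitySnapshot snapshot;
    if (tracker.Snapshot(&snapshot)) {
      // |activity_stack| holds the copied slots; |activity_stack_depth| can
      // be larger than its size if deep entries were dropped.
    }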
414 | |
415 // static | |
416 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { | |
417 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); | |
418 } | |
419 | |
420 | |
421 GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr; | |
422 | |
423 GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker( | |
424 PersistentMemoryAllocator::Reference mem_reference, | |
425 void* base, | |
426 size_t size) | |
427 : ThreadActivityTracker(base, size), | |
428 mem_reference_(mem_reference), | |
429 mem_base_(base) {} | |
430 | |
431 GlobalActivityTracker::ManagedActivityTracker::~ManagedActivityTracker() { | |
432 // The global |g_tracker_| must point to the owner of this class since all | |
433 // objects of this type must be destructed before |g_tracker_| can be changed | |
434 // (something that only occurs in tests). | |
435 DCHECK(g_tracker_); | |
436 g_tracker_->ReturnTrackerMemory(this); | |
437 } | |
438 | |
439 void GlobalActivityTracker::CreateWithAllocator( | |
440 std::unique_ptr<PersistentMemoryAllocator> allocator, | |
441 int stack_depth) { | |
442 // There's no need to do anything with the result. It is self-managing. | |
443 GlobalActivityTracker* global_tracker = | |
444 new GlobalActivityTracker(std::move(allocator), stack_depth); | |
445 // Create a tracker for this thread since it is known. | |
446 global_tracker->CreateTrackerForCurrentThread(); | |
447 } | |
448 | |
449 #if !defined(OS_NACL) | |
450 // static | |
451 void GlobalActivityTracker::CreateWithFile(const FilePath& file_path, | |
452 size_t size, | |
453 uint64_t id, | |
454 StringPiece name, | |
455 int stack_depth) { | |
456 DCHECK(!file_path.empty()); | |
457 DCHECK_GE(static_cast<uint64_t>(std::numeric_limits<int64_t>::max()), size); | |
458 | |
459 // Create and map the file into memory and make it globally available. | |
460 std::unique_ptr<MemoryMappedFile> mapped_file(new MemoryMappedFile()); | |
461 bool success = | |
462 mapped_file->Initialize(File(file_path, | |
463 File::FLAG_CREATE_ALWAYS | File::FLAG_READ | | |
464 File::FLAG_WRITE | File::FLAG_SHARE_DELETE), | |
465 {0, static_cast<int64_t>(size)}, | |
466 MemoryMappedFile::READ_WRITE_EXTEND); | |
467 DCHECK(success); | |
468 CreateWithAllocator(WrapUnique(new FilePersistentMemoryAllocator( | |
469 std::move(mapped_file), size, id, name, false)), | |
470 stack_depth); | |
471 } | |
472 #endif // !defined(OS_NACL) | |
473 | |
474 // static | |
475 void GlobalActivityTracker::CreateWithLocalMemory(size_t size, | |
476 uint64_t id, | |
477 StringPiece name, | |
478 int stack_depth) { | |
479 CreateWithAllocator( | |
480 WrapUnique(new LocalPersistentMemoryAllocator(size, id, name)), | |
481 stack_depth); | |
482 } | |
483 | |
484 ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() { | |
485 DCHECK(!this_thread_tracker_.Get()); | |
486 | |
487 PersistentMemoryAllocator::Reference mem_reference = 0; | |
488 void* mem_base = nullptr; | |
489 | |
490 // Get the current count of available memories, acquiring the array values. | |
491 int count = available_memories_count_.load(std::memory_order_acquire); | |
manzagop (departed)
2016/07/04 21:20:56
I wonder if there can be an ABA issue here?
Suppo
bcwhite
2016/07/11 15:24:18
Good observation! Fixed. (I think. ;-)
manzagop (departed)
2016/07/26 21:25:33
Looks good!
| |
492 while (count > 0) { | |
493 // There is a memory block that was previously released (and zeroed) so | |
494 // just re-use that rather than allocating a new one. Use "acquire" so | |
495 // operations below can be re-ordered above. | |
496 mem_reference = | |
497 available_memories_[count - 1].load(std::memory_order_acquire); | |
498 DCHECK(mem_reference); | |
499 | |
500 // Decrement the count indicating that the value has been taken. If this | |
501 // fails then something else, another thread doing push or pop, has changed | |
502 // the stack; retry if so. | |
503 // NOTE: |count| will be loaded with the existing value and affect the | |
504 // "while" condition. | |
505 if (!available_memories_count_.compare_exchange_weak( | |
506 count, count - 1, std::memory_order_acquire, | |
507 std::memory_order_acquire)) { | |
508 continue; | |
509 } | |
510 | |
511 // Clear the value just read from the array so that the "push" operation | |
512 // knows there is no value there and will work correctly. | |
513 available_memories_[count - 1].store(0, std::memory_order_relaxed); | |
514 | |
515 // Turn the reference back into one of the activity-tracker type. | |
516 mem_base = allocator_->GetAsObject<char>(mem_reference, | |
517 kTypeIdActivityTrackerFree); | |
518 DCHECK(mem_base); | |
519 DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference)); | |
520 allocator_->ChangeType(mem_reference, kTypeIdActivityTracker, | |
521 kTypeIdActivityTrackerFree); | |
522 | |
523 // Success. | |
524 break; | |
525 } | |
526 | |
527 // Handle the case where no previously-used memories are available. | |
528 if (count == 0) { | |
529 // Allocate a block of memory from the persistent segment. | |
530 mem_reference = | |
531 allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker); | |
532 if (mem_reference) { | |
533 // Success. Convert the reference to an actual memory address. | |
534 mem_base = | |
535 allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker); | |
536 // Make the allocation iterable so it can be found by other processes. | |
537 allocator_->MakeIterable(mem_reference); | |
538 } else { | |
539 // Failure. This shouldn't happen. | |
540 NOTREACHED(); | |
541 // But if it does, probably because the allocator wasn't given enough | |
542 // memory to satisfy all possible requests, handle it gracefully by | |
543 // allocating the required memory from the heap. | |
544 mem_base = new char[stack_memory_size_]; | |
545 memset(mem_base, 0, stack_memory_size_); | |
546 // Report the thread-count at which the allocator was full so that the | |
547 // failure can be seen and underlying memory resized appropriately. | |
548 UMA_HISTOGRAM_COUNTS_1000( | |
549 "UMA.ActivityTracker.ThreadTrackers.MemLimit", | |
manzagop (departed)
2016/07/04 21:20:56
nit: MemLimit sounds like it measures memory (eg i
bcwhite
2016/07/11 15:24:18
Done.
| |
550 thread_tracker_count_.load(std::memory_order_relaxed)); | |
551 } | |
552 } | |
553 | |
554 // Create a tracker with the acquired memory and set it as the tracker | |
555 // for this particular thread in thread-local-storage. | |
556 DCHECK(mem_base); | |
557 ManagedActivityTracker* tracker = | |
558 new ManagedActivityTracker(mem_reference, mem_base, stack_memory_size_); | |
559 DCHECK(tracker->IsValid()); | |
560 this_thread_tracker_.Set(tracker); | |
561 int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed); | |
562 | |
563 UMA_HISTOGRAM_ENUMERATION("UMA.ActivityTracker.ThreadTrackers.Count", | |
564 old_count + 1, kMaxThreadCount); | |
manzagop (departed)
2016/07/04 21:20:56
Is it a problem that old_count can be > kMaxThreadCount?
bcwhite
2016/07/11 15:24:18
Histograms have an "overflow" bucket for anything
| |
565 return tracker; | |
566 } | |
567 | |
568 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { | |
569 ThreadActivityTracker* tracker = | |
570 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); | |
571 if (tracker) { | |
572 this_thread_tracker_.Free(); | |
573 delete tracker; | |
574 } | |
575 } | |
576 | |
577 GlobalActivityTracker::GlobalActivityTracker( | |
578 std::unique_ptr<PersistentMemoryAllocator> allocator, | |
579 int stack_depth) | |
580 : allocator_(std::move(allocator)), | |
581 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), | |
582 this_thread_tracker_(&OnTLSDestroy), | |
583 thread_tracker_count_(0), | |
584 available_memories_count_(0) { | |
585 // Clear the available-memories array. | |
586 memset(available_memories_, 0, sizeof(available_memories_)); | |
587 | |
588 // Ensure the passed memory is valid and empty (iterator finds nothing). | |
589 uint32_t type; | |
590 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); | |
591 | |
592 // Ensure that there is no other global object and then make this one such. | |
593 DCHECK(!g_tracker_); | |
594 g_tracker_ = this; | |
595 } | |
596 | |
597 GlobalActivityTracker::~GlobalActivityTracker() { | |
598 DCHECK_EQ(g_tracker_, this); | |
599 DCHECK_EQ(0, thread_tracker_count_.load(std::memory_order_relaxed)); | |
600 g_tracker_ = nullptr; | |
601 } | |
602 | |
603 void GlobalActivityTracker::ReturnTrackerMemory( | |
604 ManagedActivityTracker* tracker) { | |
605 PersistentMemoryAllocator::Reference mem_reference = tracker->mem_reference_; | |
606 void* mem_base = tracker->mem_base_; | |
607 | |
608 // Zero the memory so that it is ready for use if needed again later. It's | |
609 // better to clear the memory now, when a thread is exiting, than to do it | |
610 // when it is first needed by a thread doing actual work. | |
611 memset(mem_base, 0, stack_memory_size_); | |
612 | |
613 // Remove the destructed tracker from the set of known ones. | |
614 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); | |
615 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); | |
616 | |
617 // Deal with the memory that was used by the tracker. | |
618 if (mem_reference) { | |
619 // The memory was within the persistent memory allocator. Change its type | |
620 // so that iteration won't find it. | |
621 allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree, | |
622 kTypeIdActivityTracker); | |
623 // There is no way to free memory from a persistent allocator so instead | |
624 // push it on the internal list of available memory blocks. | |
625 while (true) { | |
626 // Get the existing count of available memories and ensure we won't | |
627 // overrun the array. Acquire the values in the array. | |
628 int count = available_memories_count_.load(std::memory_order_acquire); | |
629 if (count >= kMaxThreadCount) { | |
630 NOTREACHED(); | |
631 // Storage is full. Just forget about this memory. It won't be re-used | |
632 // but there's no real loss. | |
633 break; | |
634 } | |
635 | |
636 // Write the reference of the memory being returned to this slot in the | |
637 // array. Empty slots have a value of zero so do an atomic compare-and- | |
638 // exchange to ensure that a race condition doesn't exist with another | |
639 // thread doing the same. | |
640 PersistentMemoryAllocator::Reference mem_expected = 0; | |
641 if (!available_memories_[count].compare_exchange_weak( | |
642 mem_expected, mem_reference, std::memory_order_release, | |
643 std::memory_order_relaxed)) { | |
644 continue; // Try again. | |
645 } | |
646 | |
647 // Increment the count, releasing the value written to the array. This | |
648 // could fail if a simultaneous "pop" operation decremented the counter. | |
649 // If that happens, clear the array slot and start over. Do a "strong" | |
650 // exchange to avoid spurious retries that can occur with a "weak" one. | |
651 int expected = count; // Updated by compare/exchange. | |
652 if (!available_memories_count_.compare_exchange_strong( | |
653 expected, count + 1, std::memory_order_release, | |
654 std::memory_order_relaxed)) { | |
655 available_memories_[count].store(0, std::memory_order_relaxed); | |
656 continue; | |
657 } | |
658 | |
659 // Count was successfully incremented to reflect the newly added value. | |
660 break; | |
661 } | |
662 } else { | |
663 // The memory was allocated from the process heap. This shouldn't happen | |
664 // because the persistent memory segment should be big enough for all | |
665 // thread stacks but it's better to support falling back to allocation | |
666 // from the heap rather than crash. Everything will work as normal but | |
667 // the data won't be persisted. | |
668 delete[] reinterpret_cast<char*>(mem_base); | |
669 } | |
670 } | |
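This push, together with the pop in CreateTrackerForCurrentThread(), forms a bounded lock-free stack: |available_memories_count_| is the point of linearization, a zero slot means "empty", and a slot is claimed (or re-cleared after a lost race) separately from publishing the count, which is the ordering the ABA discussion above concerns. A reduced sketch of the pair as the logic stands in this patchset, with non-zero int payloads standing in for allocator references (hypothetical names, not the tracker's API):

    #include <atomic>

    constexpr int kSlots = 100;
    std::atomic<int> count{0};
    std::atomic<int> slots[kSlots];  // Zero-initialized; zero means "empty".

    void Push(int value) {  // |value| must be non-zero.
      while (true) {
        int c = count.load(std::memory_order_acquire);
        if (c >= kSlots)
          return;  // Full; the value is dropped, as above.
        int empty = 0;
        // Claim the slot first so a racing Push can't take the same one.
        if (!slots[c].compare_exchange_weak(empty, value,
                                            std::memory_order_release,
                                            std::memory_order_relaxed))
          continue;
        int expected = c;
        if (count.compare_exchange_strong(expected, c + 1,
                                          std::memory_order_release,
                                          std::memory_order_relaxed))
          return;  // Published.
        slots[c].store(0, std::memory_order_relaxed);  // Lost a race; undo.
      }
    }

    int Pop() {  // Returns 0 when empty.
      int c = count.load(std::memory_order_acquire);
      while (c > 0) {
        int value = slots[c - 1].load(std::memory_order_acquire);
        // Decrementing the count claims the value; on failure |c| is
        // reloaded and the loop retries.
        if (count.compare_exchange_weak(c, c - 1, std::memory_order_acquire,
                                        std::memory_order_acquire)) {
          slots[c - 1].store(0, std::memory_order_relaxed);  // Now empty.
          return value;
        }
      }
      return 0;
    }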
671 | |
672 // static | |
673 void GlobalActivityTracker::OnTLSDestroy(void* value) { | |
674 delete reinterpret_cast<ManagedActivityTracker*>(value); | |
675 } | |
676 | |
677 | |
678 ScopedActivity::ScopedActivity(const tracked_objects::Location& location, | |
679 uint8_t action, | |
680 uint32_t id, | |
681 int32_t info) | |
682 : GlobalActivityTracker::ScopedThreadActivity( | |
683 location.program_counter(), | |
684 static_cast<ThreadActivityTracker::ActivityType>( | |
685 ThreadActivityTracker::ACT_GENERIC | action), | |
686 ThreadActivityTracker::ActivityData::ForGeneric(id, info), | |
687 /*lock_allowed=*/true), | |
688 id_(id) { | |
689 // The action must not affect the category bits of the activity type. | |
690 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK); | |
691 } | |
692 | |
693 void ScopedActivity::ChangeAction(uint8_t action) { | |
694 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK); | |
695 ChangeTypeAndData(static_cast<ThreadActivityTracker::ActivityType>( | |
696 ThreadActivityTracker::ACT_GENERIC | action), | |
697 ThreadActivityTracker::kNullActivityData); | |
698 } | |
699 | |
700 void ScopedActivity::ChangeInfo(int32_t info) { | |
701 ChangeTypeAndData(ThreadActivityTracker::ACT_NULL, | |
702 ThreadActivityTracker::ActivityData::ForGeneric(id_, info)); | |
703 } | |
704 | |
705 void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) { | |
706 DCHECK_EQ(0, action & ThreadActivityTracker::ACT_CATEGORY_MASK); | |
707 ChangeTypeAndData(static_cast<ThreadActivityTracker::ActivityType>( | |
708 ThreadActivityTracker::ACT_GENERIC | action), | |
709 ThreadActivityTracker::ActivityData::ForGeneric(id_, info)); | |
710 } | |
711 | |
712 ScopedTaskRunActivity::ScopedTaskRunActivity(const base::PendingTask& task) | |
713 : GlobalActivityTracker::ScopedThreadActivity( | |
714 task.posted_from.program_counter(), | |
715 ThreadActivityTracker::ACT_TASK_RUN, | |
716 ThreadActivityTracker::ActivityData::ForTask(task.sequence_num), | |
717 /*lock_allowed=*/true) {} | |
718 | |
719 ScopedLockAcquireActivity::ScopedLockAcquireActivity( | |
720 const base::internal::LockImpl* lock) | |
721 : GlobalActivityTracker::ScopedThreadActivity( | |
722 nullptr, | |
723 ThreadActivityTracker::ACT_LOCK_ACQUIRE, | |
724 ThreadActivityTracker::ActivityData::ForLock(lock), | |
725 /*lock_allowed=*/false) {} | |
726 | |
727 ScopedEventWaitActivity::ScopedEventWaitActivity( | |
728 const base::WaitableEvent* event) | |
729 : GlobalActivityTracker::ScopedThreadActivity( | |
730 nullptr, | |
731 ThreadActivityTracker::ACT_EVENT_WAIT, | |
732 ThreadActivityTracker::ActivityData::ForEvent(event), | |
733 /*lock_allowed=*/true) {} | |
734 | |
735 ScopedThreadJoinActivity::ScopedThreadJoinActivity( | |
736 const base::PlatformThreadHandle* thread) | |
737 : GlobalActivityTracker::ScopedThreadActivity( | |
738 nullptr, | |
739 ThreadActivityTracker::ACT_THREAD_JOIN, | |
740 ThreadActivityTracker::ActivityData::ForThread(*thread), | |
741 /*lock_allowed=*/true) {} | |
742 | |
743 #if !defined(OS_NACL) && !defined(OS_IOS) | |
744 ScopedProcessWaitActivity::ScopedProcessWaitActivity( | |
745 const base::Process* process) | |
746 : GlobalActivityTracker::ScopedThreadActivity( | |
747 nullptr, | |
748 ThreadActivityTracker::ACT_PROCESS_WAIT, | |
749 ThreadActivityTracker::ActivityData::ForProcess(process->Pid()), | |
750 /*lock_allowed=*/true) {} | |
751 #endif | |
752 | |
753 } // namespace debug | |
754 } // namespace base | |