Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1294)

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2422213002: Added support for storing arbitrary user data. (Closed)
Patch Set: addressed comments by PA Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/debug/activity_tracker.h ('k') | base/debug/activity_tracker_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include <algorithm>
8
7 #include "base/debug/stack_trace.h" 9 #include "base/debug/stack_trace.h"
8 #include "base/files/file.h" 10 #include "base/files/file.h"
9 #include "base/files/file_path.h" 11 #include "base/files/file_path.h"
10 #include "base/files/memory_mapped_file.h" 12 #include "base/files/memory_mapped_file.h"
11 #include "base/logging.h" 13 #include "base/logging.h"
12 #include "base/memory/ptr_util.h" 14 #include "base/memory/ptr_util.h"
13 #include "base/metrics/field_trial.h" 15 #include "base/metrics/field_trial.h"
14 #include "base/metrics/histogram_macros.h" 16 #include "base/metrics/histogram_macros.h"
15 #include "base/pending_task.h" 17 #include "base/pending_task.h"
16 #include "base/process/process.h" 18 #include "base/process/process.h"
17 #include "base/process/process_handle.h" 19 #include "base/process/process_handle.h"
18 #include "base/stl_util.h" 20 #include "base/stl_util.h"
19 #include "base/strings/string_util.h" 21 #include "base/strings/string_util.h"
20 #include "base/threading/platform_thread.h" 22 #include "base/threading/platform_thread.h"
21 23
22 namespace base { 24 namespace base {
23 namespace debug { 25 namespace debug {
24 26
25 namespace { 27 namespace {
26 28
// A number that identifies the memory as having been initialized. It's
// arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
// A version number is added on so that major structure changes won't try to
// read an older version (since the cookie won't match).
const uint32_t kHeaderCookie = 0xC0029B24UL + 2;  // v2

// The minimum depth a stack should support.
const int kMinStackDepth = 2;

// The amount of memory set aside for holding arbitrary user data (key/value
// pairs) globally or associated with ActivityData entries.
const size_t kUserDataSize = 1024;    // bytes
const size_t kGlobalDataSize = 1024;  // bytes

// Value names are length-limited so that the length fits in the single byte
// reserved for it in a record header.
const size_t kMaxUserDataNameLength =
    static_cast<size_t>(std::numeric_limits<uint8_t>::max());
44
// A union that lets a platform thread handle be stored and read back as a
// fixed-width 64-bit id so the persisted format is the same on all builds.
union ThreadRef {
  int64_t as_id;
#if defined(OS_WIN)
  // On Windows, the handle itself is often a pseudo-handle with a common
  // value meaning "this thread" and so the thread-id is used. The former
  // can be converted to a thread-id with a system call.
  PlatformThreadId as_tid;
#elif defined(OS_POSIX)
  // On Posix, the handle is always a unique identifier so no conversion
  // needs to be done. However, its value is officially opaque so there
  // is no one correct way to convert it to a numerical identifier.
  PlatformThreadHandle::Handle as_handle;
#endif
};
50 59
// Rounds |index| up to the next multiple of |alignment|, which must be a
// power of two. In unsigned arithmetic ~(alignment - 1) is identical to
// (0 - alignment): a mask that clears the low-order bits.
size_t RoundUpToAlignment(size_t index, size_t alignment) {
  return (index + (alignment - 1)) & ~(alignment - 1);
}
64
51 } // namespace 65 } // namespace
52 66
53 67
54 // It doesn't matter what is contained in this (though it will be all zeros) 68 // It doesn't matter what is contained in this (though it will be all zeros)
55 // as only the address of it is important. 69 // as only the address of it is important.
56 const ActivityData kNullActivityData = {}; 70 const ActivityData kNullActivityData = {};
57 71
58 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) { 72 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
59 ThreadRef thread_ref; 73 ThreadRef thread_ref;
60 thread_ref.as_id = 0; // Zero the union in case other is smaller. 74 thread_ref.as_id = 0; // Zero the union in case other is smaller.
61 #if defined(OS_WIN) 75 #if defined(OS_WIN)
62 thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); 76 thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
63 #elif defined(OS_POSIX) 77 #elif defined(OS_POSIX)
64 thread_ref.as_handle = handle.platform_handle(); 78 thread_ref.as_handle = handle.platform_handle();
65 #endif 79 #endif
66 return ForThread(thread_ref.as_id); 80 return ForThread(thread_ref.as_id);
67 } 81 }
68 82
69 ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator( 83 ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
70 PersistentMemoryAllocator* allocator, 84 PersistentMemoryAllocator* allocator,
71 uint32_t object_type, 85 uint32_t object_type,
72 uint32_t object_free_type, 86 uint32_t object_free_type,
73 size_t object_size, 87 size_t object_size,
74 size_t cache_size) 88 size_t cache_size,
89 bool make_iterable)
75 : allocator_(allocator), 90 : allocator_(allocator),
76 object_type_(object_type), 91 object_type_(object_type),
77 object_free_type_(object_free_type), 92 object_free_type_(object_free_type),
78 object_size_(object_size), 93 object_size_(object_size),
79 cache_size_(cache_size), 94 cache_size_(cache_size),
95 make_iterable_(make_iterable),
80 iterator_(allocator), 96 iterator_(allocator),
81 cache_values_(new Reference[cache_size]), 97 cache_values_(new Reference[cache_size]),
82 cache_used_(0) { 98 cache_used_(0) {
83 DCHECK(allocator); 99 DCHECK(allocator);
84 } 100 }
85 101
86 ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {} 102 ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {}
87 103
88 ActivityTrackerMemoryAllocator::Reference 104 ActivityTrackerMemoryAllocator::Reference
89 ActivityTrackerMemoryAllocator::GetObjectReference() { 105 ActivityTrackerMemoryAllocator::GetObjectReference() {
(...skipping 30 matching lines...) Expand all
120 break; 136 break;
121 } 137 }
122 if (!found) { 138 if (!found) {
123 // Reached end; start over at the beginning. 139 // Reached end; start over at the beginning.
124 iterator_.Reset(); 140 iterator_.Reset();
125 } 141 }
126 } 142 }
127 143
128 // No free block was found so instead allocate a new one. 144 // No free block was found so instead allocate a new one.
129 Reference allocated = allocator_->Allocate(object_size_, object_type_); 145 Reference allocated = allocator_->Allocate(object_size_, object_type_);
130 if (allocated) 146 if (allocated && make_iterable_)
131 allocator_->MakeIterable(allocated); 147 allocator_->MakeIterable(allocated);
132 return allocated; 148 return allocated;
133 } 149 }
134 150
135 void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) { 151 void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
136 // Zero the memory so that it is ready for immediate use if needed later. 152 // Zero the memory so that it is ready for immediate use if needed later.
137 char* mem_base = allocator_->GetAsObject<char>(ref, object_type_); 153 char* mem_base = allocator_->GetAsObject<char>(ref, object_type_);
138 DCHECK(mem_base); 154 DCHECK(mem_base);
139 memset(mem_base, 0, object_size_); 155 memset(mem_base, 0, object_size_);
140 156
(...skipping 30 matching lines...) Expand all
171 } 187 }
172 activity->call_stack[i - 1] = 0; 188 activity->call_stack[i - 1] = 0;
173 #else 189 #else
174 activity->call_stack[0] = 0; 190 activity->call_stack[0] = 0;
175 #endif 191 #endif
176 } 192 }
177 193
178 ActivitySnapshot::ActivitySnapshot() {} 194 ActivitySnapshot::ActivitySnapshot() {}
179 ActivitySnapshot::~ActivitySnapshot() {} 195 ActivitySnapshot::~ActivitySnapshot() {}
180 196
197 ActivityUserData::ValueInfo::ValueInfo() {}
198 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
199 ActivityUserData::ValueInfo::~ValueInfo() {}
200
201 ActivityUserData::ActivityUserData(void* memory, size_t size)
202 : memory_(static_cast<char*>(memory)), available_(size) {}
203
204 ActivityUserData::~ActivityUserData() {}
205
206 void ActivityUserData::Set(StringPiece name,
207 ValueType type,
208 const void* memory,
209 size_t size) {
210 DCHECK(thread_checker_.CalledOnValidThread());
211 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
212 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
213 size);
214
215 // It's possible that no user data is being stored.
216 if (!memory_)
217 return;
218
219 // The storage of a name is limited so use that limit during lookup.
220 if (name.length() > kMaxUserDataNameLength)
221 name.set(name.data(), kMaxUserDataNameLength);
222
223 ValueInfo* info;
224 auto existing = values_.find(name);
225 if (existing != values_.end()) {
226 info = &existing->second;
227 } else {
228 // The name size is limited to what can be held in a single byte but
229 // because there are not alignment constraints on strings, it's set tight
230 // against the header. It's extent (the reserved space, even if it's not
231 // all used) is calculated so that, when pressed against the header, the
232 // following field will be aligned properly.
233 size_t name_size = std::min(name.length(), kMaxUserDataNameLength);
manzagop (departed) 2016/11/14 19:51:17 No need for the min given the new override at l.21
bcwhite 2016/11/15 13:37:47 Done.
234 size_t name_extent =
235 RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) -
236 sizeof(Header);
237 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
238
239 // The "basic size" is the minimum size of the record. It's possible that
240 // lengthy values will get truncated but there must be at least some bytes
241 // available.
242 size_t basic_size = sizeof(Header) + name_extent + kMemoryAlignment;
243 if (basic_size > available_)
244 return; // No space to store even the smallest value.
245
246 // The "full size" is the size for storing the entire value, truncated
247 // to the amount of available memory.
248 size_t full_size =
249 std::min(sizeof(Header) + name_extent + value_extent, available_);
250 size = std::min(full_size - sizeof(Header) - name_extent, size);
251
252 // Allocate a chunk of memory.
253 Header* header = reinterpret_cast<Header*>(memory_);
254 memory_ += full_size;
255 available_ -= full_size;
256
257 // Datafill the header and name records. Memory must be zeroed. The |type|
258 // is written last, atomically, to release all the other values.
259 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
260 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
261 header->name_size = static_cast<uint8_t>(name_size);
262 header->record_size = full_size;
263 char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header);
264 void* value_memory =
265 reinterpret_cast<char*>(header) + sizeof(Header) + name_extent;
266 memcpy(name_memory, name.data(), name_size);
267 header->type.store(type, std::memory_order_release);
268
269 // Create an entry in |values_| so that this field can be found and changed
270 // later on without having to allocate new entries.
271 StringPiece persistent_name(name_memory, name_size);
272 auto inserted =
273 values_.insert(std::make_pair(persistent_name, ValueInfo()));
274 DCHECK(inserted.second); // True if inserted, false if existed.
275 info = &inserted.first->second;
276 info->name = persistent_name;
277 info->memory = value_memory;
278 info->size_ptr = &header->value_size;
279 info->extent = full_size - sizeof(Header) - name_extent;
280 info->type = type;
281 }
282
283 // Copy the value data to storage. The |size| is written last, atomically, to
284 // release the copied data. Until then, a parallel reader will just ignore
285 // records with a zero size.
286 DCHECK_EQ(type, info->type);
287 size = std::min(size, info->extent);
288 info->size_ptr->store(0, std::memory_order_acq_rel);
289 memcpy(info->memory, memory, size);
290 info->size_ptr->store(size, std::memory_order_release);
291 }
292
293 void ActivityUserData::SetReference(StringPiece name,
294 ValueType type,
295 const void* memory,
296 size_t size) {
297 ReferenceRecord rec;
298 rec.address = reinterpret_cast<uintptr_t>(memory);
299 rec.size = size;
300 Set(name, type, &rec, sizeof(rec));
301 }
181 302
182 // This information is kept for every thread that is tracked. It is filled 303 // This information is kept for every thread that is tracked. It is filled
183 // the very first time the thread is seen. All fields must be of exact sizes 304 // the very first time the thread is seen. All fields must be of exact sizes
184 // so there is no issue moving between 32 and 64-bit builds. 305 // so there is no issue moving between 32 and 64-bit builds.
185 struct ThreadActivityTracker::Header { 306 struct ThreadActivityTracker::Header {
186 // This unique number indicates a valid initialization of the memory. 307 // This unique number indicates a valid initialization of the memory.
187 std::atomic<uint32_t> cookie; 308 std::atomic<uint32_t> cookie;
188 uint32_t reserved; // pad out to 64 bits 309 uint32_t reserved; // pad out to 64 bits
189 310
190 // The process-id and thread-id (thread_ref.as_id) to which this data belongs. 311 // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
225 // is not the current implementation so no parallel snapshots allowed). 346 // is not the current implementation so no parallel snapshots allowed).
226 std::atomic<uint32_t> stack_unchanged; 347 std::atomic<uint32_t> stack_unchanged;
227 348
228 // The name of the thread (up to a maximum length). Dynamic-length names 349 // The name of the thread (up to a maximum length). Dynamic-length names
229 // are not practical since the memory has to come from the same persistent 350 // are not practical since the memory has to come from the same persistent
230 // allocator that holds this structure and to which this object has no 351 // allocator that holds this structure and to which this object has no
231 // reference. 352 // reference.
232 char thread_name[32]; 353 char thread_name[32];
233 }; 354 };
234 355
356 ThreadActivityTracker::ScopedActivity::ScopedActivity(
357 ThreadActivityTracker* tracker,
358 const void* origin,
359 Activity::Type type,
360 const ActivityData& data)
361 : tracker_(tracker) {
362 if (tracker_)
363 activity_id_ = tracker_->PushActivity(origin, type, data);
364 }
365
366 ThreadActivityTracker::ScopedActivity::~ScopedActivity() {
367 if (tracker_)
368 tracker_->PopActivity(activity_id_);
369 }
370
371 void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData(
372 Activity::Type type,
373 const ActivityData& data) {
374 if (tracker_)
375 tracker_->ChangeActivity(activity_id_, type, data);
376 }
377
378 ActivityUserData& ThreadActivityTracker::ScopedActivity::user_data() {
379 if (!user_data_) {
380 if (tracker_)
381 user_data_ = tracker_->GetUserData(activity_id_);
382 else
383 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
384 }
385 return *user_data_;
386 }
387
235 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size) 388 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
236 : header_(static_cast<Header*>(base)), 389 : header_(static_cast<Header*>(base)),
237 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) + 390 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
238 sizeof(Header))), 391 sizeof(Header))),
239 stack_slots_( 392 stack_slots_(
240 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) { 393 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
241 DCHECK(thread_checker_.CalledOnValidThread()); 394 DCHECK(thread_checker_.CalledOnValidThread());
242 395
243 // Verify the parameters but fail gracefully if they're not valid so that 396 // Verify the parameters but fail gracefully if they're not valid so that
244 // production code based on external inputs will not crash. IsValid() will 397 // production code based on external inputs will not crash. IsValid() will
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
302 DCHECK(IsValid()); 455 DCHECK(IsValid());
303 } else { 456 } else {
304 // This is a file with existing data. Perform basic consistency checks. 457 // This is a file with existing data. Perform basic consistency checks.
305 valid_ = true; 458 valid_ = true;
306 valid_ = IsValid(); 459 valid_ = IsValid();
307 } 460 }
308 } 461 }
309 462
310 ThreadActivityTracker::~ThreadActivityTracker() {} 463 ThreadActivityTracker::~ThreadActivityTracker() {}
311 464
312 void ThreadActivityTracker::PushActivity(const void* origin, 465 ThreadActivityTracker::ActivityId ThreadActivityTracker::PushActivity(
313 Activity::Type type, 466 const void* origin,
314 const ActivityData& data) { 467 Activity::Type type,
468 const ActivityData& data) {
315 // A thread-checker creates a lock to check the thread-id which means 469 // A thread-checker creates a lock to check the thread-id which means
316 // re-entry into this code if lock acquisitions are being tracked. 470 // re-entry into this code if lock acquisitions are being tracked.
317 DCHECK(type == Activity::ACT_LOCK_ACQUIRE || 471 DCHECK(type == Activity::ACT_LOCK_ACQUIRE ||
318 thread_checker_.CalledOnValidThread()); 472 thread_checker_.CalledOnValidThread());
319 473
320 // Get the current depth of the stack. No access to other memory guarded 474 // Get the current depth of the stack. No access to other memory guarded
321 // by this variable is done here so a "relaxed" load is acceptable. 475 // by this variable is done here so a "relaxed" load is acceptable.
322 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed); 476 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
323 477
324 // Handle the case where the stack depth has exceeded the storage capacity. 478 // Handle the case where the stack depth has exceeded the storage capacity.
325 // Extra entries will be lost leaving only the base of the stack. 479 // Extra entries will be lost leaving only the base of the stack.
326 if (depth >= stack_slots_) { 480 if (depth >= stack_slots_) {
327 // Since no other threads modify the data, no compare/exchange is needed. 481 // Since no other threads modify the data, no compare/exchange is needed.
328 // Since no other memory is being modified, a "relaxed" store is acceptable. 482 // Since no other memory is being modified, a "relaxed" store is acceptable.
329 header_->current_depth.store(depth + 1, std::memory_order_relaxed); 483 header_->current_depth.store(depth + 1, std::memory_order_relaxed);
330 return; 484 return depth;
331 } 485 }
332 486
333 // Get a pointer to the next activity and load it. No atomicity is required 487 // Get a pointer to the next activity and load it. No atomicity is required
334 // here because the memory is known only to this thread. It will be made 488 // here because the memory is known only to this thread. It will be made
335 // known to other threads once the depth is incremented. 489 // known to other threads once the depth is incremented.
336 Activity::FillFrom(&stack_[depth], origin, type, data); 490 Activity::FillFrom(&stack_[depth], origin, type, data);
337 491
338 // Save the incremented depth. Because this guards |activity| memory filled 492 // Save the incremented depth. Because this guards |activity| memory filled
339 // above that may be read by another thread once the recorded depth changes, 493 // above that may be read by another thread once the recorded depth changes,
340 // a "release" store is required. 494 // a "release" store is required.
341 header_->current_depth.store(depth + 1, std::memory_order_release); 495 header_->current_depth.store(depth + 1, std::memory_order_release);
496
497 // The current depth is used as the activity ID because it simply identifies
498 // an entry. Once an entry is pop'd, it's okay to reuse the ID.
499 return depth;
342 } 500 }
343 501
344 void ThreadActivityTracker::ChangeActivity(Activity::Type type, 502 void ThreadActivityTracker::ChangeActivity(ActivityId id,
503 Activity::Type type,
345 const ActivityData& data) { 504 const ActivityData& data) {
346 DCHECK(thread_checker_.CalledOnValidThread()); 505 DCHECK(thread_checker_.CalledOnValidThread());
347 DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData); 506 DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData);
348 507 DCHECK_LT(id, header_->current_depth.load(std::memory_order_acquire));
349 // Get the current depth of the stack and acquire the data held there.
350 uint32_t depth = header_->current_depth.load(std::memory_order_acquire);
351 DCHECK_LT(0U, depth);
352 508
353 // Update the information if it is being recorded (i.e. within slot limit). 509 // Update the information if it is being recorded (i.e. within slot limit).
354 if (depth <= stack_slots_) { 510 if (id < stack_slots_) {
355 Activity* activity = &stack_[depth - 1]; 511 Activity* activity = &stack_[id];
356 512
357 if (type != Activity::ACT_NULL) { 513 if (type != Activity::ACT_NULL) {
358 DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK, 514 DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK,
359 type & Activity::ACT_CATEGORY_MASK); 515 type & Activity::ACT_CATEGORY_MASK);
360 activity->activity_type = type; 516 activity->activity_type = type;
361 } 517 }
362 518
363 if (&data != &kNullActivityData) 519 if (&data != &kNullActivityData)
364 activity->data = data; 520 activity->data = data;
365 } 521 }
366 } 522 }
367 523
368 void ThreadActivityTracker::PopActivity() { 524 void ThreadActivityTracker::PopActivity(ActivityId id) {
369 // Do an atomic decrement of the depth. No changes to stack entries guarded 525 // Do an atomic decrement of the depth. No changes to stack entries guarded
370 // by this variable are done here so a "relaxed" operation is acceptable. 526 // by this variable are done here so a "relaxed" operation is acceptable.
371 // |depth| will receive the value BEFORE it was modified. 527 // |depth| will receive the value BEFORE it was modified which means the
528 // return value must also be decremented. The slot will be "free" after
529 // this call but since only a single thread can access this object, the
530 // data will remain valid until this method returns or calls outside.
372 uint32_t depth = 531 uint32_t depth =
373 header_->current_depth.fetch_sub(1, std::memory_order_relaxed); 532 header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1;
374 533
375 // Validate that everything is running correctly. 534 // Validate that everything is running correctly.
376 DCHECK_LT(0U, depth); 535 DCHECK_EQ(id, depth);
377 536
378 // A thread-checker creates a lock to check the thread-id which means 537 // A thread-checker creates a lock to check the thread-id which means
379 // re-entry into this code if lock acquisitions are being tracked. 538 // re-entry into this code if lock acquisitions are being tracked.
380 DCHECK(stack_[depth - 1].activity_type == Activity::ACT_LOCK_ACQUIRE || 539 DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
381 thread_checker_.CalledOnValidThread()); 540 thread_checker_.CalledOnValidThread());
382 541
542 // Check if there was any user-data memory. It isn't free'd until later
543 // because the call to release it can push something on the stack.
544 PersistentMemoryAllocator::Reference user_data = stack_[depth].user_data;
545 stack_[depth].user_data = 0;
546
383 // The stack has shrunk meaning that some other thread trying to copy the 547 // The stack has shrunk meaning that some other thread trying to copy the
384 // contents for reporting purposes could get bad data. That thread would 548 // contents for reporting purposes could get bad data. That thread would
385 // have written a non-zero value into |stack_unchanged|; clearing it here 549 // have written a non-zero value into |stack_unchanged|; clearing it here
386 // will let that thread detect that something did change. This needs to 550 // will let that thread detect that something did change. This needs to
387 // happen after the atomic |depth| operation above so a "release" store 551 // happen after the atomic |depth| operation above so a "release" store
388 // is required. 552 // is required.
389 header_->stack_unchanged.store(0, std::memory_order_release); 553 header_->stack_unchanged.store(0, std::memory_order_release);
554
555 // Release resources located above. All stack processing is done so it's
556 // safe if some outside code does another push.
557 if (user_data)
558 GlobalActivityTracker::Get()->ReleaseUserDataMemory(&user_data);
559 }
560
561 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
562 ActivityId id) {
563 // User-data is only stored for activities actually held in the stack.
564 if (id < stack_slots_) {
565 void* memory =
566 GlobalActivityTracker::Get()->GetUserDataMemory(&stack_[id].user_data);
567 if (memory)
568 return MakeUnique<ActivityUserData>(memory, kUserDataSize);
569 }
570
571 // Return a dummy object that will still accept (but ignore) Set() calls.
572 return MakeUnique<ActivityUserData>(nullptr, 0);
390 } 573 }
391 574
392 bool ThreadActivityTracker::IsValid() const { 575 bool ThreadActivityTracker::IsValid() const {
393 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie || 576 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie ||
394 header_->process_id.load(std::memory_order_relaxed) == 0 || 577 header_->process_id.load(std::memory_order_relaxed) == 0 ||
395 header_->thread_ref.as_id == 0 || 578 header_->thread_ref.as_id == 0 ||
396 header_->start_time == 0 || 579 header_->start_time == 0 ||
397 header_->start_ticks == 0 || 580 header_->start_ticks == 0 ||
398 header_->stack_slots != stack_slots_ || 581 header_->stack_slots != stack_slots_ ||
399 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { 582 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
(...skipping 223 matching lines...) Expand 10 before | Expand all | Expand 10 after
623 return tracker; 806 return tracker;
624 } 807 }
625 808
626 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { 809 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
627 ThreadActivityTracker* tracker = 810 ThreadActivityTracker* tracker =
628 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); 811 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
629 if (tracker) 812 if (tracker)
630 delete tracker; 813 delete tracker;
631 } 814 }
632 815
816 void* GlobalActivityTracker::GetUserDataMemory(
817 PersistentMemoryAllocator::Reference* reference) {
818 if (!*reference) {
819 base::AutoLock autolock(user_data_allocator_lock_);
820 *reference = user_data_allocator_.GetObjectReference();
821 if (!*reference)
822 return nullptr;
823 }
824
825 void* memory =
826 allocator_->GetAsObject<char>(*reference, kTypeIdUserDataRecord);
827 DCHECK(memory);
828 return memory;
829 }
830
831 void GlobalActivityTracker::ReleaseUserDataMemory(
832 PersistentMemoryAllocator::Reference* reference) {
833 DCHECK(*reference);
834 base::AutoLock autolock(user_data_allocator_lock_);
835 user_data_allocator_.ReleaseObjectReference(*reference);
836 *reference = PersistentMemoryAllocator::kReferenceNull;
837 }
838
633 GlobalActivityTracker::GlobalActivityTracker( 839 GlobalActivityTracker::GlobalActivityTracker(
634 std::unique_ptr<PersistentMemoryAllocator> allocator, 840 std::unique_ptr<PersistentMemoryAllocator> allocator,
635 int stack_depth) 841 int stack_depth)
636 : allocator_(std::move(allocator)), 842 : allocator_(std::move(allocator)),
637 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), 843 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
638 this_thread_tracker_(&OnTLSDestroy), 844 this_thread_tracker_(&OnTLSDestroy),
639 thread_tracker_count_(0), 845 thread_tracker_count_(0),
640 thread_tracker_allocator_(allocator_.get(), 846 thread_tracker_allocator_(allocator_.get(),
641 kTypeIdActivityTracker, 847 kTypeIdActivityTracker,
642 kTypeIdActivityTrackerFree, 848 kTypeIdActivityTrackerFree,
643 stack_memory_size_, 849 stack_memory_size_,
644 kCachedThreadMemories) { 850 kCachedThreadMemories,
851 /*make_iterable=*/true),
852 user_data_allocator_(allocator_.get(),
853 kTypeIdUserDataRecord,
854 kTypeIdUserDataRecordFree,
855 kUserDataSize,
856 kCachedUserDataMemories,
857 /*make_iterable=*/false),
858 user_data_(
859 allocator_->GetAsObject<char>(
860 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
861 kTypeIdGlobalDataRecord),
862 kGlobalDataSize) {
645 // Ensure the passed memory is valid and empty (iterator finds nothing). 863 // Ensure the passed memory is valid and empty (iterator finds nothing).
646 uint32_t type; 864 uint32_t type;
647 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); 865 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
648 866
649 // Ensure that there is no other global object and then make this one such. 867 // Ensure that there is no other global object and then make this one such.
650 DCHECK(!g_tracker_); 868 DCHECK(!g_tracker_);
651 g_tracker_ = this; 869 g_tracker_ = this;
652 } 870 }
653 871
654 GlobalActivityTracker::~GlobalActivityTracker() { 872 GlobalActivityTracker::~GlobalActivityTracker() {
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after
744 const base::Process* process) 962 const base::Process* process)
745 : GlobalActivityTracker::ScopedThreadActivity( 963 : GlobalActivityTracker::ScopedThreadActivity(
746 nullptr, 964 nullptr,
747 Activity::ACT_PROCESS_WAIT, 965 Activity::ACT_PROCESS_WAIT,
748 ActivityData::ForProcess(process->Pid()), 966 ActivityData::ForProcess(process->Pid()),
749 /*lock_allowed=*/true) {} 967 /*lock_allowed=*/true) {}
750 #endif 968 #endif
751 969
752 } // namespace debug 970 } // namespace debug
753 } // namespace base 971 } // namespace base
OLDNEW
« no previous file with comments | « base/debug/activity_tracker.h ('k') | base/debug/activity_tracker_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698