Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(824)

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2422213002: Added support for storing arbitrary user data. (Closed)
Patch Set: some 'git cl format' changes Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/debug/activity_tracker.h ('k') | base/debug/activity_tracker_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include <algorithm>
8
7 #include "base/debug/stack_trace.h" 9 #include "base/debug/stack_trace.h"
8 #include "base/files/file.h" 10 #include "base/files/file.h"
9 #include "base/files/file_path.h" 11 #include "base/files/file_path.h"
10 #include "base/files/memory_mapped_file.h" 12 #include "base/files/memory_mapped_file.h"
11 #include "base/logging.h" 13 #include "base/logging.h"
12 #include "base/memory/ptr_util.h" 14 #include "base/memory/ptr_util.h"
13 #include "base/metrics/field_trial.h" 15 #include "base/metrics/field_trial.h"
14 #include "base/metrics/histogram_macros.h" 16 #include "base/metrics/histogram_macros.h"
15 #include "base/pending_task.h" 17 #include "base/pending_task.h"
16 #include "base/process/process.h" 18 #include "base/process/process.h"
17 #include "base/process/process_handle.h" 19 #include "base/process/process_handle.h"
18 #include "base/stl_util.h" 20 #include "base/stl_util.h"
19 #include "base/strings/string_util.h" 21 #include "base/strings/string_util.h"
20 #include "base/threading/platform_thread.h" 22 #include "base/threading/platform_thread.h"
21 23
22 namespace base { 24 namespace base {
23 namespace debug { 25 namespace debug {
24 26
25 namespace { 27 namespace {
26 28
27 // A number that identifies the memory as having been initialized. It's 29 // A number that identifies the memory as having been initialized. It's
28 // arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker). 30 // arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
29 // A version number is added on so that major structure changes won't try to 31 // A version number is added on so that major structure changes won't try to
30 // read an older version (since the cookie won't match). 32 // read an older version (since the cookie won't match).
31 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2 33 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2
32 34
33 // The minimum depth a stack should support. 35 // The minimum depth a stack should support.
34 const int kMinStackDepth = 2; 36 const int kMinStackDepth = 2;
35 37
38 // The amount of memory set aside for holding arbitrary user data (key/value
39 // pairs) globally or associated with ActivityData entries.
40 const size_t kUserDataSize = 1024; // bytes
41 const size_t kGlobalDataSize = 1024; // bytes
42 const size_t kMaxUserDataNameLength =
43 static_cast<size_t>(std::numeric_limits<uint8_t>::max());
44
36 union ThreadRef { 45 union ThreadRef {
37 int64_t as_id; 46 int64_t as_id;
38 #if defined(OS_WIN) 47 #if defined(OS_WIN)
39 // On Windows, the handle itself is often a pseudo-handle with a common 48 // On Windows, the handle itself is often a pseudo-handle with a common
40 // value meaning "this thread" and so the thread-id is used. The former 49 // value meaning "this thread" and so the thread-id is used. The former
41 // can be converted to a thread-id with a system call. 50 // can be converted to a thread-id with a system call.
42 PlatformThreadId as_tid; 51 PlatformThreadId as_tid;
43 #elif defined(OS_POSIX) 52 #elif defined(OS_POSIX)
44 // On Posix, the handle is always a unique identifier so no conversion 53 // On Posix, the handle is always a unique identifier so no conversion
45 // needs to be done. However, it's value is officially opaque so there 54 // needs to be done. However, it's value is officially opaque so there
46 // is no one correct way to convert it to a numerical identifier. 55 // is no one correct way to convert it to a numerical identifier.
47 PlatformThreadHandle::Handle as_handle; 56 PlatformThreadHandle::Handle as_handle;
48 #endif 57 #endif
49 }; 58 };
50 59
// Rounds |index| up to the next multiple of |alignment|. |alignment| is
// assumed to be a power of two (as kMemoryAlignment is); the result is
// |index| itself when it is already aligned.
size_t RoundUpToAlignment(size_t index, size_t alignment) {
  // ~(alignment - 1) is an all-ones mask with the low bits cleared; adding
  // alignment - 1 first makes the truncation round upward. For unsigned
  // arithmetic this mask is bit-identical to the (0 - alignment) form.
  return (index + alignment - 1) & ~(alignment - 1);
}
64
51 } // namespace 65 } // namespace
52 66
53 67
54 // It doesn't matter what is contained in this (though it will be all zeros) 68 // It doesn't matter what is contained in this (though it will be all zeros)
55 // as only the address of it is important. 69 // as only the address of it is important.
56 const ActivityData kNullActivityData = {}; 70 const ActivityData kNullActivityData = {};
57 71
58 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) { 72 ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
59 ThreadRef thread_ref; 73 ThreadRef thread_ref;
60 thread_ref.as_id = 0; // Zero the union in case other is smaller. 74 thread_ref.as_id = 0; // Zero the union in case other is smaller.
61 #if defined(OS_WIN) 75 #if defined(OS_WIN)
62 thread_ref.as_tid = ::GetThreadId(handle.platform_handle()); 76 thread_ref.as_tid = ::GetThreadId(handle.platform_handle());
63 #elif defined(OS_POSIX) 77 #elif defined(OS_POSIX)
64 thread_ref.as_handle = handle.platform_handle(); 78 thread_ref.as_handle = handle.platform_handle();
65 #endif 79 #endif
66 return ForThread(thread_ref.as_id); 80 return ForThread(thread_ref.as_id);
67 } 81 }
68 82
69 ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator( 83 ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
70 PersistentMemoryAllocator* allocator, 84 PersistentMemoryAllocator* allocator,
71 uint32_t object_type, 85 uint32_t object_type,
72 uint32_t object_free_type, 86 uint32_t object_free_type,
73 size_t object_size, 87 size_t object_size,
74 size_t cache_size) 88 size_t cache_size,
89 bool make_iterable)
75 : allocator_(allocator), 90 : allocator_(allocator),
76 object_type_(object_type), 91 object_type_(object_type),
77 object_free_type_(object_free_type), 92 object_free_type_(object_free_type),
78 object_size_(object_size), 93 object_size_(object_size),
79 cache_size_(cache_size), 94 cache_size_(cache_size),
95 make_iterable_(make_iterable),
80 iterator_(allocator), 96 iterator_(allocator),
81 cache_values_(new Reference[cache_size]), 97 cache_values_(new Reference[cache_size]),
82 cache_used_(0) { 98 cache_used_(0) {
83 DCHECK(allocator); 99 DCHECK(allocator);
84 } 100 }
85 101
86 ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {} 102 ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {}
87 103
88 ActivityTrackerMemoryAllocator::Reference 104 ActivityTrackerMemoryAllocator::Reference
89 ActivityTrackerMemoryAllocator::GetObjectReference() { 105 ActivityTrackerMemoryAllocator::GetObjectReference() {
(...skipping 30 matching lines...) Expand all
120 break; 136 break;
121 } 137 }
122 if (!found) { 138 if (!found) {
123 // Reached end; start over at the beginning. 139 // Reached end; start over at the beginning.
124 iterator_.Reset(); 140 iterator_.Reset();
125 } 141 }
126 } 142 }
127 143
128 // No free block was found so instead allocate a new one. 144 // No free block was found so instead allocate a new one.
129 Reference allocated = allocator_->Allocate(object_size_, object_type_); 145 Reference allocated = allocator_->Allocate(object_size_, object_type_);
130 if (allocated) 146 if (allocated && make_iterable_)
131 allocator_->MakeIterable(allocated); 147 allocator_->MakeIterable(allocated);
132 return allocated; 148 return allocated;
133 } 149 }
134 150
135 void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) { 151 void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
136 // Zero the memory so that it is ready for immediate use if needed later. 152 // Zero the memory so that it is ready for immediate use if needed later.
137 char* mem_base = allocator_->GetAsObject<char>(ref, object_type_); 153 char* mem_base = allocator_->GetAsObject<char>(ref, object_type_);
138 DCHECK(mem_base); 154 DCHECK(mem_base);
139 memset(mem_base, 0, object_size_); 155 memset(mem_base, 0, object_size_);
140 156
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
173 } 189 }
174 activity->call_stack[i - 1] = 0; 190 activity->call_stack[i - 1] = 0;
175 #else 191 #else
176 activity->call_stack[0] = 0; 192 activity->call_stack[0] = 0;
177 #endif 193 #endif
178 } 194 }
179 195
180 ActivitySnapshot::ActivitySnapshot() {} 196 ActivitySnapshot::ActivitySnapshot() {}
181 ActivitySnapshot::~ActivitySnapshot() {} 197 ActivitySnapshot::~ActivitySnapshot() {}
182 198
199 ActivityUserData::ValueInfo::ValueInfo() {}
200 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
201 ActivityUserData::ValueInfo::~ValueInfo() {}
202
203 ActivityUserData::ActivityUserData(void* memory, size_t size)
204 : memory_(static_cast<char*>(memory)), available_(size) {}
205
206 ActivityUserData::~ActivityUserData() {}
207
208 void ActivityUserData::Set(StringPiece name,
209 ValueType type,
210 const void* memory,
211 size_t size) {
212 DCHECK(thread_checker_.CalledOnValidThread());
213 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
214 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
215 size);
216
217 // It's possible that no user data is being stored.
218 if (!memory_)
219 return;
220
221 // The storage of a name is limited so use that limit during lookup.
222 if (name.length() > kMaxUserDataNameLength)
223 name.set(name.data(), kMaxUserDataNameLength);
224
225 ValueInfo* info;
226 auto existing = values_.find(name);
227 if (existing != values_.end()) {
228 info = &existing->second;
229 } else {
230 // The name size is limited to what can be held in a single byte but
231 // because there are not alignment constraints on strings, it's set tight
232 // against the header. Its extent (the reserved space, even if it's not
233 // all used) is calculated so that, when pressed against the header, the
234 // following field will be aligned properly.
235 size_t name_size = name.length();
236 size_t name_extent =
237 RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) -
238 sizeof(Header);
239 size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
240
241 // The "basic size" is the minimum size of the record. It's possible that
242 // lengthy values will get truncated but there must be at least some bytes
243 // available.
244 size_t basic_size = sizeof(Header) + name_extent + kMemoryAlignment;
245 if (basic_size > available_)
246 return; // No space to store even the smallest value.
247
248 // The "full size" is the size for storing the entire value, truncated
249 // to the amount of available memory.
250 size_t full_size =
251 std::min(sizeof(Header) + name_extent + value_extent, available_);
252 size = std::min(full_size - sizeof(Header) - name_extent, size);
253
254 // Allocate a chunk of memory.
255 Header* header = reinterpret_cast<Header*>(memory_);
256 memory_ += full_size;
257 available_ -= full_size;
258
259 // Datafill the header and name records. Memory must be zeroed. The |type|
260 // is written last, atomically, to release all the other values.
261 DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
262 DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
263 header->name_size = static_cast<uint8_t>(name_size);
264 header->record_size = full_size;
265 char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header);
266 void* value_memory =
267 reinterpret_cast<char*>(header) + sizeof(Header) + name_extent;
268 memcpy(name_memory, name.data(), name_size);
269 header->type.store(type, std::memory_order_release);
270
271 // Create an entry in |values_| so that this field can be found and changed
272 // later on without having to allocate new entries.
273 StringPiece persistent_name(name_memory, name_size);
274 auto inserted =
275 values_.insert(std::make_pair(persistent_name, ValueInfo()));
276 DCHECK(inserted.second); // True if inserted, false if existed.
277 info = &inserted.first->second;
278 info->name = persistent_name;
279 info->memory = value_memory;
280 info->size_ptr = &header->value_size;
281 info->extent = full_size - sizeof(Header) - name_extent;
282 info->type = type;
283 }
284
285 // Copy the value data to storage. The |size| is written last, atomically, to
286 // release the copied data. Until then, a parallel reader will just ignore
287 // records with a zero size.
288 DCHECK_EQ(type, info->type);
289 size = std::min(size, info->extent);
290 info->size_ptr->store(0, std::memory_order_seq_cst);
291 memcpy(info->memory, memory, size);
292 info->size_ptr->store(size, std::memory_order_release);
293 }
294
295 void ActivityUserData::SetReference(StringPiece name,
296 ValueType type,
297 const void* memory,
298 size_t size) {
299 ReferenceRecord rec;
300 rec.address = reinterpret_cast<uintptr_t>(memory);
301 rec.size = size;
302 Set(name, type, &rec, sizeof(rec));
303 }
183 304
184 // This information is kept for every thread that is tracked. It is filled 305 // This information is kept for every thread that is tracked. It is filled
185 // the very first time the thread is seen. All fields must be of exact sizes 306 // the very first time the thread is seen. All fields must be of exact sizes
186 // so there is no issue moving between 32 and 64-bit builds. 307 // so there is no issue moving between 32 and 64-bit builds.
187 struct ThreadActivityTracker::Header { 308 struct ThreadActivityTracker::Header {
188 // This unique number indicates a valid initialization of the memory. 309 // This unique number indicates a valid initialization of the memory.
189 std::atomic<uint32_t> cookie; 310 std::atomic<uint32_t> cookie;
190 uint32_t reserved; // pad out to 64 bits 311 uint32_t reserved; // pad out to 64 bits
191 312
192 // The process-id and thread-id (thread_ref.as_id) to which this data belongs. 313 // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
227 // is not the current implementation so no parallel snapshots allowed). 348 // is not the current implementation so no parallel snapshots allowed).
228 std::atomic<uint32_t> stack_unchanged; 349 std::atomic<uint32_t> stack_unchanged;
229 350
230 // The name of the thread (up to a maximum length). Dynamic-length names 351 // The name of the thread (up to a maximum length). Dynamic-length names
231 // are not practical since the memory has to come from the same persistent 352 // are not practical since the memory has to come from the same persistent
232 // allocator that holds this structure and to which this object has no 353 // allocator that holds this structure and to which this object has no
233 // reference. 354 // reference.
234 char thread_name[32]; 355 char thread_name[32];
235 }; 356 };
236 357
358 ThreadActivityTracker::ScopedActivity::ScopedActivity(
359 ThreadActivityTracker* tracker,
360 const void* program_counter,
361 const void* origin,
362 Activity::Type type,
363 const ActivityData& data)
364 : tracker_(tracker) {
365 if (tracker_)
366 activity_id_ = tracker_->PushActivity(program_counter, origin, type, data);
367 }
368
369 ThreadActivityTracker::ScopedActivity::~ScopedActivity() {
370 if (tracker_)
371 tracker_->PopActivity(activity_id_);
372 }
373
374 void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData(
375 Activity::Type type,
376 const ActivityData& data) {
377 if (tracker_)
378 tracker_->ChangeActivity(activity_id_, type, data);
379 }
380
381 ActivityUserData& ThreadActivityTracker::ScopedActivity::user_data() {
382 if (!user_data_) {
383 if (tracker_)
384 user_data_ = tracker_->GetUserData(activity_id_);
385 else
386 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
387 }
388 return *user_data_;
389 }
390
237 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size) 391 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
238 : header_(static_cast<Header*>(base)), 392 : header_(static_cast<Header*>(base)),
239 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) + 393 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
240 sizeof(Header))), 394 sizeof(Header))),
241 stack_slots_( 395 stack_slots_(
242 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) { 396 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
243 DCHECK(thread_checker_.CalledOnValidThread()); 397 DCHECK(thread_checker_.CalledOnValidThread());
244 398
245 // Verify the parameters but fail gracefully if they're not valid so that 399 // Verify the parameters but fail gracefully if they're not valid so that
246 // production code based on external inputs will not crash. IsValid() will 400 // production code based on external inputs will not crash. IsValid() will
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
304 DCHECK(IsValid()); 458 DCHECK(IsValid());
305 } else { 459 } else {
306 // This is a file with existing data. Perform basic consistency checks. 460 // This is a file with existing data. Perform basic consistency checks.
307 valid_ = true; 461 valid_ = true;
308 valid_ = IsValid(); 462 valid_ = IsValid();
309 } 463 }
310 } 464 }
311 465
312 ThreadActivityTracker::~ThreadActivityTracker() {} 466 ThreadActivityTracker::~ThreadActivityTracker() {}
313 467
314 void ThreadActivityTracker::PushActivity(const void* program_counter, 468 ThreadActivityTracker::ActivityId ThreadActivityTracker::PushActivity(
315 const void* origin, 469 const void* program_counter,
316 Activity::Type type, 470 const void* origin,
317 const ActivityData& data) { 471 Activity::Type type,
472 const ActivityData& data) {
318 // A thread-checker creates a lock to check the thread-id which means 473 // A thread-checker creates a lock to check the thread-id which means
319 // re-entry into this code if lock acquisitions are being tracked. 474 // re-entry into this code if lock acquisitions are being tracked.
320 DCHECK(type == Activity::ACT_LOCK_ACQUIRE || 475 DCHECK(type == Activity::ACT_LOCK_ACQUIRE ||
321 thread_checker_.CalledOnValidThread()); 476 thread_checker_.CalledOnValidThread());
322 477
323 // Get the current depth of the stack. No access to other memory guarded 478 // Get the current depth of the stack. No access to other memory guarded
324 // by this variable is done here so a "relaxed" load is acceptable. 479 // by this variable is done here so a "relaxed" load is acceptable.
325 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed); 480 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
326 481
327 // Handle the case where the stack depth has exceeded the storage capacity. 482 // Handle the case where the stack depth has exceeded the storage capacity.
328 // Extra entries will be lost leaving only the base of the stack. 483 // Extra entries will be lost leaving only the base of the stack.
329 if (depth >= stack_slots_) { 484 if (depth >= stack_slots_) {
330 // Since no other threads modify the data, no compare/exchange is needed. 485 // Since no other threads modify the data, no compare/exchange is needed.
331 // Since no other memory is being modified, a "relaxed" store is acceptable. 486 // Since no other memory is being modified, a "relaxed" store is acceptable.
332 header_->current_depth.store(depth + 1, std::memory_order_relaxed); 487 header_->current_depth.store(depth + 1, std::memory_order_relaxed);
333 return; 488 return depth;
334 } 489 }
335 490
336 // Get a pointer to the next activity and load it. No atomicity is required 491 // Get a pointer to the next activity and load it. No atomicity is required
337 // here because the memory is known only to this thread. It will be made 492 // here because the memory is known only to this thread. It will be made
338 // known to other threads once the depth is incremented. 493 // known to other threads once the depth is incremented.
339 Activity::FillFrom(&stack_[depth], program_counter, origin, type, data); 494 Activity::FillFrom(&stack_[depth], program_counter, origin, type, data);
340 495
341 // Save the incremented depth. Because this guards |activity| memory filled 496 // Save the incremented depth. Because this guards |activity| memory filled
342 // above that may be read by another thread once the recorded depth changes, 497 // above that may be read by another thread once the recorded depth changes,
343 // a "release" store is required. 498 // a "release" store is required.
344 header_->current_depth.store(depth + 1, std::memory_order_release); 499 header_->current_depth.store(depth + 1, std::memory_order_release);
500
501 // The current depth is used as the activity ID because it simply identifies
502 // an entry. Once an entry is pop'd, it's okay to reuse the ID.
503 return depth;
345 } 504 }
346 505
347 void ThreadActivityTracker::ChangeActivity(Activity::Type type, 506 void ThreadActivityTracker::ChangeActivity(ActivityId id,
507 Activity::Type type,
348 const ActivityData& data) { 508 const ActivityData& data) {
349 DCHECK(thread_checker_.CalledOnValidThread()); 509 DCHECK(thread_checker_.CalledOnValidThread());
350 DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData); 510 DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData);
351 511 DCHECK_LT(id, header_->current_depth.load(std::memory_order_acquire));
352 // Get the current depth of the stack and acquire the data held there.
353 uint32_t depth = header_->current_depth.load(std::memory_order_acquire);
354 DCHECK_LT(0U, depth);
355 512
356 // Update the information if it is being recorded (i.e. within slot limit). 513 // Update the information if it is being recorded (i.e. within slot limit).
357 if (depth <= stack_slots_) { 514 if (id < stack_slots_) {
358 Activity* activity = &stack_[depth - 1]; 515 Activity* activity = &stack_[id];
359 516
360 if (type != Activity::ACT_NULL) { 517 if (type != Activity::ACT_NULL) {
361 DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK, 518 DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK,
362 type & Activity::ACT_CATEGORY_MASK); 519 type & Activity::ACT_CATEGORY_MASK);
363 activity->activity_type = type; 520 activity->activity_type = type;
364 } 521 }
365 522
366 if (&data != &kNullActivityData) 523 if (&data != &kNullActivityData)
367 activity->data = data; 524 activity->data = data;
368 } 525 }
369 } 526 }
370 527
371 void ThreadActivityTracker::PopActivity() { 528 void ThreadActivityTracker::PopActivity(ActivityId id) {
372 // Do an atomic decrement of the depth. No changes to stack entries guarded 529 // Do an atomic decrement of the depth. No changes to stack entries guarded
373 // by this variable are done here so a "relaxed" operation is acceptable. 530 // by this variable are done here so a "relaxed" operation is acceptable.
374 // |depth| will receive the value BEFORE it was modified. 531 // |depth| will receive the value BEFORE it was modified which means the
532 // return value must also be decremented. The slot will be "free" after
533 // this call but since only a single thread can access this object, the
534 // data will remain valid until this method returns or calls outside.
375 uint32_t depth = 535 uint32_t depth =
376 header_->current_depth.fetch_sub(1, std::memory_order_relaxed); 536 header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1;
377 537
378 // Validate that everything is running correctly. 538 // Validate that everything is running correctly.
379 DCHECK_LT(0U, depth); 539 DCHECK_EQ(id, depth);
380 540
381 // A thread-checker creates a lock to check the thread-id which means 541 // A thread-checker creates a lock to check the thread-id which means
382 // re-entry into this code if lock acquisitions are being tracked. 542 // re-entry into this code if lock acquisitions are being tracked.
383 DCHECK(stack_[depth - 1].activity_type == Activity::ACT_LOCK_ACQUIRE || 543 DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
384 thread_checker_.CalledOnValidThread()); 544 thread_checker_.CalledOnValidThread());
385 545
546 // Check if there was any user-data memory. It isn't free'd until later
547 // because the call to release it can push something on the stack.
548 PersistentMemoryAllocator::Reference user_data = stack_[depth].user_data;
549 stack_[depth].user_data = 0;
550
386 // The stack has shrunk meaning that some other thread trying to copy the 551 // The stack has shrunk meaning that some other thread trying to copy the
387 // contents for reporting purposes could get bad data. That thread would 552 // contents for reporting purposes could get bad data. That thread would
388 // have written a non-zero value into |stack_unchanged|; clearing it here 553 // have written a non-zero value into |stack_unchanged|; clearing it here
389 // will let that thread detect that something did change. This needs to 554 // will let that thread detect that something did change. This needs to
390 // happen after the atomic |depth| operation above so a "release" store 555 // happen after the atomic |depth| operation above so a "release" store
391 // is required. 556 // is required.
392 header_->stack_unchanged.store(0, std::memory_order_release); 557 header_->stack_unchanged.store(0, std::memory_order_release);
558
559 // Release resources located above. All stack processing is done so it's
560 // safe if some outside code does another push.
561 if (user_data)
562 GlobalActivityTracker::Get()->ReleaseUserDataMemory(&user_data);
563 }
564
565 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
566 ActivityId id) {
567 // User-data is only stored for activities actually held in the stack.
568 if (id < stack_slots_) {
569 void* memory =
570 GlobalActivityTracker::Get()->GetUserDataMemory(&stack_[id].user_data);
571 if (memory)
572 return MakeUnique<ActivityUserData>(memory, kUserDataSize);
573 }
574
575 // Return a dummy object that will still accept (but ignore) Set() calls.
576 return MakeUnique<ActivityUserData>(nullptr, 0);
393 } 577 }
394 578
395 bool ThreadActivityTracker::IsValid() const { 579 bool ThreadActivityTracker::IsValid() const {
396 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie || 580 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie ||
397 header_->process_id.load(std::memory_order_relaxed) == 0 || 581 header_->process_id.load(std::memory_order_relaxed) == 0 ||
398 header_->thread_ref.as_id == 0 || 582 header_->thread_ref.as_id == 0 ||
399 header_->start_time == 0 || 583 header_->start_time == 0 ||
400 header_->start_ticks == 0 || 584 header_->start_ticks == 0 ||
401 header_->stack_slots != stack_slots_ || 585 header_->stack_slots != stack_slots_ ||
402 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { 586 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
(...skipping 223 matching lines...) Expand 10 before | Expand all | Expand 10 after
626 return tracker; 810 return tracker;
627 } 811 }
628 812
629 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { 813 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
630 ThreadActivityTracker* tracker = 814 ThreadActivityTracker* tracker =
631 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); 815 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
632 if (tracker) 816 if (tracker)
633 delete tracker; 817 delete tracker;
634 } 818 }
635 819
820 void* GlobalActivityTracker::GetUserDataMemory(
821 PersistentMemoryAllocator::Reference* reference) {
822 if (!*reference) {
823 base::AutoLock autolock(user_data_allocator_lock_);
824 *reference = user_data_allocator_.GetObjectReference();
825 if (!*reference)
826 return nullptr;
827 }
828
829 void* memory =
830 allocator_->GetAsObject<char>(*reference, kTypeIdUserDataRecord);
831 DCHECK(memory);
832 return memory;
833 }
834
835 void GlobalActivityTracker::ReleaseUserDataMemory(
836 PersistentMemoryAllocator::Reference* reference) {
837 DCHECK(*reference);
838 base::AutoLock autolock(user_data_allocator_lock_);
839 user_data_allocator_.ReleaseObjectReference(*reference);
840 *reference = PersistentMemoryAllocator::kReferenceNull;
841 }
842
636 GlobalActivityTracker::GlobalActivityTracker( 843 GlobalActivityTracker::GlobalActivityTracker(
637 std::unique_ptr<PersistentMemoryAllocator> allocator, 844 std::unique_ptr<PersistentMemoryAllocator> allocator,
638 int stack_depth) 845 int stack_depth)
639 : allocator_(std::move(allocator)), 846 : allocator_(std::move(allocator)),
640 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), 847 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
641 this_thread_tracker_(&OnTLSDestroy), 848 this_thread_tracker_(&OnTLSDestroy),
642 thread_tracker_count_(0), 849 thread_tracker_count_(0),
643 thread_tracker_allocator_(allocator_.get(), 850 thread_tracker_allocator_(allocator_.get(),
644 kTypeIdActivityTracker, 851 kTypeIdActivityTracker,
645 kTypeIdActivityTrackerFree, 852 kTypeIdActivityTrackerFree,
646 stack_memory_size_, 853 stack_memory_size_,
647 kCachedThreadMemories) { 854 kCachedThreadMemories,
855 /*make_iterable=*/true),
856 user_data_allocator_(allocator_.get(),
857 kTypeIdUserDataRecord,
858 kTypeIdUserDataRecordFree,
859 kUserDataSize,
860 kCachedUserDataMemories,
861 /*make_iterable=*/false),
862 user_data_(
863 allocator_->GetAsObject<char>(
864 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
865 kTypeIdGlobalDataRecord),
866 kGlobalDataSize) {
648 // Ensure the passed memory is valid and empty (iterator finds nothing). 867 // Ensure the passed memory is valid and empty (iterator finds nothing).
649 uint32_t type; 868 uint32_t type;
650 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); 869 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
651 870
652 // Ensure that there is no other global object and then make this one such. 871 // Ensure that there is no other global object and then make this one such.
653 DCHECK(!g_tracker_); 872 DCHECK(!g_tracker_);
654 g_tracker_ = this; 873 g_tracker_ = this;
655 } 874 }
656 875
657 GlobalActivityTracker::~GlobalActivityTracker() { 876 GlobalActivityTracker::~GlobalActivityTracker() {
(...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after
759 : GlobalActivityTracker::ScopedThreadActivity( 978 : GlobalActivityTracker::ScopedThreadActivity(
760 program_counter, 979 program_counter,
761 nullptr, 980 nullptr,
762 Activity::ACT_PROCESS_WAIT, 981 Activity::ACT_PROCESS_WAIT,
763 ActivityData::ForProcess(process->Pid()), 982 ActivityData::ForProcess(process->Pid()),
764 /*lock_allowed=*/true) {} 983 /*lock_allowed=*/true) {}
765 #endif 984 #endif
766 985
767 } // namespace debug 986 } // namespace debug
768 } // namespace base 987 } // namespace base
OLDNEW
« no previous file with comments | « base/debug/activity_tracker.h ('k') | base/debug/activity_tracker_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698