Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(45)

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2422213002: Added support for storing arbitrary user data. (Closed)
Patch Set: plumb ActivityUserData all the way through Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include <algorithm>
8
7 #include "base/debug/stack_trace.h" 9 #include "base/debug/stack_trace.h"
8 #include "base/files/file.h" 10 #include "base/files/file.h"
9 #include "base/files/file_path.h" 11 #include "base/files/file_path.h"
10 #include "base/files/memory_mapped_file.h" 12 #include "base/files/memory_mapped_file.h"
11 #include "base/logging.h" 13 #include "base/logging.h"
12 #include "base/memory/ptr_util.h" 14 #include "base/memory/ptr_util.h"
13 #include "base/metrics/field_trial.h" 15 #include "base/metrics/field_trial.h"
14 #include "base/metrics/histogram_macros.h" 16 #include "base/metrics/histogram_macros.h"
15 #include "base/pending_task.h" 17 #include "base/pending_task.h"
16 #include "base/process/process.h" 18 #include "base/process/process.h"
17 #include "base/process/process_handle.h" 19 #include "base/process/process_handle.h"
18 #include "base/stl_util.h" 20 #include "base/stl_util.h"
19 #include "base/strings/string_util.h" 21 #include "base/strings/string_util.h"
20 #include "base/threading/platform_thread.h" 22 #include "base/threading/platform_thread.h"
21 23
22 namespace base { 24 namespace base {
23 namespace debug { 25 namespace debug {
24 26
25 namespace { 27 namespace {
26 28
27 // A number that identifies the memory as having been initialized. It's 29 // A number that identifies the memory as having been initialized. It's
28 // arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker). 30 // arbitrary but happens to be the first 4 bytes of SHA1(ThreadActivityTracker).
29 // A version number is added on so that major structure changes won't try to 31 // A version number is added on so that major structure changes won't try to
30 // read an older version (since the cookie won't match). 32 // read an older version (since the cookie won't match).
31 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2 33 const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2
32 34
33 // The minimum depth a stack should support. 35 // The minimum depth a stack should support.
34 const int kMinStackDepth = 2; 36 const int kMinStackDepth = 2;
35 37
38 // The amount of memory set aside for holding arbitrary user data (key/value
39 // pairs) globally or associated with ActivityData entries.
40 const size_t kUserDataSize = 1024; // bytes
41 const size_t kGlobalDataSize = 1024; // bytes
42
36 union ThreadRef { 43 union ThreadRef {
37 int64_t as_id; 44 int64_t as_id;
38 #if defined(OS_WIN) 45 #if defined(OS_WIN)
39 // On Windows, the handle itself is often a pseudo-handle with a common 46 // On Windows, the handle itself is often a pseudo-handle with a common
40 // value meaning "this thread" and so the thread-id is used. The former 47 // value meaning "this thread" and so the thread-id is used. The former
41 // can be converted to a thread-id with a system call. 48 // can be converted to a thread-id with a system call.
42 PlatformThreadId as_tid; 49 PlatformThreadId as_tid;
43 #elif defined(OS_POSIX) 50 #elif defined(OS_POSIX)
44 // On Posix, the handle is always a unique identifier so no conversion 51 // On Posix, the handle is always a unique identifier so no conversion
45 // needs to be done. However, it's value is officially opaque so there 52 // needs to be done. However, it's value is officially opaque so there
(...skipping 18 matching lines...) Expand all
64 thread_ref.as_handle = handle.platform_handle(); 71 thread_ref.as_handle = handle.platform_handle();
65 #endif 72 #endif
66 return ForThread(thread_ref.as_id); 73 return ForThread(thread_ref.as_id);
67 } 74 }
68 75
69 ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator( 76 ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
70 PersistentMemoryAllocator* allocator, 77 PersistentMemoryAllocator* allocator,
71 uint32_t object_type, 78 uint32_t object_type,
72 uint32_t object_free_type, 79 uint32_t object_free_type,
73 size_t object_size, 80 size_t object_size,
74 size_t cache_size) 81 size_t cache_size,
82 bool make_iterable)
75 : allocator_(allocator), 83 : allocator_(allocator),
76 object_type_(object_type), 84 object_type_(object_type),
77 object_free_type_(object_free_type), 85 object_free_type_(object_free_type),
78 object_size_(object_size), 86 object_size_(object_size),
79 cache_size_(cache_size), 87 cache_size_(cache_size),
88 make_iterable_(make_iterable),
80 iterator_(allocator), 89 iterator_(allocator),
81 cache_values_(new Reference[cache_size]), 90 cache_values_(new Reference[cache_size]),
82 cache_used_(0) { 91 cache_used_(0) {
83 DCHECK(allocator); 92 DCHECK(allocator);
84 } 93 }
85 94
86 ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {} 95 ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {}
87 96
88 ActivityTrackerMemoryAllocator::Reference 97 ActivityTrackerMemoryAllocator::Reference
89 ActivityTrackerMemoryAllocator::GetObjectReference() { 98 ActivityTrackerMemoryAllocator::GetObjectReference() {
(...skipping 30 matching lines...) Expand all
120 break; 129 break;
121 } 130 }
122 if (!found) { 131 if (!found) {
123 // Reached end; start over at the beginning. 132 // Reached end; start over at the beginning.
124 iterator_.Reset(); 133 iterator_.Reset();
125 } 134 }
126 } 135 }
127 136
128 // No free block was found so instead allocate a new one. 137 // No free block was found so instead allocate a new one.
129 Reference allocated = allocator_->Allocate(object_size_, object_type_); 138 Reference allocated = allocator_->Allocate(object_size_, object_type_);
130 if (allocated) 139 if (allocated && make_iterable_)
131 allocator_->MakeIterable(allocated); 140 allocator_->MakeIterable(allocated);
132 return allocated; 141 return allocated;
133 } 142 }
134 143
135 void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) { 144 void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
136 // Zero the memory so that it is ready for immediate use if needed later. 145 // Zero the memory so that it is ready for immediate use if needed later.
137 char* mem_base = allocator_->GetAsObject<char>(ref, object_type_); 146 char* mem_base = allocator_->GetAsObject<char>(ref, object_type_);
138 DCHECK(mem_base); 147 DCHECK(mem_base);
139 memset(mem_base, 0, object_size_); 148 memset(mem_base, 0, object_size_);
140 149
(...skipping 30 matching lines...) Expand all
171 } 180 }
172 activity->call_stack[i - 1] = 0; 181 activity->call_stack[i - 1] = 0;
173 #else 182 #else
174 activity->call_stack[0] = 0; 183 activity->call_stack[0] = 0;
175 #endif 184 #endif
176 } 185 }
177 186
178 ActivitySnapshot::ActivitySnapshot() {} 187 ActivitySnapshot::ActivitySnapshot() {}
179 ActivitySnapshot::~ActivitySnapshot() {} 188 ActivitySnapshot::~ActivitySnapshot() {}
180 189
190 ActivityUserData::ValueInfo::ValueInfo() {}
191 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
192 ActivityUserData::ValueInfo::~ValueInfo() {}
193
194 ActivityUserData::ActivityUserData(void* memory, size_t size)
195 : memory_(static_cast<char*>(memory)), available_(size) {}
196
197 ActivityUserData::~ActivityUserData() {}
198
199 void ActivityUserData::Set(StringPiece name,
200 ValueType type,
201 const void* memory,
202 size_t size) {
203 DCHECK(thread_checker_.CalledOnValidThread());
204 DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
205 size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
206 size);
207
208 // It's possible that no user data is being stored.
209 if (!memory_)
210 return;
211
212 ValueInfo* info;
213 auto existing = values_.find(name);
214 if (existing != values_.end()) {
215 info = &existing->second;
216 } else {
217 // The name size is limited to what can be held in a single byte but
218 // because there are no alignment constraints on strings, it's set tight
219 // against the header. Its extent (the reserved space, even if it's not
manzagop (departed) 2016/11/11 17:01:42 typo: It's -> Its
bcwhite 2016/11/14 15:08:42 Actually, that is correct. "It is not" => "It's n
manzagop (departed) 2016/11/14 19:51:17 Ah, I meant the other one! ;) "It's extent (...) i
bcwhite 2016/11/15 13:37:47 I knew that... ;-)
220 // all used) is calculated so that, when pressed against the header, the
221 // following field will be aligned properly.
222 size_t name_size =
223 std::min(name.length(),
224 static_cast<size_t>(std::numeric_limits<uint8_t>::max()));
225 size_t name_extent = ((sizeof(Header) + name_size + kMemoryAlignment - 1) &
226 (0 - kMemoryAlignment)) -
227 sizeof(Header);
228 size_t value_extent =
229 (size + kMemoryAlignment - 1) & (0 - kMemoryAlignment);
manzagop (departed) 2016/11/11 17:01:42 An alignment ceiling function might make this more readable.
bcwhite 2016/11/14 15:08:42 Done.
230
231 // The "basic size" is the minimum size of the record. It's possible that
232 // lengthy values will get truncated but there must be at least some bytes
233 // available.
234 size_t basic_size = sizeof(Header) + name_extent + kMemoryAlignment;
235 if (basic_size > available_)
236 return; // No space to store even the smallest value.
237
238 // The "full size" is the size for storing the entire value, truncated
239 // to the amount of available memory.
240 size_t full_size =
241 std::min(sizeof(Header) + name_extent + value_extent, available_);
242 size = std::min(full_size - sizeof(Header) - name_extent, size);
243
244 // Allocate a chunk of memory.
245 Header* header = reinterpret_cast<Header*>(memory_);
246 memory_ += full_size;
247 available_ -= full_size;
248
249 // Datafill the header and name records.
250 DCHECK_EQ(EMPTY_VALUE, header->type); // Memory must be zeroed.
251 header->type = type;
252 header->name_size = static_cast<uint8_t>(name_size);
253 header->value_size = size;
254 header->record_size = full_size;
255 char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header);
256 void* value_memory =
257 reinterpret_cast<char*>(header) + sizeof(Header) + name_extent;
258 memcpy(name_memory, name.data(), name_size);
manzagop (departed) 2016/11/11 17:01:42 Does this need to be nul terminated?
bcwhite 2016/11/14 15:08:42 No. StringPiece doesn't require a NUL terminator.
manzagop (departed) 2016/11/14 19:51:17 I was thinking more in terms of how we read it back.
bcwhite 2016/11/15 13:37:47 In most cases yes but if it was a string overwritten.
259
260 // Create an entry in |values_| so that this field can be found and changed
261 // later on without having to allocate new entries.
262 StringPiece persistent_name(name_memory, name_size);
manzagop (departed) 2016/11/11 17:01:42 I'm not sure how we should handle names that exceed the length limit.
bcwhite 2016/11/14 15:08:42 I'll have it pre-truncate before doing a look-up in the map.
manzagop (departed) 2016/11/14 19:51:17 Sgtm!
263 auto inserted =
264 values_.insert(std::make_pair(persistent_name, ValueInfo()));
265 DCHECK(inserted.second); // True if inserted, false if existed.
266 info = &inserted.first->second;
267 info->name = persistent_name;
268 info->memory = value_memory;
269 info->size = size;
270 info->extent = full_size - sizeof(Header) - name_extent;
271 info->type = type;
272 }
273
274 // Copy the value data to storage.
275 DCHECK_EQ(type, info->type);
276 size = std::min(size, info->extent);
277 memcpy(info->memory, memory, size);
manzagop (departed) 2016/11/11 17:01:42 Do we need to update the value's size, or is it al
bcwhite 2016/11/14 15:08:42 Done.
278 info->size = size;
279 }
280
281 void ActivityUserData::SetExternal(StringPiece name,
282 ValueType type,
283 const void* memory,
284 size_t size) {
285 ExternalRecord rec;
286 rec.address = reinterpret_cast<uintptr_t>(memory);
287 rec.size = size;
288 Set(name, static_cast<ValueType>(type - 1), &rec, sizeof(rec));
manzagop (departed) 2016/11/11 17:01:42 Why does the type get decremented (EXTERNAL_STRING vs STRING)?
bcwhite 2016/11/14 15:08:42 Done.
289 }
181 290
182 // This information is kept for every thread that is tracked. It is filled 291 // This information is kept for every thread that is tracked. It is filled
183 // the very first time the thread is seen. All fields must be of exact sizes 292 // the very first time the thread is seen. All fields must be of exact sizes
184 // so there is no issue moving between 32 and 64-bit builds. 293 // so there is no issue moving between 32 and 64-bit builds.
185 struct ThreadActivityTracker::Header { 294 struct ThreadActivityTracker::Header {
186 // This unique number indicates a valid initialization of the memory. 295 // This unique number indicates a valid initialization of the memory.
187 std::atomic<uint32_t> cookie; 296 std::atomic<uint32_t> cookie;
188 uint32_t reserved; // pad out to 64 bits 297 uint32_t reserved; // pad out to 64 bits
189 298
190 // The process-id and thread-id (thread_ref.as_id) to which this data belongs. 299 // The process-id and thread-id (thread_ref.as_id) to which this data belongs.
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
225 // is not the current implementation so no parallel snapshots allowed). 334 // is not the current implementation so no parallel snapshots allowed).
226 std::atomic<uint32_t> stack_unchanged; 335 std::atomic<uint32_t> stack_unchanged;
227 336
228 // The name of the thread (up to a maximum length). Dynamic-length names 337 // The name of the thread (up to a maximum length). Dynamic-length names
229 // are not practical since the memory has to come from the same persistent 338 // are not practical since the memory has to come from the same persistent
230 // allocator that holds this structure and to which this object has no 339 // allocator that holds this structure and to which this object has no
231 // reference. 340 // reference.
232 char thread_name[32]; 341 char thread_name[32];
233 }; 342 };
234 343
344 ThreadActivityTracker::ScopedActivity::ScopedActivity(
345 ThreadActivityTracker* tracker,
346 const void* origin,
347 Activity::Type type,
348 const ActivityData& data)
349 : tracker_(tracker) {
350 if (tracker_)
351 activity_id_ = tracker_->PushActivity(origin, type, data);
352 }
353
354 ThreadActivityTracker::ScopedActivity::~ScopedActivity() {
355 if (tracker_)
356 tracker_->PopActivity(activity_id_);
357 }
358
359 void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData(
360 Activity::Type type,
361 const ActivityData& data) {
362 if (tracker_)
363 tracker_->ChangeActivity(activity_id_, type, data);
364 }
365
366 ActivityUserData& ThreadActivityTracker::ScopedActivity::user_data() {
367 if (!user_data_) {
368 if (tracker_)
369 user_data_ = tracker_->GetUserData(activity_id_);
370 else
371 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
372 }
373 return *user_data_;
374 }
375
235 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size) 376 ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
236 : header_(static_cast<Header*>(base)), 377 : header_(static_cast<Header*>(base)),
237 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) + 378 stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
238 sizeof(Header))), 379 sizeof(Header))),
239 stack_slots_( 380 stack_slots_(
240 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) { 381 static_cast<uint32_t>((size - sizeof(Header)) / sizeof(Activity))) {
241 DCHECK(thread_checker_.CalledOnValidThread()); 382 DCHECK(thread_checker_.CalledOnValidThread());
242 383
243 // Verify the parameters but fail gracefully if they're not valid so that 384 // Verify the parameters but fail gracefully if they're not valid so that
244 // production code based on external inputs will not crash. IsValid() will 385 // production code based on external inputs will not crash. IsValid() will
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
302 DCHECK(IsValid()); 443 DCHECK(IsValid());
303 } else { 444 } else {
304 // This is a file with existing data. Perform basic consistency checks. 445 // This is a file with existing data. Perform basic consistency checks.
305 valid_ = true; 446 valid_ = true;
306 valid_ = IsValid(); 447 valid_ = IsValid();
307 } 448 }
308 } 449 }
309 450
310 ThreadActivityTracker::~ThreadActivityTracker() {} 451 ThreadActivityTracker::~ThreadActivityTracker() {}
311 452
312 void ThreadActivityTracker::PushActivity(const void* origin, 453 ThreadActivityTracker::ActivityId ThreadActivityTracker::PushActivity(
313 Activity::Type type, 454 const void* origin,
314 const ActivityData& data) { 455 Activity::Type type,
456 const ActivityData& data) {
315 // A thread-checker creates a lock to check the thread-id which means 457 // A thread-checker creates a lock to check the thread-id which means
316 // re-entry into this code if lock acquisitions are being tracked. 458 // re-entry into this code if lock acquisitions are being tracked.
317 DCHECK(type == Activity::ACT_LOCK_ACQUIRE || 459 DCHECK(type == Activity::ACT_LOCK_ACQUIRE ||
318 thread_checker_.CalledOnValidThread()); 460 thread_checker_.CalledOnValidThread());
319 461
320 // Get the current depth of the stack. No access to other memory guarded 462 // Get the current depth of the stack. No access to other memory guarded
321 // by this variable is done here so a "relaxed" load is acceptable. 463 // by this variable is done here so a "relaxed" load is acceptable.
322 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed); 464 uint32_t depth = header_->current_depth.load(std::memory_order_relaxed);
323 465
324 // Handle the case where the stack depth has exceeded the storage capacity. 466 // Handle the case where the stack depth has exceeded the storage capacity.
325 // Extra entries will be lost leaving only the base of the stack. 467 // Extra entries will be lost leaving only the base of the stack.
326 if (depth >= stack_slots_) { 468 if (depth >= stack_slots_) {
327 // Since no other threads modify the data, no compare/exchange is needed. 469 // Since no other threads modify the data, no compare/exchange is needed.
328 // Since no other memory is being modified, a "relaxed" store is acceptable. 470 // Since no other memory is being modified, a "relaxed" store is acceptable.
329 header_->current_depth.store(depth + 1, std::memory_order_relaxed); 471 header_->current_depth.store(depth + 1, std::memory_order_relaxed);
330 return; 472 return depth;
331 } 473 }
332 474
333 // Get a pointer to the next activity and load it. No atomicity is required 475 // Get a pointer to the next activity and load it. No atomicity is required
334 // here because the memory is known only to this thread. It will be made 476 // here because the memory is known only to this thread. It will be made
335 // known to other threads once the depth is incremented. 477 // known to other threads once the depth is incremented.
336 Activity::FillFrom(&stack_[depth], origin, type, data); 478 Activity::FillFrom(&stack_[depth], origin, type, data);
337 479
338 // Save the incremented depth. Because this guards |activity| memory filled 480 // Save the incremented depth. Because this guards |activity| memory filled
339 // above that may be read by another thread once the recorded depth changes, 481 // above that may be read by another thread once the recorded depth changes,
340 // a "release" store is required. 482 // a "release" store is required.
341 header_->current_depth.store(depth + 1, std::memory_order_release); 483 header_->current_depth.store(depth + 1, std::memory_order_release);
484
485 // The current depth is used as the activity ID because it simply identifies
486 // an entry. Once an entry is pop'd, it's okay to reuse the ID.
487 return depth;
342 } 488 }
343 489
344 void ThreadActivityTracker::ChangeActivity(Activity::Type type, 490 void ThreadActivityTracker::ChangeActivity(ActivityId id,
491 Activity::Type type,
345 const ActivityData& data) { 492 const ActivityData& data) {
346 DCHECK(thread_checker_.CalledOnValidThread()); 493 DCHECK(thread_checker_.CalledOnValidThread());
347 DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData); 494 DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData);
348 495 DCHECK_LT(id, header_->current_depth.load(std::memory_order_acquire));
349 // Get the current depth of the stack and acquire the data held there.
350 uint32_t depth = header_->current_depth.load(std::memory_order_acquire);
351 DCHECK_LT(0U, depth);
352 496
353 // Update the information if it is being recorded (i.e. within slot limit). 497 // Update the information if it is being recorded (i.e. within slot limit).
354 if (depth <= stack_slots_) { 498 if (id < stack_slots_) {
355 Activity* activity = &stack_[depth - 1]; 499 Activity* activity = &stack_[id];
356 500
357 if (type != Activity::ACT_NULL) { 501 if (type != Activity::ACT_NULL) {
358 DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK, 502 DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK,
359 type & Activity::ACT_CATEGORY_MASK); 503 type & Activity::ACT_CATEGORY_MASK);
360 activity->activity_type = type; 504 activity->activity_type = type;
361 } 505 }
362 506
363 if (&data != &kNullActivityData) 507 if (&data != &kNullActivityData)
364 activity->data = data; 508 activity->data = data;
365 } 509 }
366 } 510 }
367 511
368 void ThreadActivityTracker::PopActivity() { 512 void ThreadActivityTracker::PopActivity(ActivityId id) {
369 // Do an atomic decrement of the depth. No changes to stack entries guarded 513 // Do an atomic decrement of the depth. No changes to stack entries guarded
370 // by this variable are done here so a "relaxed" operation is acceptable. 514 // by this variable are done here so a "relaxed" operation is acceptable.
371 // |depth| will receive the value BEFORE it was modified. 515 // |depth| will receive the value BEFORE it was modified which means the
516 // return value must also be decremented. The slot will be "free" after
517 // this call but since only a single thread can access this object, the
518 // data will remain valid until this method returns or calls outside.
372 uint32_t depth = 519 uint32_t depth =
373 header_->current_depth.fetch_sub(1, std::memory_order_relaxed); 520 header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1;
374 521
375 // Validate that everything is running correctly. 522 // Validate that everything is running correctly.
376 DCHECK_LT(0U, depth); 523 DCHECK_EQ(id, depth);
377 524
378 // A thread-checker creates a lock to check the thread-id which means 525 // A thread-checker creates a lock to check the thread-id which means
379 // re-entry into this code if lock acquisitions are being tracked. 526 // re-entry into this code if lock acquisitions are being tracked.
380 DCHECK(stack_[depth - 1].activity_type == Activity::ACT_LOCK_ACQUIRE || 527 DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
381 thread_checker_.CalledOnValidThread()); 528 thread_checker_.CalledOnValidThread());
382 529
530 // Check if there was any user-data memory. It isn't free'd until later
531 // because the call to release it can push something on the stack.
532 PersistentMemoryAllocator::Reference user_data = stack_[depth].user_data;
533 stack_[depth].user_data = 0;
534
383 // The stack has shrunk meaning that some other thread trying to copy the 535 // The stack has shrunk meaning that some other thread trying to copy the
384 // contents for reporting purposes could get bad data. That thread would 536 // contents for reporting purposes could get bad data. That thread would
385 // have written a non-zero value into |stack_unchanged|; clearing it here 537 // have written a non-zero value into |stack_unchanged|; clearing it here
386 // will let that thread detect that something did change. This needs to 538 // will let that thread detect that something did change. This needs to
387 // happen after the atomic |depth| operation above so a "release" store 539 // happen after the atomic |depth| operation above so a "release" store
388 // is required. 540 // is required.
389 header_->stack_unchanged.store(0, std::memory_order_release); 541 header_->stack_unchanged.store(0, std::memory_order_release);
542
543 // Release resources located above. All stack processing is done so it's
544 // safe if some outside code does another push.
545 if (user_data)
546 GlobalActivityTracker::Get()->ReleaseUserDataMemory(&user_data);
547 }
548
549 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
550 ActivityId id) {
551 // User-data is only stored for activities actually held in the stack.
552 if (id < stack_slots_) {
553 void* memory =
554 GlobalActivityTracker::Get()->GetUserDataMemory(&stack_[id].user_data);
555 if (memory)
556 return MakeUnique<ActivityUserData>(memory, kUserDataSize);
557 }
558
559 // Return a dummy object that will still accept (but ignore) Set() calls.
560 return MakeUnique<ActivityUserData>(nullptr, 0);
390 } 561 }
391 562
392 bool ThreadActivityTracker::IsValid() const { 563 bool ThreadActivityTracker::IsValid() const {
393 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie || 564 if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie ||
394 header_->process_id.load(std::memory_order_relaxed) == 0 || 565 header_->process_id.load(std::memory_order_relaxed) == 0 ||
395 header_->thread_ref.as_id == 0 || 566 header_->thread_ref.as_id == 0 ||
396 header_->start_time == 0 || 567 header_->start_time == 0 ||
397 header_->start_ticks == 0 || 568 header_->start_ticks == 0 ||
398 header_->stack_slots != stack_slots_ || 569 header_->stack_slots != stack_slots_ ||
399 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { 570 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
(...skipping 223 matching lines...) Expand 10 before | Expand all | Expand 10 after
623 return tracker; 794 return tracker;
624 } 795 }
625 796
626 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() { 797 void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
627 ThreadActivityTracker* tracker = 798 ThreadActivityTracker* tracker =
628 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get()); 799 reinterpret_cast<ThreadActivityTracker*>(this_thread_tracker_.Get());
629 if (tracker) 800 if (tracker)
630 delete tracker; 801 delete tracker;
631 } 802 }
632 803
804 void* GlobalActivityTracker::GetUserDataMemory(
805 PersistentMemoryAllocator::Reference* reference) {
806 if (!*reference) {
807 base::AutoLock autolock(user_data_allocator_lock_);
808 *reference = user_data_allocator_.GetObjectReference();
809 if (!*reference)
810 return nullptr;
811 }
812
813 void* memory =
814 allocator_->GetAsObject<char>(*reference, kTypeIdUserDataRecord);
815 DCHECK(memory);
816 return memory;
817 }
818
819 void GlobalActivityTracker::ReleaseUserDataMemory(
820 PersistentMemoryAllocator::Reference* reference) {
821 DCHECK(*reference);
822 base::AutoLock autolock(user_data_allocator_lock_);
823 user_data_allocator_.ReleaseObjectReference(*reference);
824 *reference = PersistentMemoryAllocator::kReferenceNull;
825 }
826
633 GlobalActivityTracker::GlobalActivityTracker( 827 GlobalActivityTracker::GlobalActivityTracker(
634 std::unique_ptr<PersistentMemoryAllocator> allocator, 828 std::unique_ptr<PersistentMemoryAllocator> allocator,
635 int stack_depth) 829 int stack_depth)
636 : allocator_(std::move(allocator)), 830 : allocator_(std::move(allocator)),
637 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)), 831 stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
638 this_thread_tracker_(&OnTLSDestroy), 832 this_thread_tracker_(&OnTLSDestroy),
639 thread_tracker_count_(0), 833 thread_tracker_count_(0),
640 thread_tracker_allocator_(allocator_.get(), 834 thread_tracker_allocator_(allocator_.get(),
641 kTypeIdActivityTracker, 835 kTypeIdActivityTracker,
642 kTypeIdActivityTrackerFree, 836 kTypeIdActivityTrackerFree,
643 stack_memory_size_, 837 stack_memory_size_,
644 kCachedThreadMemories) { 838 kCachedThreadMemories,
839 /*make_iterable=*/true),
840 user_data_allocator_(allocator_.get(),
841 kTypeIdUserDataRecord,
842 kTypeIdUserDataRecordFree,
843 kUserDataSize,
844 kCachedUserDataMemories,
845 /*make_iterable=*/false),
846 user_data_(
847 allocator_->GetAsObject<char>(
848 allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
849 kTypeIdGlobalDataRecord),
850 kGlobalDataSize) {
645 // Ensure the passed memory is valid and empty (iterator finds nothing). 851 // Ensure the passed memory is valid and empty (iterator finds nothing).
646 uint32_t type; 852 uint32_t type;
647 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type)); 853 DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
648 854
649 // Ensure that there is no other global object and then make this one such. 855 // Ensure that there is no other global object and then make this one such.
650 DCHECK(!g_tracker_); 856 DCHECK(!g_tracker_);
651 g_tracker_ = this; 857 g_tracker_ = this;
652 } 858 }
653 859
654 GlobalActivityTracker::~GlobalActivityTracker() { 860 GlobalActivityTracker::~GlobalActivityTracker() {
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after
744 const base::Process* process) 950 const base::Process* process)
745 : GlobalActivityTracker::ScopedThreadActivity( 951 : GlobalActivityTracker::ScopedThreadActivity(
746 nullptr, 952 nullptr,
747 Activity::ACT_PROCESS_WAIT, 953 Activity::ACT_PROCESS_WAIT,
748 ActivityData::ForProcess(process->Pid()), 954 ActivityData::ForProcess(process->Pid()),
749 /*lock_allowed=*/true) {} 955 /*lock_allowed=*/true) {}
750 #endif 956 #endif
751 957
752 } // namespace debug 958 } // namespace debug
753 } // namespace base 959 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698