Chromium Code Reviews

Side by Side Diff: base/debug/activity_tracker.cc

Issue 2702413006: Enable storing last-dispatched exception per-thread. (Closed)
Patch Set: harden exception recording. Created 3 years, 9 months ago.
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/activity_tracker.h" 5 #include "base/debug/activity_tracker.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <limits> 8 #include <limits>
9 #include <utility> 9 #include <utility>
10 10
11 #include "base/atomic_sequence_num.h" 11 #include "base/atomic_sequence_num.h"
12 #include "base/atomicops.h"
12 #include "base/debug/stack_trace.h" 13 #include "base/debug/stack_trace.h"
13 #include "base/files/file.h" 14 #include "base/files/file.h"
14 #include "base/files/file_path.h" 15 #include "base/files/file_path.h"
15 #include "base/files/memory_mapped_file.h" 16 #include "base/files/memory_mapped_file.h"
17 #include "base/lazy_instance.h"
16 #include "base/logging.h" 18 #include "base/logging.h"
17 #include "base/memory/ptr_util.h" 19 #include "base/memory/ptr_util.h"
18 #include "base/metrics/field_trial.h" 20 #include "base/metrics/field_trial.h"
19 #include "base/metrics/histogram_macros.h" 21 #include "base/metrics/histogram_macros.h"
20 #include "base/pending_task.h" 22 #include "base/pending_task.h"
21 #include "base/pickle.h" 23 #include "base/pickle.h"
22 #include "base/process/process.h" 24 #include "base/process/process.h"
23 #include "base/process/process_handle.h" 25 #include "base/process/process_handle.h"
24 #include "base/stl_util.h" 26 #include "base/stl_util.h"
25 #include "base/strings/string_util.h" 27 #include "base/strings/string_util.h"
(...skipping 19 matching lines...)
45 // A constant used to indicate that module information is changing. 47 // A constant used to indicate that module information is changing.
46 const uint32_t kModuleInformationChanging = 0x80000000; 48 const uint32_t kModuleInformationChanging = 0x80000000;
47 49
48 // The key used to record process information. 50 // The key used to record process information.
49 const char kProcessPhaseDataKey[] = "process-phase"; 51 const char kProcessPhaseDataKey[] = "process-phase";
50 52
51 // An atomically incrementing number, used to check for recreations of objects 53 // An atomically incrementing number, used to check for recreations of objects
52 // in the same memory space. 54 // in the same memory space.
53 StaticAtomicSequenceNumber g_next_id; 55 StaticAtomicSequenceNumber g_next_id;
54 56
57 // A reusable user-data sink that just discards all data saved to it.
58 LazyInstance<ActivityUserData>::Leaky g_data_sink;
59
60 // A pointer to an AcitivyUserData object that can used when handing exceptions.
manzagop (departed) 2017/02/27 16:05:16 typo: AcitivyUserData, handing
bcwhite 2017/03/14 12:53:26 Done.
61 // This is an AtomicWord because std::atomic creates global ctors & dtors.
62 subtle::AtomicWord g_exception_data = 0;
63
55 union ThreadRef { 64 union ThreadRef {
56 int64_t as_id; 65 int64_t as_id;
57 #if defined(OS_WIN) 66 #if defined(OS_WIN)
58 // On Windows, the handle itself is often a pseudo-handle with a common 67 // On Windows, the handle itself is often a pseudo-handle with a common
59 // value meaning "this thread" and so the thread-id is used. The former 68 // value meaning "this thread" and so the thread-id is used. The former
60 // can be converted to a thread-id with a system call. 69 // can be converted to a thread-id with a system call.
61 PlatformThreadId as_tid; 70 PlatformThreadId as_tid;
62 #elif defined(OS_POSIX) 71 #elif defined(OS_POSIX)
63 // On Posix, the handle is always a unique identifier so no conversion 72 // On Posix, the handle is always a unique identifier so no conversion
64 // needs to be done. However, its value is officially opaque so there 73 // needs to be done. However, its value is officially opaque so there
(...skipping 244 matching lines...)
309 // thus clang requires explicit out-of-line constructors and destructors even 318 // thus clang requires explicit out-of-line constructors and destructors even
310 // when they do nothing. 319 // when they do nothing.
311 ActivityUserData::ValueInfo::ValueInfo() {} 320 ActivityUserData::ValueInfo::ValueInfo() {}
312 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default; 321 ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
313 ActivityUserData::ValueInfo::~ValueInfo() {} 322 ActivityUserData::ValueInfo::~ValueInfo() {}
314 ActivityUserData::MemoryHeader::MemoryHeader() {} 323 ActivityUserData::MemoryHeader::MemoryHeader() {}
315 ActivityUserData::MemoryHeader::~MemoryHeader() {} 324 ActivityUserData::MemoryHeader::~MemoryHeader() {}
316 ActivityUserData::FieldHeader::FieldHeader() {} 325 ActivityUserData::FieldHeader::FieldHeader() {}
317 ActivityUserData::FieldHeader::~FieldHeader() {} 326 ActivityUserData::FieldHeader::~FieldHeader() {}
318 327
328 ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0) {}
329
319 ActivityUserData::ActivityUserData(void* memory, size_t size) 330 ActivityUserData::ActivityUserData(void* memory, size_t size)
320 : memory_(reinterpret_cast<char*>(memory)), 331 : memory_(reinterpret_cast<char*>(memory)),
321 available_(RoundDownToAlignment(size, kMemoryAlignment)), 332 available_(RoundDownToAlignment(size, kMemoryAlignment)),
322 header_(reinterpret_cast<MemoryHeader*>(memory)) { 333 header_(reinterpret_cast<MemoryHeader*>(memory)) {
323 // It's possible that no user data is being stored. 334 // It's possible that no user data is being stored.
324 if (!memory_) 335 if (!memory_)
325 return; 336 return;
326 337
327 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header"); 338 static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
328 DCHECK_LT(sizeof(MemoryHeader), available_); 339 Initialize();
329 if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
330 header_->owner.Release_Initialize();
331 memory_ += sizeof(MemoryHeader);
332 available_ -= sizeof(MemoryHeader);
333
334 // If there is already data present, load that. This allows the same class
335 // to be used for analysis through snapshots.
336 ImportExistingData();
337 } 340 }
338 341
339 ActivityUserData::~ActivityUserData() {} 342 ActivityUserData::~ActivityUserData() {}
340 343
344 void ActivityUserData::Reset() {
manzagop (departed) 2017/02/27 16:05:16 DCHECK memory_/header_ or early exit.
bcwhite 2017/03/14 12:53:26 Done.
345 // Clear the memory in an atomic manner.
manzagop (departed) 2017/02/27 16:05:16 ActivityUserData is either used in a thread affine
bcwhite 2017/03/14 12:53:26 It's written with thread affinity but another (ana
346 std::atomic<int>* data = reinterpret_cast<std::atomic<int>*>(header_);
347 const uint32_t words = (reinterpret_cast<uintptr_t>(memory_) -
348 reinterpret_cast<uintptr_t>(header_)) /
349 sizeof(int);
350 for (uint32_t i = 0; i < words; ++i) {
351 data->store(0, std::memory_order_release);
352 ++data;
353 }
354
355 values_.clear();
manzagop (departed) 2017/02/27 16:05:16 Also reset memory_ and available_?
bcwhite 2017/03/14 12:53:25 Oops! Done.
356 Initialize();
357 }
358
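Reset() clears the region with per-word std::atomic release stores rather than memset() so a concurrent analyzer never observes ordering-free partial writes. A stand-alone sketch of the same loop (illustrative name; assumes the memory is int-aligned and its size a multiple of sizeof(int)):

#include <atomic>
#include <cstddef>

// Zero |size| bytes using release stores so each word flips to zero
// atomically and in a well-defined order for any concurrent reader.
void AtomicClear(void* memory, size_t size) {
  std::atomic<int>* word = reinterpret_cast<std::atomic<int>*>(memory);
  for (size_t i = 0; i < size / sizeof(int); ++i, ++word)
    word->store(0, std::memory_order_release);
}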
341 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const { 359 bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
342 DCHECK(output_snapshot); 360 DCHECK(output_snapshot);
343 DCHECK(output_snapshot->empty()); 361 DCHECK(output_snapshot->empty());
344 362
345 // Find any new data that may have been added by an active instance of this 363 // Find any new data that may have been added by an active instance of this
346 // class that is adding records. 364 // class that is adding records.
347 ImportExistingData(); 365 ImportExistingData();
348 366
349 for (const auto& entry : values_) { 367 for (const auto& entry : values_) {
350 TypedValue value; 368 TypedValue value;
(...skipping 155 matching lines...)
506 void ActivityUserData::SetReference(StringPiece name, 524 void ActivityUserData::SetReference(StringPiece name,
507 ValueType type, 525 ValueType type,
508 const void* memory, 526 const void* memory,
509 size_t size) { 527 size_t size) {
510 ReferenceRecord rec; 528 ReferenceRecord rec;
511 rec.address = reinterpret_cast<uintptr_t>(memory); 529 rec.address = reinterpret_cast<uintptr_t>(memory);
512 rec.size = size; 530 rec.size = size;
513 Set(name, type, &rec, sizeof(rec)); 531 Set(name, type, &rec, sizeof(rec));
514 } 532 }
515 533
534 void ActivityUserData::Initialize() {
manzagop (departed) 2017/02/27 16:05:16 DCHECK memory_ or early return.
bcwhite 2017/03/14 12:53:26 Done.
535 DCHECK_LT(sizeof(MemoryHeader), available_);
536 if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
537 header_->owner.Release_Initialize();
538 memory_ += sizeof(MemoryHeader);
539 available_ -= sizeof(MemoryHeader);
540
541 // If there is already data present, load that. This allows the same class
542 // to be used for analysis through snapshots.
543 ImportExistingData();
manzagop (departed) 2017/02/27 16:05:16 Worth skipping the import when just initialized?
bcwhite 2017/03/14 12:53:26 Not really since the first read will cause it to e
544 }
545
516 void ActivityUserData::ImportExistingData() const { 546 void ActivityUserData::ImportExistingData() const {
517 while (available_ > sizeof(FieldHeader)) { 547 while (available_ > sizeof(FieldHeader)) {
518 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_); 548 FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
519 ValueType type = 549 ValueType type =
520 static_cast<ValueType>(header->type.load(std::memory_order_acquire)); 550 static_cast<ValueType>(header->type.load(std::memory_order_acquire));
521 if (type == END_OF_VALUES) 551 if (type == END_OF_VALUES)
522 return; 552 return;
523 if (header->record_size > available_) 553 if (header->record_size > available_)
524 return; 554 return;
525 555
(...skipping 24 matching lines...)
550 // This information is kept for every thread that is tracked. It is filled 580 // This information is kept for every thread that is tracked. It is filled
551 // the very first time the thread is seen. All fields must be of exact sizes 581 // the very first time the thread is seen. All fields must be of exact sizes
552 // so there is no issue moving between 32 and 64-bit builds. 582 // so there is no issue moving between 32 and 64-bit builds.
553 struct ThreadActivityTracker::Header { 583 struct ThreadActivityTracker::Header {
554 // Defined in .h for analyzer access. Increment this if structure changes! 584 // Defined in .h for analyzer access. Increment this if structure changes!
555 static constexpr uint32_t kPersistentTypeId = 585 static constexpr uint32_t kPersistentTypeId =
556 GlobalActivityTracker::kTypeIdActivityTracker; 586 GlobalActivityTracker::kTypeIdActivityTracker;
557 587
558 // Expected size for 32/64-bit check. 588 // Expected size for 32/64-bit check.
559 static constexpr size_t kExpectedInstanceSize = 589 static constexpr size_t kExpectedInstanceSize =
560 OwningProcess::kExpectedInstanceSize + 72; 590 OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize +
591 72;
561 592
562 // This information uniquely identifies a process. 593 // This information uniquely identifies a process.
563 OwningProcess owner; 594 OwningProcess owner;
564 595
565 // The thread-id (thread_ref.as_id) to which this data belongs. This number 596 // The thread-id (thread_ref.as_id) to which this data belongs. This number
566 // is not guaranteed to mean anything but combined with the process-id from 597 // is not guaranteed to mean anything but combined with the process-id from
567 // OwningProcess is unique among all active trackers. 598 // OwningProcess is unique among all active trackers.
568 ThreadRef thread_ref; 599 ThreadRef thread_ref;
569 600
570 // The start-time and start-ticks when the data was created. Each activity 601 // The start-time and start-ticks when the data was created. Each activity
571 // record has a |time_internal| value that can be converted to a "wall time" 602 // record has a |time_internal| value that can be converted to a "wall time"
572 // with these two values. 603 // with these two values.
573 int64_t start_time; 604 int64_t start_time;
574 int64_t start_ticks; 605 int64_t start_ticks;
575 606
576 // The number of Activity slots (spaces that can hold an Activity) that 607 // The number of Activity slots (spaces that can hold an Activity) that
577 // immediately follow this structure in memory. 608 // immediately follow this structure in memory.
578 uint32_t stack_slots; 609 uint32_t stack_slots;
579 610
580 // Some padding to keep everything 64-bit aligned. 611 // Some padding to keep everything 64-bit aligned.
581 uint32_t padding; 612 uint32_t padding;
582 613
583 // The current depth of the stack. This may be greater than the number of 614 // The current depth of the stack. This may be greater than the number of
584 // slots. If the depth exceeds the number of slots, the newest entries 615 // slots. If the depth exceeds the number of slots, the newest entries
585 // won't be recorded. 616 // won't be recorded.
586 std::atomic<uint32_t> current_depth; 617 std::atomic<uint32_t> current_depth;
587 618
588 // A memory location used to indicate if changes have been made to the stack 619 // A memory location used to indicate if changes have been made to the data
589 // that would invalidate an in-progress read of its contents. The active 620 // that would invalidate an in-progress read of its contents. The active
590 // tracker will zero the value whenever something gets popped from the 621 // tracker will zero the value whenever something gets popped from the
591 // stack. A monitoring tracker can write a non-zero value here, copy the 622 // stack. A monitoring tracker can write a non-zero value here, copy the
592 // stack contents, and read the value to know, if it is still non-zero, that 623 // stack contents, and read the value to know, if it is still non-zero, that
593 // the contents didn't change while being copied. This can handle concurrent 624 // the contents didn't change while being copied. This can handle concurrent
594 // snapshot operations only if each snapshot writes a different bit (which 625 // snapshot operations only if each snapshot writes a different bit (which
595 // is not the current implementation so no parallel snapshots allowed). 626 // is not the current implementation so no parallel snapshots allowed).
596 std::atomic<uint32_t> stack_unchanged; 627 std::atomic<uint32_t> data_unchanged;
628
629 // The last "exception" activity. This can't be stored on the stack because
630 // that could get popped as things unwind.
631 Activity last_exception;
597 632
598 // The name of the thread (up to a maximum length). Dynamic-length names 633 // The name of the thread (up to a maximum length). Dynamic-length names
599 // are not practical since the memory has to come from the same persistent 634 // are not practical since the memory has to come from the same persistent
600 // allocator that holds this structure and to which this object has no 635 // allocator that holds this structure and to which this object has no
601 // reference. 636 // reference.
602 char thread_name[32]; 637 char thread_name[32];
603 }; 638 };
604 639
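The |data_unchanged| word above implements a best-effort validation handshake: a snapshot thread writes a non-zero value before copying, the owning thread zeroes it on any mutation, and the snapshot thread re-reads it afterwards to decide whether to retry. A minimal sketch of that protocol using plain std::atomic (illustrative types; the real code layers this over persistent memory and, as noted, supports only one snapshotter at a time):

#include <atomic>
#include <cstddef>
#include <cstring>

struct Shared {
  std::atomic<uint32_t> unchanged{0};  // zeroed by the owner on mutation
  char payload[256];
};

// Owning thread: mutate, then clear the flag so in-flight copies retry.
void Mutate(Shared* s, const char* bytes, size_t len) {
  memcpy(s->payload, bytes, len);
  s->unchanged.store(0, std::memory_order_release);
}

// Snapshot thread: best-effort consistent copy with bounded retries.
bool Snapshot(Shared* s, char* out) {
  for (int attempt = 0; attempt < 10; ++attempt) {
    s->unchanged.store(1, std::memory_order_seq_cst);  // "copy in progress"
    memcpy(out, s->payload, sizeof(s->payload));
    if (s->unchanged.load(std::memory_order_seq_cst))  // still 1: no mutation
      return true;
  }
  return false;  // too many interleaved mutations
}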
605 ThreadActivityTracker::Snapshot::Snapshot() {} 640 ThreadActivityTracker::Snapshot::Snapshot() {}
606 ThreadActivityTracker::Snapshot::~Snapshot() {} 641 ThreadActivityTracker::Snapshot::~Snapshot() {}
(...skipping 57 matching lines...)
664 // Provided memory should either be completely initialized or all zeros. 699 // Provided memory should either be completely initialized or all zeros.
665 if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) { 700 if (header_->owner.data_id.load(std::memory_order_relaxed) == 0) {
666 // This is a new file. Double-check other fields and then initialize. 701 // This is a new file. Double-check other fields and then initialize.
667 DCHECK_EQ(0, header_->owner.process_id); 702 DCHECK_EQ(0, header_->owner.process_id);
668 DCHECK_EQ(0, header_->owner.create_stamp); 703 DCHECK_EQ(0, header_->owner.create_stamp);
669 DCHECK_EQ(0, header_->thread_ref.as_id); 704 DCHECK_EQ(0, header_->thread_ref.as_id);
670 DCHECK_EQ(0, header_->start_time); 705 DCHECK_EQ(0, header_->start_time);
671 DCHECK_EQ(0, header_->start_ticks); 706 DCHECK_EQ(0, header_->start_ticks);
672 DCHECK_EQ(0U, header_->stack_slots); 707 DCHECK_EQ(0U, header_->stack_slots);
673 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed)); 708 DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
674 DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed)); 709 DCHECK_EQ(0U, header_->data_unchanged.load(std::memory_order_relaxed));
675 DCHECK_EQ(0, stack_[0].time_internal); 710 DCHECK_EQ(0, stack_[0].time_internal);
676 DCHECK_EQ(0U, stack_[0].origin_address); 711 DCHECK_EQ(0U, stack_[0].origin_address);
677 DCHECK_EQ(0U, stack_[0].call_stack[0]); 712 DCHECK_EQ(0U, stack_[0].call_stack[0]);
678 DCHECK_EQ(0U, stack_[0].data.task.sequence_id); 713 DCHECK_EQ(0U, stack_[0].data.task.sequence_id);
679 714
680 #if defined(OS_WIN) 715 #if defined(OS_WIN)
681 header_->thread_ref.as_tid = PlatformThread::CurrentId(); 716 header_->thread_ref.as_tid = PlatformThread::CurrentId();
682 #elif defined(OS_POSIX) 717 #elif defined(OS_POSIX)
683 header_->thread_ref.as_handle = 718 header_->thread_ref.as_handle =
684 PlatformThread::CurrentHandle().platform_handle(); 719 PlatformThread::CurrentHandle().platform_handle();
(...skipping 93 matching lines...)
778 // Validate that everything is running correctly. 813 // Validate that everything is running correctly.
779 DCHECK_EQ(id, depth); 814 DCHECK_EQ(id, depth);
780 815
781 // A thread-checker creates a lock to check the thread-id which means 816 // A thread-checker creates a lock to check the thread-id which means
782 // re-entry into this code if lock acquisitions are being tracked. 817 // re-entry into this code if lock acquisitions are being tracked.
783 DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE || 818 DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
784 thread_checker_.CalledOnValidThread()); 819 thread_checker_.CalledOnValidThread());
785 820
786 // The stack has shrunk meaning that some other thread trying to copy the 821 // The stack has shrunk meaning that some other thread trying to copy the
787 // contents for reporting purposes could get bad data. That thread would 822 // contents for reporting purposes could get bad data. That thread would
788 // have written a non-zero value into |stack_unchanged|; clearing it here 823 // have written a non-zero value into |data_unchanged|; clearing it here
789 // will let that thread detect that something did change. This needs to 824 // will let that thread detect that something did change. This needs to
790 // happen after the atomic |depth| operation above so a "release" store 825 // happen after the atomic |depth| operation above so a "release" store
791 // is required. 826 // is required.
792 header_->stack_unchanged.store(0, std::memory_order_release); 827 header_->data_unchanged.store(0, std::memory_order_release);
793 } 828 }
794 829
795 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData( 830 std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
796 ActivityId id, 831 ActivityId id,
797 ActivityTrackerMemoryAllocator* allocator) { 832 ActivityTrackerMemoryAllocator* allocator) {
798 // User-data is only stored for activities actually held in the stack. 833 // Don't allow user data for lock acquisition as recursion may occur.
799 if (id < stack_slots_) { 834 if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
800 // Don't allow user data for lock acquisition as recursion may occur. 835 NOTREACHED();
801 if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) { 836 return MakeUnique<ActivityUserData>();
802 NOTREACHED();
803 return MakeUnique<ActivityUserData>(nullptr, 0);
804 }
805
806 // Get (or reuse) a block of memory and create a real UserData object
807 // on it.
808 PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
809 void* memory =
810 allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny);
811 if (memory) {
812 std::unique_ptr<ActivityUserData> user_data =
813 MakeUnique<ActivityUserData>(memory, kUserDataSize);
814 stack_[id].user_data_ref = ref;
815 stack_[id].user_data_id = user_data->id();
816 return user_data;
817 }
818 } 837 }
819 838
820 // Return a dummy object that will still accept (but ignore) Set() calls. 839 // User-data is only stored for activities actually held in the stack.
821 return MakeUnique<ActivityUserData>(nullptr, 0); 840 if (id >= stack_slots_)
841 return MakeUnique<ActivityUserData>();
842
843 // Create and return a real UserData object.
844 return CreateUserDataForActivity(&stack_[id], allocator);
822 } 845 }
823 846
824 bool ThreadActivityTracker::HasUserData(ActivityId id) { 847 bool ThreadActivityTracker::HasUserData(ActivityId id) {
825 // User-data is only stored for activities actually held in the stack. 848 // User-data is only stored for activities actually held in the stack.
826 return (id < stack_slots_ && stack_[id].user_data_ref); 849 return (id < stack_slots_ && stack_[id].user_data_ref);
827 } 850 }
828 851
829 void ThreadActivityTracker::ReleaseUserData( 852 void ThreadActivityTracker::ReleaseUserData(
830 ActivityId id, 853 ActivityId id,
831 ActivityTrackerMemoryAllocator* allocator) { 854 ActivityTrackerMemoryAllocator* allocator) {
832 // User-data is only stored for activities actually held in the stack. 855 // User-data is only stored for activities actually held in the stack.
833 if (id < stack_slots_ && stack_[id].user_data_ref) { 856 if (id < stack_slots_ && stack_[id].user_data_ref) {
834 allocator->ReleaseObjectReference(stack_[id].user_data_ref); 857 allocator->ReleaseObjectReference(stack_[id].user_data_ref);
835 stack_[id].user_data_ref = 0; 858 stack_[id].user_data_ref = 0;
836 } 859 }
860 // Release user-data for an exception.
manzagop (departed) 2017/02/27 16:05:16 Does this clear the exception data as soon as we p
bcwhite 2017/03/14 12:53:26 No. That is held completely independent of the ac
861 if (header_->last_exception.user_data_ref) {
862 exception_data.reset();
863 allocator->ReleaseObjectReference(header_->last_exception.user_data_ref);
864 header_->last_exception.user_data_ref = 0;
865 }
866 }
867
868 ActivityUserData& ThreadActivityTracker::ExceptionActivity(
869 const void* program_counter,
870 const void* origin,
871 Activity::Type type,
872 const ActivityData& data) {
873 // A thread-checker creates a lock to check the thread-id which means
874 // re-entry into this code if lock acquisitions are being tracked.
875 DCHECK(thread_checker_.CalledOnValidThread());
876
877 // If there was a previous exception, release its user-data.
878 if (header_->last_exception.user_data_ref) {
879 header_->last_exception.user_data_ref = 0;
manzagop (departed) 2017/02/27 16:05:16 This doesn't release the reference?
bcwhite 2017/03/14 12:53:26 Yes. I hadn't finished the conversion from the fi
880 }
881
882 // Fill the reusable exception activity.
883 Activity::FillFrom(&header_->last_exception, program_counter, origin, type,
884 data);
885
886 // The data has changed meaning that some other thread trying to copy the
887 // contents for reporting purposes could get bad data.
888 header_->data_unchanged.store(0, std::memory_order_relaxed);
889
890 // Re-use any existing user-data structure.
manzagop (departed) 2017/02/27 16:05:16 How do the exception ref and data id get set?
bcwhite 2017/03/14 12:53:26 The data_id is set when the user-data object was o
891 if (exception_data) {
892 exception_data->Reset();
893 return *exception_data;
894 }
895
896 // Get the reserved "exception data" storage, if it exists.
manzagop (departed) 2017/02/27 16:05:15 IIUC only the first excepting thread can have data
bcwhite 2017/03/14 12:53:26 I'll refill the g_exception_data from time to time
897 exception_data.reset(reinterpret_cast<ActivityUserData*>(
898 subtle::NoBarrier_AtomicExchange(&g_exception_data, 0)));
899 if (!exception_data)
900 return g_data_sink.Get();
901 return *exception_data;
837 } 902 }
838 903
839 bool ThreadActivityTracker::IsValid() const { 904 bool ThreadActivityTracker::IsValid() const {
840 if (header_->owner.data_id.load(std::memory_order_acquire) == 0 || 905 if (header_->owner.data_id.load(std::memory_order_acquire) == 0 ||
841 header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 || 906 header_->owner.process_id == 0 || header_->thread_ref.as_id == 0 ||
842 header_->start_time == 0 || header_->start_ticks == 0 || 907 header_->start_time == 0 || header_->start_ticks == 0 ||
843 header_->stack_slots != stack_slots_ || 908 header_->stack_slots != stack_slots_ ||
844 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') { 909 header_->thread_name[sizeof(header_->thread_name) - 1] != '\0') {
845 return false; 910 return false;
846 } 911 }
(...skipping 24 matching lines...)
871 936
872 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) { 937 for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
873 // Remember the data IDs to ensure nothing is replaced during the snapshot 938 // Remember the data IDs to ensure nothing is replaced during the snapshot
874 // operation. Use "acquire" so that all the non-atomic fields of the 939 // operation. Use "acquire" so that all the non-atomic fields of the
875 // structure are valid (at least at the current moment in time). 940 // structure are valid (at least at the current moment in time).
876 const uint32_t starting_id = 941 const uint32_t starting_id =
877 header_->owner.data_id.load(std::memory_order_acquire); 942 header_->owner.data_id.load(std::memory_order_acquire);
878 const int64_t starting_process_id = header_->owner.process_id; 943 const int64_t starting_process_id = header_->owner.process_id;
879 const int64_t starting_thread_id = header_->thread_ref.as_id; 944 const int64_t starting_thread_id = header_->thread_ref.as_id;
880 945
881 // Write a non-zero value to |stack_unchanged| so it's possible to detect 946 // Write a non-zero value to |data_unchanged| so it's possible to detect
882 // at the end that nothing has changed since copying the data began. A 947 // at the end that nothing has changed since copying the data began. A
883 // "cst" operation is required to ensure it occurs before everything else. 948 // "cst" operation is required to ensure it occurs before everything else.
884 // Using "cst" memory ordering is relatively expensive but this is only 949 // Using "cst" memory ordering is relatively expensive but this is only
885 // done during analysis so doesn't directly affect the worker threads. 950 // done during analysis so doesn't directly affect the worker threads.
886 header_->stack_unchanged.store(1, std::memory_order_seq_cst); 951 header_->data_unchanged.store(1, std::memory_order_seq_cst);
887 952
888 // Fetching the current depth also "acquires" the contents of the stack. 953 // Fetching the current depth also "acquires" the contents of the stack.
889 depth = header_->current_depth.load(std::memory_order_acquire); 954 depth = header_->current_depth.load(std::memory_order_acquire);
890 uint32_t count = std::min(depth, stack_slots_); 955 uint32_t count = std::min(depth, stack_slots_);
891 output_snapshot->activity_stack.resize(count); 956 output_snapshot->activity_stack.resize(count);
892 if (count > 0) { 957 if (count > 0) {
893 // Copy the existing contents. Memcpy is used for speed. 958 // Copy the existing contents. Memcpy is used for speed.
894 memcpy(&output_snapshot->activity_stack[0], stack_, 959 memcpy(&output_snapshot->activity_stack[0], stack_,
895 count * sizeof(Activity)); 960 count * sizeof(Activity));
896 } 961 }
897 962
963 // Capture the last exception.
964 memcpy(&output_snapshot->last_exception, &header_->last_exception,
965 sizeof(Activity));
966
967 // TODO(bcwhite): Snapshot other things here.
968
898 // Retry if something changed during the copy. A "cst" operation ensures 969 // Retry if something changed during the copy. A "cst" operation ensures
899 // it must happen after all the above operations. 970 // it must happen after all the above operations.
900 if (!header_->stack_unchanged.load(std::memory_order_seq_cst)) 971 if (!header_->data_unchanged.load(std::memory_order_seq_cst))
901 continue; 972 continue;
902 973
903 // Stack copied. Record its full depth. 974 // Stack copied. Record its full depth.
904 output_snapshot->activity_stack_depth = depth; 975 output_snapshot->activity_stack_depth = depth;
905 976
906 // TODO(bcwhite): Snapshot other things here.
907
908 // Get the general thread information. 977 // Get the general thread information.
909 output_snapshot->thread_name = 978 output_snapshot->thread_name =
910 std::string(header_->thread_name, sizeof(header_->thread_name) - 1); 979 std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
911 output_snapshot->thread_id = header_->thread_ref.as_id; 980 output_snapshot->thread_id = header_->thread_ref.as_id;
912 output_snapshot->process_id = header_->owner.process_id; 981 output_snapshot->process_id = header_->owner.process_id;
913 982
914 // All characters of the thread-name buffer were copied so as to not break 983 // All characters of the thread-name buffer were copied so as to not break
915 // if the trailing NUL were missing. Now limit the length if the actual 984 // if the trailing NUL were missing. Now limit the length if the actual
916 // name is shorter. 985 // name is shorter.
917 output_snapshot->thread_name.resize( 986 output_snapshot->thread_name.resize(
(...skipping 15 matching lines...)
933 1002
934 // Change all the timestamps in the activities from "ticks" to "wall" time. 1003 // Change all the timestamps in the activities from "ticks" to "wall" time.
935 const Time start_time = Time::FromInternalValue(header_->start_time); 1004 const Time start_time = Time::FromInternalValue(header_->start_time);
936 const int64_t start_ticks = header_->start_ticks; 1005 const int64_t start_ticks = header_->start_ticks;
937 for (Activity& activity : output_snapshot->activity_stack) { 1006 for (Activity& activity : output_snapshot->activity_stack) {
938 activity.time_internal = 1007 activity.time_internal =
939 (start_time + 1008 (start_time +
940 TimeDelta::FromInternalValue(activity.time_internal - start_ticks)) 1009 TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
941 .ToInternalValue(); 1010 .ToInternalValue();
942 } 1011 }
1012 output_snapshot->last_exception.time_internal =
1013 (start_time +
1014 TimeDelta::FromInternalValue(
1015 output_snapshot->last_exception.time_internal - start_ticks))
1016 .ToInternalValue();
943 1017
944 // Success! 1018 // Success!
945 return true; 1019 return true;
946 } 1020 }
947 1021
948 // Too many attempts. 1022 // Too many attempts.
949 return false; 1023 return false;
950 } 1024 }
951 1025
952 const void* ThreadActivityTracker::GetBaseAddress() { 1026 const void* ThreadActivityTracker::GetBaseAddress() {
(...skipping 11 matching lines...)
964 int64_t* out_stamp) { 1038 int64_t* out_stamp) {
965 const Header* header = reinterpret_cast<const Header*>(memory); 1039 const Header* header = reinterpret_cast<const Header*>(memory);
966 return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp); 1040 return OwningProcess::GetOwningProcessId(&header->owner, out_id, out_stamp);
967 } 1041 }
968 1042
969 // static 1043 // static
970 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) { 1044 size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
971 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header); 1045 return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
972 } 1046 }
973 1047
1048 std::unique_ptr<ActivityUserData>
1049 ThreadActivityTracker::CreateUserDataForActivity(
1050 Activity* activity,
1051 ActivityTrackerMemoryAllocator* allocator) {
1052 DCHECK_EQ(0U, activity->user_data_ref);
1053
1054 PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
1055 void* memory = allocator->GetAsArray<char>(ref, kUserDataSize);
1056 if (memory) {
1057 std::unique_ptr<ActivityUserData> user_data =
1058 MakeUnique<ActivityUserData>(memory, kUserDataSize);
1059 activity->user_data_ref = ref;
1060 activity->user_data_id = user_data->id();
1061 return user_data;
1062 }
1063
1064 // Return a dummy object that will still accept (but ignore) Set() calls.
1065 return MakeUnique<ActivityUserData>();
1066 }
1067
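Note the null-object behaviour this gives callers: when the allocator is exhausted, or the activity is not held in the stack, the default-constructed ActivityUserData still accepts Set() calls and silently discards them, so no call site needs a null check. A hedged usage sketch (the |tracker|, |activity_id| and |allocator| names are assumed to exist at the call site):

std::unique_ptr<ActivityUserData> data =
    tracker->GetUserData(activity_id, &allocator);
data->SetString("url", "https://example.com");  // recorded, or silently dropped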
974 // The instantiation of the GlobalActivityTracker object. 1068 // The instantiation of the GlobalActivityTracker object.
975 // The object held here will obviously not be destructed at process exit 1069 // The object held here will obviously not be destructed at process exit
976 // but that's best since PersistentMemoryAllocator objects (that underlie 1070 // but that's best since PersistentMemoryAllocator objects (that underlie
977 // GlobalActivityTracker objects) are explicitly forbidden from doing anything 1071 // GlobalActivityTracker objects) are explicitly forbidden from doing anything
978 // essential at exit anyway due to the fact that they depend on data managed 1072 // essential at exit anyway due to the fact that they depend on data managed
979 // elsewhere and which could be destructed first. An AtomicWord is used instead 1073 // elsewhere and which could be destructed first. An AtomicWord is used instead
980 // of std::atomic because the latter can create global ctors and dtors. 1074 // of std::atomic because the latter can create global ctors and dtors.
981 subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0; 1075 subtle::AtomicWord GlobalActivityTracker::g_tracker_ = 0;
982 1076
983 GlobalActivityTracker::ModuleInfo::ModuleInfo() {} 1077 GlobalActivityTracker::ModuleInfo::ModuleInfo() {}
(...skipping 133 matching lines...)
1117 } 1211 }
1118 1212
1119 ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() { 1213 ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
1120 if (!user_data_) { 1214 if (!user_data_) {
1121 if (tracker_) { 1215 if (tracker_) {
1122 GlobalActivityTracker* global = GlobalActivityTracker::Get(); 1216 GlobalActivityTracker* global = GlobalActivityTracker::Get();
1123 AutoLock lock(global->user_data_allocator_lock_); 1217 AutoLock lock(global->user_data_allocator_lock_);
1124 user_data_ = 1218 user_data_ =
1125 tracker_->GetUserData(activity_id_, &global->user_data_allocator_); 1219 tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
1126 } else { 1220 } else {
1127 user_data_ = MakeUnique<ActivityUserData>(nullptr, 0); 1221 user_data_ = MakeUnique<ActivityUserData>();
1128 } 1222 }
1129 } 1223 }
1130 return *user_data_; 1224 return *user_data_;
1131 } 1225 }
1132 1226
1133 GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory, 1227 GlobalActivityTracker::ThreadSafeUserData::ThreadSafeUserData(void* memory,
1134 size_t size) 1228 size_t size)
1135 : ActivityUserData(memory, size) {} 1229 : ActivityUserData(memory, size) {}
1136 1230
1137 GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {} 1231 GlobalActivityTracker::ThreadSafeUserData::~ThreadSafeUserData() {}
(...skipping 411 matching lines...)
1549 1643
1550 // Remove the destructed tracker from the set of known ones. 1644 // Remove the destructed tracker from the set of known ones.
1551 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed)); 1645 DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
1552 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed); 1646 thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
1553 1647
1554 // Release this memory for re-use at a later time. 1648 // Release this memory for re-use at a later time.
1555 base::AutoLock autolock(thread_tracker_allocator_lock_); 1649 base::AutoLock autolock(thread_tracker_allocator_lock_);
1556 thread_tracker_allocator_.ReleaseObjectReference(mem_reference); 1650 thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
1557 } 1651 }
1558 1652
1653 ActivityUserData& GlobalActivityTracker::RecordExceptionImpl(
1654 const void* pc,
1655 const void* origin) {
1656 ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
manzagop (departed) 2017/02/27 16:05:16 Comment on why we don't use GetOrCreate and why it
bcwhite 2017/03/14 12:53:26 Done.
1657 if (!tracker)
1658 return g_data_sink.Get();
1659
1660 return tracker->ExceptionActivity(pc, origin, Activity::ACT_EXCEPTION,
1661 ActivityData::ForGeneric(0, 0));
1662 }
1663
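RecordExceptionImpl() is the intended entry point for an exception handler. A hedged sketch of a Windows vectored handler feeding it, assuming the method is reachable from the handler (the final API may expose a static wrapper instead); the handler name and the "code" key are illustrative, not part of this change:

#include <windows.h>

#include "base/debug/activity_tracker.h"

// Record the faulting PC and the exception code, then let normal
// exception dispatch continue unimpeded.
LONG CALLBACK RecordingHandler(EXCEPTION_POINTERS* info) {
  using base::debug::GlobalActivityTracker;
  GlobalActivityTracker* tracker = GlobalActivityTracker::Get();
  if (tracker) {
    void* pc = info->ExceptionRecord->ExceptionAddress;
    base::debug::ActivityUserData& data =
        tracker->RecordExceptionImpl(pc, pc);
    data.SetUint("code", info->ExceptionRecord->ExceptionCode);
  }
  return EXCEPTION_CONTINUE_SEARCH;
}

// Installed once at startup:
//   AddVectoredExceptionHandler(/*first=*/1, &RecordingHandler);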
1664 void GlobalActivityTracker::ReserveExceptionData() {
1665 if (subtle::NoBarrier_Load(&g_exception_data))
1666 return;
1667
1668 ActivityUserData* exception_data = new ActivityUserData(...);
manzagop (departed) 2017/02/27 16:05:16 ...?
bcwhite 2017/03/14 12:53:26 "..." is a C++11 feature that automatically create
1669 subtle::AtomicWord prev_data = subtle::NoBarrier_AtomicExchange(
1670 &g_exception_data, reinterpret_cast<uintptr_t>(exception_data));
1671
1672 // Handle case of two threads doing the same thing at the same time.
1673 if (prev_data)
1674 delete reinterpret_cast<ActivityUserData*>(prev_data);
1675 }
1676
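ReserveExceptionData() and ExceptionActivity() together form a lock-free single-slot handoff: the reserver publishes a pre-allocated object with an atomic exchange, deleting the duplicate if two threads race, and the excepting thread later claims sole ownership by exchanging zero back in. The same protocol in isolation, written with std::atomic for clarity (the tracker itself uses base::subtle::AtomicWord precisely to avoid a global constructor):

#include <atomic>

// Generic single-slot handoff. Reserve() may be called from several
// threads; exactly one object survives. Claim() transfers ownership.
template <typename T>
class HandoffSlot {
 public:
  void Reserve(T* object) {
    T* previous = slot_.exchange(object, std::memory_order_acq_rel);
    if (previous)
      delete previous;  // two reservers raced; keep only one object
  }

  T* Claim() {  // nullptr if nothing reserved or already claimed
    return slot_.exchange(nullptr, std::memory_order_acq_rel);
  }

 private:
  std::atomic<T*> slot_{nullptr};
};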
1559 // static 1677 // static
1560 void GlobalActivityTracker::OnTLSDestroy(void* value) { 1678 void GlobalActivityTracker::OnTLSDestroy(void* value) {
1561 delete reinterpret_cast<ManagedActivityTracker*>(value); 1679 delete reinterpret_cast<ManagedActivityTracker*>(value);
1562 } 1680 }
1563 1681
1564 ScopedActivity::ScopedActivity(const void* program_counter, 1682 ScopedActivity::ScopedActivity(const void* program_counter,
1565 uint8_t action, 1683 uint8_t action,
1566 uint32_t id, 1684 uint32_t id,
1567 int32_t info) 1685 int32_t info)
1568 : GlobalActivityTracker::ScopedThreadActivity( 1686 : GlobalActivityTracker::ScopedThreadActivity(
(...skipping 70 matching lines...)
1639 : GlobalActivityTracker::ScopedThreadActivity( 1757 : GlobalActivityTracker::ScopedThreadActivity(
1640 program_counter, 1758 program_counter,
1641 nullptr, 1759 nullptr,
1642 Activity::ACT_PROCESS_WAIT, 1760 Activity::ACT_PROCESS_WAIT,
1643 ActivityData::ForProcess(process->Pid()), 1761 ActivityData::ForProcess(process->Pid()),
1644 /*lock_allowed=*/true) {} 1762 /*lock_allowed=*/true) {}
1645 #endif 1763 #endif
1646 1764
1647 } // namespace debug 1765 } // namespace debug
1648 } // namespace base 1766 } // namespace base
