Chromium Code Reviews

Unified Diff: base/debug/activity_tracker.cc

Issue 2702413006: Enable storing last-dispatched exception per-thread. (Closed)
Patch Set: harden exception recording Created 3 years, 10 months ago
Index: base/debug/activity_tracker.cc
diff --git a/base/debug/activity_tracker.cc b/base/debug/activity_tracker.cc
index 59a66dfd097571051d89a6e3b8cc9ff583cc7ed7..e07ed2f3e3f7098be7632597c00f9b3cd2860e25 100644
--- a/base/debug/activity_tracker.cc
+++ b/base/debug/activity_tracker.cc
@@ -9,10 +9,12 @@
#include <utility>
#include "base/atomic_sequence_num.h"
+#include "base/atomicops.h"
#include "base/debug/stack_trace.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/memory_mapped_file.h"
+#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
@@ -52,6 +54,13 @@ const char kProcessPhaseDataKey[] = "process-phase";
// in the same memory space.
StaticAtomicSequenceNumber g_next_id;
+// A reusable user-data sink that just discards all data saved to it.
+LazyInstance<ActivityUserData>::Leaky g_data_sink;
+
+// A pointer to an AcitivyUserData object that can be used when handing exceptions.
manzagop (departed) 2017/02/27 16:05:16 typo: AcitivyUserData, handing
bcwhite 2017/03/14 12:53:26 Done.
+// This is an AtomicWord because std::atomic creates global ctors & dtors.
+subtle::AtomicWord g_exception_data = 0;
+
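For context, a minimal sketch of how an AtomicWord global carries a typed pointer without a static constructor, per the comment above; the GetExceptionData() helper is illustrative and not part of this CL:

// A zero-initialized integral word is constant-initialized, so no global
// ctor/dtor is emitted; readers recover the pointer with an explicit cast.
ActivityUserData* GetExceptionData() {
  return reinterpret_cast<ActivityUserData*>(
      subtle::NoBarrier_Load(&g_exception_data));
}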
union ThreadRef {
int64_t as_id;
#if defined(OS_WIN)
@@ -316,6 +325,8 @@ ActivityUserData::MemoryHeader::~MemoryHeader() {}
ActivityUserData::FieldHeader::FieldHeader() {}
ActivityUserData::FieldHeader::~FieldHeader() {}
+ActivityUserData::ActivityUserData() : ActivityUserData(nullptr, 0) {}
+
ActivityUserData::ActivityUserData(void* memory, size_t size)
: memory_(reinterpret_cast<char*>(memory)),
available_(RoundDownToAlignment(size, kMemoryAlignment)),
@@ -325,19 +336,26 @@ ActivityUserData::ActivityUserData(void* memory, size_t size)
return;
static_assert(0 == sizeof(MemoryHeader) % kMemoryAlignment, "invalid header");
- DCHECK_LT(sizeof(MemoryHeader), available_);
- if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
- header_->owner.Release_Initialize();
- memory_ += sizeof(MemoryHeader);
- available_ -= sizeof(MemoryHeader);
-
- // If there is already data present, load that. This allows the same class
- // to be used for analysis through snapshots.
- ImportExistingData();
+ Initialize();
}
ActivityUserData::~ActivityUserData() {}
+void ActivityUserData::Reset() {
manzagop (departed) 2017/02/27 16:05:16 DCHECK memory_/header_ or early exit.
bcwhite 2017/03/14 12:53:26 Done.
+ // Clear the memory in an atomic manner.
manzagop (departed) 2017/02/27 16:05:16 ActivityUserData is either used in a thread affine manner or accessed concurrently; if it's thread affine, why the atomic clear?
bcwhite 2017/03/14 12:53:26 It's written with thread affinity but another (analysis) thread can read it concurrently.
+ std::atomic<int>* data = reinterpret_cast<std::atomic<int>*>(header_);
+ const uint32_t words = (reinterpret_cast<uintptr_t>(memory_) -
+ reinterpret_cast<uintptr_t>(header_)) /
+ sizeof(int);
+ for (uint32_t i = 0; i < words; ++i) {
+ data->store(0, std::memory_order_release);
+ ++data;
+ }
+
+ values_.clear();
manzagop (departed) 2017/02/27 16:05:16 Also reset memory_ and available_?
bcwhite 2017/03/14 12:53:25 Oops! Done.
+ Initialize();
+}
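A minimal usage sketch of the reuse cycle Reset() enables, assuming the SetInt() setter this class already provides; the field name here is hypothetical:

void ReuseForNextException(ActivityUserData* data) {
  data->Reset();                       // atomic word-wise clear + Initialize()
  data->SetInt("exception-code", 5);   // repopulate via the normal Set* APIs
}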
+
bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
DCHECK(output_snapshot);
DCHECK(output_snapshot->empty());
@@ -513,6 +531,18 @@ void ActivityUserData::SetReference(StringPiece name,
Set(name, type, &rec, sizeof(rec));
}
+void ActivityUserData::Initialize() {
manzagop (departed) 2017/02/27 16:05:16 DCHECK memory_ or early return.
bcwhite 2017/03/14 12:53:26 Done.
+ DCHECK_LT(sizeof(MemoryHeader), available_);
+ if (header_->owner.data_id.load(std::memory_order_acquire) == 0)
+ header_->owner.Release_Initialize();
+ memory_ += sizeof(MemoryHeader);
+ available_ -= sizeof(MemoryHeader);
+
+ // If there is already data present, load that. This allows the same class
+ // to be used for analysis through snapshots.
+ ImportExistingData();
manzagop (departed) 2017/02/27 16:05:16 Worth skipping the import when just initialized?
bcwhite 2017/03/14 12:53:26 Not really since the first read will cause it to exit.
+}
+
void ActivityUserData::ImportExistingData() const {
while (available_ > sizeof(FieldHeader)) {
FieldHeader* header = reinterpret_cast<FieldHeader*>(memory_);
@@ -557,7 +587,8 @@ struct ThreadActivityTracker::Header {
// Expected size for 32/64-bit check.
static constexpr size_t kExpectedInstanceSize =
- OwningProcess::kExpectedInstanceSize + 72;
+ OwningProcess::kExpectedInstanceSize + Activity::kExpectedInstanceSize +
+ 72;
// This information uniquely identifies a process.
OwningProcess owner;
@@ -585,7 +616,7 @@ struct ThreadActivityTracker::Header {
// won't be recorded.
std::atomic<uint32_t> current_depth;
- // A memory location used to indicate if changes have been made to the stack
+ // A memory location used to indicate if changes have been made to the data
// that would invalidate an in-progress read of its contents. The active
// tracker will zero the value whenever something gets popped from the
// stack. A monitoring tracker can write a non-zero value here, copy the
@@ -593,7 +624,11 @@ struct ThreadActivityTracker::Header {
// the contents didn't change while being copied. This can handle concurrent
// snapshot operations only if each snapshot writes a different bit (which
// is not the current implementation so no parallel snapshots allowed).
- std::atomic<uint32_t> stack_unchanged;
+ std::atomic<uint32_t> data_unchanged;
+
+ // The last "exception" activity. This can't be stored on the stack because
+ // that could get popped as things unwind.
+ Activity last_exception;
// The name of the thread (up to a maximum length). Dynamic-length names
// are not practical since the memory has to come from the same persistent
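The |data_unchanged| handshake is seqlock-like. A reduced sketch of the two sides, with CopyEverything() standing in for the real copy logic:

// Tracked thread: any pop or exception invalidates in-progress copies.
void OnMutation() {
  data_unchanged.store(0, std::memory_order_release);
}

// Analyzer thread: copy optimistically, then verify nothing changed.
bool TrySnapshot(Snapshot* out) {
  data_unchanged.store(1, std::memory_order_seq_cst);  // before the copy
  CopyEverything(out);  // may race with the tracked thread
  return data_unchanged.load(std::memory_order_seq_cst) != 0;  // 0 => retry
}

As the comment notes, a single flag supports only one snapshotter at a time; parallel snapshots would need a distinct bit per reader.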
@@ -671,7 +706,7 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
DCHECK_EQ(0, header_->start_ticks);
DCHECK_EQ(0U, header_->stack_slots);
DCHECK_EQ(0U, header_->current_depth.load(std::memory_order_relaxed));
- DCHECK_EQ(0U, header_->stack_unchanged.load(std::memory_order_relaxed));
+ DCHECK_EQ(0U, header_->data_unchanged.load(std::memory_order_relaxed));
DCHECK_EQ(0, stack_[0].time_internal);
DCHECK_EQ(0U, stack_[0].origin_address);
DCHECK_EQ(0U, stack_[0].call_stack[0]);
@@ -785,40 +820,28 @@ void ThreadActivityTracker::PopActivity(ActivityId id) {
// The stack has shrunk meaning that some other thread trying to copy the
// contents for reporting purposes could get bad data. That thread would
- // have written a non-zero value into |stack_unchanged|; clearing it here
+ // have written a non-zero value into |data_unchanged|; clearing it here
// will let that thread detect that something did change. This needs to
// happen after the atomic |depth| operation above so a "release" store
// is required.
- header_->stack_unchanged.store(0, std::memory_order_release);
+ header_->data_unchanged.store(0, std::memory_order_release);
}
std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
ActivityId id,
ActivityTrackerMemoryAllocator* allocator) {
- // User-data is only stored for activities actually held in the stack.
- if (id < stack_slots_) {
- // Don't allow user data for lock acquisition as recursion may occur.
- if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
- NOTREACHED();
- return MakeUnique<ActivityUserData>(nullptr, 0);
- }
-
- // Get (or reuse) a block of memory and create a real UserData object
- // on it.
- PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
- void* memory =
- allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny);
- if (memory) {
- std::unique_ptr<ActivityUserData> user_data =
- MakeUnique<ActivityUserData>(memory, kUserDataSize);
- stack_[id].user_data_ref = ref;
- stack_[id].user_data_id = user_data->id();
- return user_data;
- }
+ // Don't allow user data for lock acquisition as recursion may occur.
+ if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
+ NOTREACHED();
+ return MakeUnique<ActivityUserData>();
}
- // Return a dummy object that will still accept (but ignore) Set() calls.
- return MakeUnique<ActivityUserData>(nullptr, 0);
+ // User-data is only stored for activities actually held in the stack.
+ if (id >= stack_slots_)
+ return MakeUnique<ActivityUserData>();
+
+ // Create and return a real UserData object.
+ return CreateUserDataForActivity(&stack_[id], allocator);
}
bool ThreadActivityTracker::HasUserData(ActivityId id) {
@@ -834,6 +857,48 @@ void ThreadActivityTracker::ReleaseUserData(
allocator->ReleaseObjectReference(stack_[id].user_data_ref);
stack_[id].user_data_ref = 0;
}
+ // Release user-data for an exception.
manzagop (departed) 2017/02/27 16:05:16 Does this clear the exception data as soon as we pop an activity?
bcwhite 2017/03/14 12:53:26 No. That is held completely independent of the activity stack.
+ if (header_->last_exception.user_data_ref) {
+ exception_data.reset();
+ allocator->ReleaseObjectReference(header_->last_exception.user_data_ref);
+ header_->last_exception.user_data_ref = 0;
+ }
+}
+
+ActivityUserData& ThreadActivityTracker::ExceptionActivity(
+ const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data) {
+ // A thread-checker creates a lock to check the thread-id which means
+ // re-entry into this code if lock acquisitions are being tracked.
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ // If there was a previous exception, release its user-data.
+ if (header_->last_exception.user_data_ref) {
+ header_->last_exception.user_data_ref = 0;
manzagop (departed) 2017/02/27 16:05:16 This doesn't release the reference?
bcwhite 2017/03/14 12:53:26 Yes. I hadn't finished the conversion from the first implementation.
+ }
+
+ // Fill the reusable exception activity.
+ Activity::FillFrom(&header_->last_exception, program_counter, origin, type,
+ data);
+
+ // The data has changed meaning that some other thread trying to copy the
+ // contents for reporting purposes could get bad data.
+ header_->data_unchanged.store(0, std::memory_order_relaxed);
+
+ // Re-use any existing user-data structure.
manzagop (departed) 2017/02/27 16:05:16 How do the exception ref and data id get set?
bcwhite 2017/03/14 12:53:26 The data_id is set when the user-data object was originally created.
+ if (exception_data) {
+ exception_data->Reset();
+ return *exception_data;
+ }
+
+ // Get the reserved "exception data" storage, if it exists.
manzagop (departed) 2017/02/27 16:05:15 IIUC only the first excepting thread can have data?
bcwhite 2017/03/14 12:53:26 I'll refill the g_exception_data from time to time.
+ exception_data.reset(reinterpret_cast<ActivityUserData*>(
+ subtle::NoBarrier_AtomicExchange(&g_exception_data, 0)));
+ if (!exception_data)
+ return g_data_sink.Get();
+ return *exception_data;
}
bool ThreadActivityTracker::IsValid() const {
@@ -878,12 +943,12 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
const int64_t starting_process_id = header_->owner.process_id;
const int64_t starting_thread_id = header_->thread_ref.as_id;
- // Write a non-zero value to |stack_unchanged| so it's possible to detect
+ // Write a non-zero value to |data_unchanged| so it's possible to detect
// at the end that nothing has changed since copying the data began. A
// "cst" operation is required to ensure it occurs before everything else.
// Using "cst" memory ordering is relatively expensive but this is only
// done during analysis so doesn't directly affect the worker threads.
- header_->stack_unchanged.store(1, std::memory_order_seq_cst);
+ header_->data_unchanged.store(1, std::memory_order_seq_cst);
// Fetching the current depth also "acquires" the contents of the stack.
depth = header_->current_depth.load(std::memory_order_acquire);
@@ -895,16 +960,20 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
count * sizeof(Activity));
}
+ // Capture the last exception.
+ memcpy(&output_snapshot->last_exception, &header_->last_exception,
+ sizeof(Activity));
+
+ // TODO(bcwhite): Snapshot other things here.
+
// Retry if something changed during the copy. A "cst" operation ensures
// it must happen after all the above operations.
- if (!header_->stack_unchanged.load(std::memory_order_seq_cst))
+ if (!header_->data_unchanged.load(std::memory_order_seq_cst))
continue;
// Stack copied. Record its full depth.
output_snapshot->activity_stack_depth = depth;
- // TODO(bcwhite): Snapshot other things here.
-
// Get the general thread information.
output_snapshot->thread_name =
std::string(header_->thread_name, sizeof(header_->thread_name) - 1);
@@ -940,6 +1009,11 @@ bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
TimeDelta::FromInternalValue(activity.time_internal - start_ticks))
.ToInternalValue();
}
+ output_snapshot->last_exception.time_internal =
+ (start_time +
+ TimeDelta::FromInternalValue(
+ output_snapshot->last_exception.time_internal - start_ticks))
+ .ToInternalValue();
// Success!
return true;
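The conversion applied to |last_exception| above (and to each stack activity) re-bases thread ticks onto wall-clock time. In isolation, assuming int64_t internal units throughout:

int64_t TicksToWallClock(int64_t ticks, int64_t start_ticks, Time start_time) {
  // wall = start_time + (ticks - start_ticks)
  return (start_time + TimeDelta::FromInternalValue(ticks - start_ticks))
      .ToInternalValue();
}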
@@ -971,6 +1045,26 @@ size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
return static_cast<size_t>(stack_depth) * sizeof(Activity) + sizeof(Header);
}
+std::unique_ptr<ActivityUserData>
+ThreadActivityTracker::CreateUserDataForActivity(
+ Activity* activity,
+ ActivityTrackerMemoryAllocator* allocator) {
+ DCHECK_EQ(0U, activity->user_data_ref);
+
+ PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
+ void* memory = allocator->GetAsArray<char>(ref, kUserDataSize);
+ if (memory) {
+ std::unique_ptr<ActivityUserData> user_data =
+ MakeUnique<ActivityUserData>(memory, kUserDataSize);
+ activity->user_data_ref = ref;
+ activity->user_data_id = user_data->id();
+ return user_data;
+ }
+
+ // Return a dummy object that will still accept (but ignore) Set() calls.
+ return MakeUnique<ActivityUserData>();
+}
+
// The instantiation of the GlobalActivityTracker object.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
@@ -1124,7 +1218,7 @@ ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
user_data_ =
tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
} else {
- user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
+ user_data_ = MakeUnique<ActivityUserData>();
}
}
return *user_data_;
@@ -1556,6 +1650,30 @@ void GlobalActivityTracker::ReturnTrackerMemory(
thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
}
+ActivityUserData& GlobalActivityTracker::RecordExceptionImpl(
+ const void* pc,
+ const void* origin) {
+ ThreadActivityTracker* tracker = GetTrackerForCurrentThread();
manzagop (departed) 2017/02/27 16:05:16 Comment on why we don't use GetOrCreate and why it's okay not to have a tracker.
bcwhite 2017/03/14 12:53:26 Done.
+ if (!tracker)
+ return g_data_sink.Get();
+
+ return tracker->ExceptionActivity(pc, origin, Activity::ACT_EXCEPTION,
+ ActivityData::ForGeneric(0, 0));
+}
+
+void GlobalActivityTracker::ReserveExceptionData() {
+ if (subtle::NoBarrier_Load(&g_exception_data))
+ return;
+
+ ActivityUserData* exception_data = new ActivityUserData(...);
manzagop (departed) 2017/02/27 16:05:16 ...?
bcwhite 2017/03/14 12:53:26 "..." is a C++11 feature that automatically create
+ subtle::AtomicWord prev_data = subtle::NoBarrier_AtomicExchange(
+ &g_exception_data, reinterpret_cast<uintptr_t>(exception_data));
+
+ // Handle case of two threads doing the same thing at the same time.
+ if (prev_data)
+ delete reinterpret_cast<ActivityUserData*>(prev_data);
+}
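Taken together with ExceptionActivity() above, the handoff is a single word exchange in each direction. A condensed sketch under the same assumptions, with illustrative names:

// Producer (any thread): publish a pre-allocated object; the loser of a
// racing reserve deletes its extra copy, as in ReserveExceptionData().
void Reserve(ActivityUserData* data) {
  subtle::AtomicWord prev = subtle::NoBarrier_AtomicExchange(
      &g_exception_data, reinterpret_cast<subtle::AtomicWord>(data));
  if (prev)
    delete reinterpret_cast<ActivityUserData*>(prev);
}

// Consumer (excepting thread): take exclusive ownership, or get null.
ActivityUserData* Consume() {
  return reinterpret_cast<ActivityUserData*>(
      subtle::NoBarrier_AtomicExchange(&g_exception_data, 0));
}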
+
// static
void GlobalActivityTracker::OnTLSDestroy(void* value) {
delete reinterpret_cast<ManagedActivityTracker*>(value);
