Chromium Code Reviews
Unified Diff: base/tracked_objects.cc

Issue 2386123003: Add heap allocator usage to task profiler. (Closed)
Patch Set: Figure out where the @#$%! corruption is coming from. Move heap tracking to TaskStopwatch. Created 4 years, 2 months ago
Index: base/tracked_objects.cc
diff --git a/base/tracked_objects.cc b/base/tracked_objects.cc
index 675c9b89e6747ff288902e78ca280f006045281b..13a15da1b9f4de3b3ee85dab1f5f23282cdf3178 100644
--- a/base/tracked_objects.cc
+++ b/base/tracked_objects.cc
@@ -12,6 +12,7 @@
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/debug/leak_annotations.h"
+#include "base/debug/scoped_thread_heap_usage.h"
#include "base/logging.h"
#include "base/process/process_handle.h"
#include "base/strings/stringprintf.h"
@@ -74,6 +75,19 @@ inline bool IsProfilerTimingEnabled() {
return current_timing_enabled == ENABLED_TIMING;
}
+void SaturatingAdd(const uint32_t addend, base::subtle::Atomic32* sum) {
+ // Bail out quickly if there is no work or the sum is already saturated.
Primiano Tucci (use gerrit) 2016/10/13 20:47:44 typo: alreadu
Sigurður Ásgeirsson 2016/10/14 20:11:35 Done.
+ if (addend == 0U || *sum == INT_MAX)
Primiano Tucci (use gerrit) 2016/10/13 20:47:44 I think you are supposed to use at least NoBarrier
Sigurður Ásgeirsson 2016/10/14 20:11:36 I'm modeling on what's done elsewhere in this file
+ return;
+
+ // Check for overflow.
+ int32_t new_sum = *sum + addend;
+ if (new_sum < *sum)
+ new_sum = INT_MAX;
+
+ base::subtle::NoBarrier_Store(sum, new_sum);
Primiano Tucci (use gerrit) 2016/10/13 20:47:44 this is not atomic anymore now? Don't you want a N
Sigurður Ásgeirsson 2016/10/14 20:11:35 This thread is the only writer, and in the interes
+}
+
} // namespace
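
For reference, the two review threads above can be addressed together: read the current value once through the atomic API, and do the overflow check in 64 bits, since signed 32-bit overflow is undefined behavior and "new_sum < *sum" is therefore not a reliable wrap check. A minimal sketch assuming only base/atomicops.h; SaturatingAddSketch is an illustrative name, not code from this patch:

void SaturatingAddSketch(uint32_t addend, base::subtle::Atomic32* sum) {
  // Single load through the atomic API; this thread is the only writer.
  int32_t current = base::subtle::NoBarrier_Load(sum);
  // Bail out quickly if there is no work or the sum is already saturated.
  if (addend == 0U || current == INT_MAX)
    return;
  // Widen to 64 bits so the addition itself cannot overflow, then clamp.
  int64_t new_sum = static_cast<int64_t>(current) + addend;
  if (new_sum > INT_MAX)
    new_sum = INT_MAX;
  base::subtle::NoBarrier_Store(sum, static_cast<int32_t>(new_sum));
}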
//------------------------------------------------------------------------------
@@ -88,8 +102,13 @@ DeathData::DeathData()
queue_duration_max_(0),
run_duration_sample_(0),
queue_duration_sample_(0),
- last_phase_snapshot_(nullptr) {
-}
+ alloc_ops_(0),
+ free_ops_(0),
+ allocated_bytes_(0),
+ freed_bytes_(0),
+ alloc_overhead_bytes_(0),
+ max_allocated_bytes_(0),
+ last_phase_snapshot_(nullptr) {}
DeathData::DeathData(const DeathData& other)
: count_(other.count_),
@@ -100,6 +119,12 @@ DeathData::DeathData(const DeathData& other)
queue_duration_max_(other.queue_duration_max_),
run_duration_sample_(other.run_duration_sample_),
queue_duration_sample_(other.queue_duration_sample_),
+ alloc_ops_(other.alloc_ops_),
+ free_ops_(other.free_ops_),
+ allocated_bytes_(other.allocated_bytes_),
+ freed_bytes_(other.freed_bytes_),
+ alloc_overhead_bytes_(other.alloc_overhead_bytes_),
+ max_allocated_bytes_(other.max_allocated_bytes_),
last_phase_snapshot_(nullptr) {
// This constructor will be used by std::map when adding new DeathData values
// to the map. At that point, last_phase_snapshot_ is still NULL, so we don't
@@ -125,9 +150,9 @@ DeathData::~DeathData() {
#define CONDITIONAL_ASSIGN(assign_it, target, source) \
((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))
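
CONDITIONAL_ASSIGN is a branchless select: -static_cast<int32_t>(assign_it) yields an all-ones mask when assign_it is true and zero when false, so the XOR either rewrites target to source or leaves it unchanged. For example, with target = 5, source = 9, assign_it = 1: 5 ^ ((5 ^ 9) & -1) = 5 ^ 12 = 9, while assign_it = 0 gives 5 ^ 0 = 5.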
-void DeathData::RecordDeath(const int32_t queue_duration,
- const int32_t run_duration,
- const uint32_t random_number) {
+void DeathData::RecordDurations(const int32_t queue_duration,
+ const int32_t run_duration,
+ const uint32_t random_number) {
// We'll just clamp at INT_MAX, but we should note this in the UI as such.
if (count_ < INT_MAX)
base::subtle::NoBarrier_Store(&count_, count_ + 1);
@@ -164,12 +189,29 @@ void DeathData::RecordDeath(const int32_t queue_duration,
}
}
+void DeathData::RecordAllocations(const uint32_t alloc_ops,
+ const uint32_t free_ops,
+ const uint32_t allocated_bytes,
+ const uint32_t freed_bytes,
+ const uint32_t alloc_overhead_bytes,
+ const uint32_t max_allocated_bytes) {
+ // Use saturating arithmetic.
+ SaturatingAdd(alloc_ops, &alloc_ops_);
+ SaturatingAdd(free_ops, &free_ops_);
+ SaturatingAdd(allocated_bytes, &allocated_bytes_);
+ SaturatingAdd(freed_bytes, &freed_bytes_);
+ SaturatingAdd(alloc_overhead_bytes, &alloc_overhead_bytes_);
+
+ if (max_allocated_bytes > INT_MAX)
Primiano Tucci (use gerrit) 2016/10/13 20:47:44 I think base::saturated_cast might help here?
Sigurður Ásgeirsson 2016/10/14 20:11:35 Thanks, neat! I didn't know of that.
+ base::subtle::NoBarrier_Store(&max_allocated_bytes_, INT_MAX);
+ else if (static_cast<int32_t>(max_allocated_bytes) > max_allocated_bytes_)
+ base::subtle::NoBarrier_Store(&max_allocated_bytes_, max_allocated_bytes);
+}
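
Applying the saturated_cast suggestion from the thread above, the INT_MAX branch collapses into a clamped comparison. A sketch, assuming base/numerics/safe_conversions.h, with the read going through NoBarrier_Load to match the reviewer's earlier comment rather than the patch's plain read:

  // base::saturated_cast clamps out-of-range values to the destination
  // type's limits instead of truncating them.
  int32_t clamped = base::saturated_cast<int32_t>(max_allocated_bytes);
  if (clamped > base::subtle::NoBarrier_Load(&max_allocated_bytes_))
    base::subtle::NoBarrier_Store(&max_allocated_bytes_, clamped);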
+
void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
// Snapshotting and storing current state.
- last_phase_snapshot_ = new DeathDataPhaseSnapshot(
- profiling_phase, count(), run_duration_sum(), run_duration_max(),
- run_duration_sample(), queue_duration_sum(), queue_duration_max(),
- queue_duration_sample(), last_phase_snapshot_);
+ last_phase_snapshot_ =
+ new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_);
// Not touching fields for which a delta can be computed by comparing with a
// snapshot from the previous phase. Resetting other fields. Sample values
@@ -209,8 +251,13 @@ DeathDataSnapshot::DeathDataSnapshot()
run_duration_sample(-1),
queue_duration_sum(-1),
queue_duration_max(-1),
- queue_duration_sample(-1) {
-}
+ queue_duration_sample(-1),
+ alloc_ops(-1),
+ free_ops(-1),
+ allocated_bytes(-1),
+ freed_bytes(-1),
+ alloc_overhead_bytes(-1),
+ max_allocated_bytes(-1) {}
DeathDataSnapshot::DeathDataSnapshot(int count,
int32_t run_duration_sum,
@@ -218,25 +265,55 @@ DeathDataSnapshot::DeathDataSnapshot(int count,
int32_t run_duration_sample,
int32_t queue_duration_sum,
int32_t queue_duration_max,
- int32_t queue_duration_sample)
+ int32_t queue_duration_sample,
+ int32_t alloc_ops,
+ int32_t free_ops,
+ int32_t allocated_bytes,
+ int32_t freed_bytes,
+ int32_t alloc_overhead_bytes,
+ int32_t max_allocated_bytes)
: count(count),
run_duration_sum(run_duration_sum),
run_duration_max(run_duration_max),
run_duration_sample(run_duration_sample),
queue_duration_sum(queue_duration_sum),
queue_duration_max(queue_duration_max),
- queue_duration_sample(queue_duration_sample) {}
+ queue_duration_sample(queue_duration_sample),
+ alloc_ops(alloc_ops),
+ free_ops(free_ops),
+ allocated_bytes(allocated_bytes),
+ freed_bytes(freed_bytes),
+ alloc_overhead_bytes(alloc_overhead_bytes),
+ max_allocated_bytes(max_allocated_bytes) {}
+
+DeathDataSnapshot::DeathDataSnapshot(const DeathData& death_data)
+ : count(death_data.count()),
+ run_duration_sum(death_data.run_duration_sum()),
+ run_duration_max(death_data.run_duration_max()),
+ run_duration_sample(death_data.run_duration_sample()),
+ queue_duration_sum(death_data.queue_duration_sum()),
+ queue_duration_max(death_data.queue_duration_max()),
+ queue_duration_sample(death_data.queue_duration_sample()),
+ alloc_ops(death_data.alloc_ops()),
+ free_ops(death_data.free_ops()),
+ allocated_bytes(death_data.allocated_bytes()),
+ freed_bytes(death_data.freed_bytes()),
+ alloc_overhead_bytes(death_data.alloc_overhead_bytes()),
+ max_allocated_bytes(death_data.max_allocated_bytes()) {}
DeathDataSnapshot::~DeathDataSnapshot() {
}
DeathDataSnapshot DeathDataSnapshot::Delta(
const DeathDataSnapshot& older) const {
- return DeathDataSnapshot(count - older.count,
- run_duration_sum - older.run_duration_sum,
- run_duration_max, run_duration_sample,
- queue_duration_sum - older.queue_duration_sum,
- queue_duration_max, queue_duration_sample);
+ return DeathDataSnapshot(
+ count - older.count, run_duration_sum - older.run_duration_sum,
+ run_duration_max, run_duration_sample,
+ queue_duration_sum - older.queue_duration_sum, queue_duration_max,
+ queue_duration_sample, alloc_ops - older.alloc_ops,
+ free_ops - older.free_ops, allocated_bytes - older.allocated_bytes,
+ freed_bytes - older.freed_bytes,
+ alloc_overhead_bytes - older.alloc_overhead_bytes, max_allocated_bytes);
}
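
Note that Delta subtracts only the cumulative fields; maxima and samples are point-in-time values and are carried over from the newer snapshot, which is why max_allocated_bytes is deliberately not subtracted. For example, if the older snapshot has allocated_bytes = 1000 and the newer one has allocated_bytes = 1500 with max_allocated_bytes = 300, the delta reports allocated_bytes = 500 and max_allocated_bytes = 300.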
//------------------------------------------------------------------------------
@@ -455,7 +532,8 @@ void ThreadData::Snapshot(int current_profiling_phase,
if (birth_count.second > 0) {
current_phase_tasks->push_back(
TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first),
- DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0),
+ DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0),
"Still_Alive"));
}
}
@@ -514,7 +592,21 @@ void ThreadData::TallyADeath(const Births& births,
base::AutoLock lock(map_lock_); // Lock as the map may get relocated now.
death_data = &death_map_[&births];
} // Release lock ASAP.
- death_data->RecordDeath(queue_duration, run_duration, random_number_);
+ death_data->RecordDurations(queue_duration, run_duration, random_number_);
+
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+ // TODO(siggi): Make this conditional on whether heap tracking is enabled.
+ // TODO(siggi): Should these be passed as uint64_t perhaps?
+ // DO NOT SUBMIT
+ base::debug::ThreadAllocatorUsage heap_usage = stopwatch.heap_usage().usage();
+ death_data->RecordAllocations(
+ static_cast<int32_t>(heap_usage.alloc_ops),
+ static_cast<int32_t>(heap_usage.free_ops),
+ static_cast<int32_t>(heap_usage.alloc_bytes),
+ static_cast<int32_t>(heap_usage.free_bytes),
+ static_cast<int32_t>(heap_usage.alloc_overhead_bytes),
+ static_cast<int32_t>(heap_usage.max_allocated_bytes));
+#endif
}
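
One way to resolve the uint64_t TODO above without widening DeathData's fields is to clamp at the call site rather than truncate. A sketch, assuming base/numerics/safe_conversions.h and that the ThreadAllocatorUsage counters are (or become) 64-bit; note RecordAllocations declares uint32_t parameters, so the patch's static_cast<int32_t> relies on an implicit conversion back:

  death_data->RecordAllocations(
      base::saturated_cast<uint32_t>(heap_usage.alloc_ops),
      base::saturated_cast<uint32_t>(heap_usage.free_ops),
      base::saturated_cast<uint32_t>(heap_usage.alloc_bytes),
      base::saturated_cast<uint32_t>(heap_usage.free_bytes),
      base::saturated_cast<uint32_t>(heap_usage.alloc_overhead_bytes),
      base::saturated_cast<uint32_t>(heap_usage.max_allocated_bytes));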
// static
@@ -653,13 +745,7 @@ void ThreadData::SnapshotMaps(int profiling_phase,
for (const auto& death : death_map_) {
deaths->push_back(std::make_pair(
death.first,
- DeathDataPhaseSnapshot(profiling_phase, death.second.count(),
- death.second.run_duration_sum(),
- death.second.run_duration_max(),
- death.second.run_duration_sample(),
- death.second.queue_duration_sum(),
- death.second.queue_duration_max(),
- death.second.queue_duration_sample(),
+ DeathDataPhaseSnapshot(profiling_phase, death.second,
death.second.last_phase_snapshot())));
}
}
@@ -839,6 +925,9 @@ void TaskStopwatch::Start() {
#endif
start_time_ = ThreadData::Now();
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+ heap_usage_.Start();
+#endif
current_thread_data_ = ThreadData::Get();
if (!current_thread_data_)
@@ -861,6 +950,10 @@ void TaskStopwatch::Stop() {
DCHECK(state_ == RUNNING);
state_ = STOPPED;
DCHECK(child_ == NULL);
+
+#endif
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+ heap_usage_.Stop(true);
#endif
if (!start_time_.is_null() && !end_time.is_null()) {
@@ -900,6 +993,12 @@ int32_t TaskStopwatch::RunDurationMs() const {
return wallclock_duration_ms_ - excluded_duration_ms_;
}
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+const base::debug::HeapUsageTracker& TaskStopwatch::heap_usage() const {
+ return heap_usage_;
+}
+#endif
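+
+// Pieced together from the calls visible in this patch (Start(), Stop(bool),
+// usage(), and the fields read in TallyADeath), a tracker's standalone
+// lifecycle would look roughly like the sketch below. This reflects the API
+// only as used here, not documentation; DoTrackedWork is hypothetical.
+//
+//   base::debug::HeapUsageTracker heap_usage;
+//   heap_usage.Start();     // Begin tracking this thread's allocator usage.
+//   DoTrackedWork();        // Hypothetical workload to measure.
+//   heap_usage.Stop(true);  // Same boolean the stopwatch passes above.
+//   base::debug::ThreadAllocatorUsage usage = heap_usage.usage();
+//   // usage.alloc_ops, usage.alloc_bytes, usage.free_bytes,
+//   // usage.alloc_overhead_bytes and usage.max_allocated_bytes are now set.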
+
ThreadData* TaskStopwatch::GetThreadData() const {
#if DCHECK_IS_ON()
DCHECK(state_ != CREATED);
@@ -913,23 +1012,9 @@ ThreadData* TaskStopwatch::GetThreadData() const {
DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(
int profiling_phase,
- int count,
- int32_t run_duration_sum,
- int32_t run_duration_max,
- int32_t run_duration_sample,
- int32_t queue_duration_sum,
- int32_t queue_duration_max,
- int32_t queue_duration_sample,
+ const DeathData& death,
const DeathDataPhaseSnapshot* prev)
- : profiling_phase(profiling_phase),
- death_data(count,
- run_duration_sum,
- run_duration_max,
- run_duration_sample,
- queue_duration_sum,
- queue_duration_max,
- queue_duration_sample),
- prev(prev) {}
+ : profiling_phase(profiling_phase), death_data(death), prev(prev) {}
//------------------------------------------------------------------------------
// TaskSnapshot
