Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(878)

Unified Diff: base/tracked_objects.cc

Issue 1021053003: Delivering the FIRST_NONEMPTY_PAINT phase changing event to base/ (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@phase_splitting
Patch Set: More asvitkine@ comments. Created 5 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: base/tracked_objects.cc
diff --git a/base/tracked_objects.cc b/base/tracked_objects.cc
index 32ec75cd79543b4e1e563446f37e1a26cc52783c..566930acd8d55a148914b676e15f31ae1da600e2 100644
--- a/base/tracked_objects.cc
+++ b/base/tracked_objects.cc
@@ -15,6 +15,7 @@
#include "base/logging.h"
#include "base/process/process_handle.h"
#include "base/profiler/alternate_timer.h"
+#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/third_party/valgrind/memcheck.h"
#include "base/tracking_info.h"
@@ -227,6 +228,10 @@ int Births::birth_count() const { return birth_count_; }
void Births::RecordBirth() { ++birth_count_; }
+void Births::SubtractBirths(int count) {
+ birth_count_ -= count;
jar (doing other things) 2015/04/01 17:10:25 This is unsafe, unless *all* folks that access thi
vadimt 2015/04/06 23:25:12 Done.
+}
+
//------------------------------------------------------------------------------
// ThreadData maintains the central data for all births and deaths on a single
// thread.
@@ -269,6 +274,10 @@ base::LazyInstance<base::Lock>::Leaky
// static
ThreadData::Status ThreadData::status_ = ThreadData::UNINITIALIZED;
+// static
+base::LazyInstance<PhasedProcessDataSnapshotMap>
+ ThreadData::completed_phases_snapshots_ = LAZY_INSTANCE_INITIALIZER;
+
ThreadData::ThreadData(const std::string& suggested_name)
: next_(NULL),
next_retired_worker_(NULL),
@@ -390,9 +399,26 @@ void ThreadData::OnThreadTerminationCleanup() {
}
// static
-void ThreadData::Snapshot(ProcessDataSnapshot* process_data_snapshot) {
+void ThreadData::Snapshot(int current_profiling_phase,
+ ProcessDataSnapshot* process_data_snapshot) {
+ process_data_snapshot->phased_process_data_snapshots =
+ completed_phases_snapshots_.Get();
+
+ DCHECK(!ContainsKey(process_data_snapshot->phased_process_data_snapshots,
+ current_profiling_phase));
ThreadData::SnapshotCurrentPhase(
- &process_data_snapshot->phased_process_data_snapshots[0]);
+ false, &process_data_snapshot
+ ->phased_process_data_snapshots[current_profiling_phase]);
+}
+
+// static
+void ThreadData::OnProfilingPhaseCompletion(int profiling_phase) {
+ if (!kTrackAllTaskObjects)
+ return; // Not compiled in.
+
+ PhasedProcessDataSnapshotMap& snapshots = completed_phases_snapshots_.Get();
+ DCHECK(!ContainsKey(snapshots, profiling_phase));
+ ThreadData::SnapshotCurrentPhase(true, &snapshots[profiling_phase]);
}
Births* ThreadData::TallyABirth(const Location& location) {
@@ -424,7 +450,7 @@ Births* ThreadData::TallyABirth(const Location& location) {
return child;
}
-void ThreadData::TallyADeath(const Births& birth,
+void ThreadData::TallyADeath(const Births& births,
int32 queue_duration,
const TaskStopwatch& stopwatch) {
int32 run_duration = stopwatch.RunDurationMs();
@@ -433,7 +459,7 @@ void ThreadData::TallyADeath(const Births& birth,
const uint32 kSomePrimeNumber = 2147483647;
random_number_ += queue_duration + run_duration + kSomePrimeNumber;
// An address is going to have some randomness to it as well ;-).
- random_number_ ^= static_cast<uint32>(&birth - reinterpret_cast<Births*>(0));
+ random_number_ ^= static_cast<uint32>(&births - reinterpret_cast<Births*>(0));
// We don't have queue durations without OS timer. OS timer is automatically
// used for task-post-timing, so the use of an alternate timer implies all
@@ -445,20 +471,20 @@ void ThreadData::TallyADeath(const Births& birth,
queue_duration = 0;
}
- DeathMap::iterator it = death_map_.find(&birth);
+ DeathMap::iterator it = death_map_.find(&births);
DeathData* death_data;
if (it != death_map_.end()) {
death_data = &it->second;
} else {
base::AutoLock lock(map_lock_); // Lock as the map may get relocated now.
- death_data = &death_map_[&birth];
+ death_data = &death_map_[&births];
} // Release lock ASAP.
death_data->RecordDeath(queue_duration, run_duration, random_number_);
if (!kTrackParentChildLinks)
return;
if (!parent_stack_.empty()) { // We might get turned off.
- DCHECK_EQ(parent_stack_.top(), &birth);
+ DCHECK_EQ(parent_stack_.top(), &births);
parent_stack_.pop();
}
}
@@ -486,8 +512,8 @@ void ThreadData::TallyRunOnNamedThreadIfTracking(
// Even if we have been DEACTIVATED, we will process any pending births so
// that our data structures (which counted the outstanding births) remain
// consistent.
- const Births* birth = completed_task.birth_tally;
- if (!birth)
+ const Births* births = completed_task.birth_tally;
+ if (!births)
return;
ThreadData* current_thread_data = stopwatch.GetThreadData();
if (!current_thread_data)
@@ -504,12 +530,12 @@ void ThreadData::TallyRunOnNamedThreadIfTracking(
queue_duration = (start_of_run - completed_task.EffectiveTimePosted())
.InMilliseconds();
}
- current_thread_data->TallyADeath(*birth, queue_duration, stopwatch);
+ current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}
// static
void ThreadData::TallyRunOnWorkerThreadIfTracking(
- const Births* birth,
+ const Births* births,
const TrackedTime& time_posted,
const TaskStopwatch& stopwatch) {
if (!kTrackAllTaskObjects)
@@ -518,7 +544,7 @@ void ThreadData::TallyRunOnWorkerThreadIfTracking(
// Even if we have been DEACTIVATED, we will process any pending births so
// that our data structures (which counted the outstanding births) remain
// consistent.
- if (!birth)
+ if (!births)
return;
// TODO(jar): Support the option to coalesce all worker-thread activity under
@@ -539,12 +565,12 @@ void ThreadData::TallyRunOnWorkerThreadIfTracking(
if (!start_of_run.is_null()) {
queue_duration = (start_of_run - time_posted).InMilliseconds();
}
- current_thread_data->TallyADeath(*birth, queue_duration, stopwatch);
+ current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}
// static
void ThreadData::TallyRunInAScopedRegionIfTracking(
- const Births* birth,
+ const Births* births,
const TaskStopwatch& stopwatch) {
if (!kTrackAllTaskObjects)
return; // Not compiled in.
@@ -552,7 +578,7 @@ void ThreadData::TallyRunInAScopedRegionIfTracking(
// Even if we have been DEACTIVATED, we will process any pending births so
// that our data structures (which counted the outstanding births) remain
// consistent.
- if (!birth)
+ if (!births)
return;
ThreadData* current_thread_data = stopwatch.GetThreadData();
@@ -560,11 +586,12 @@ void ThreadData::TallyRunInAScopedRegionIfTracking(
return;
int32 queue_duration = 0;
- current_thread_data->TallyADeath(*birth, queue_duration, stopwatch);
+ current_thread_data->TallyADeath(*births, queue_duration, stopwatch);
}
// static
void ThreadData::SnapshotAllExecutedTasks(
+ bool reset,
ProcessDataPhaseSnapshot* process_data_phase,
BirthCountMap* birth_counts) {
if (!kTrackAllTaskObjects)
@@ -573,6 +600,8 @@ void ThreadData::SnapshotAllExecutedTasks(
// Get an unchanging copy of a ThreadData list.
ThreadData* my_list = ThreadData::first();
+ DeathResetResults death_reset_results;
+
// Gather data serially.
// This hackish approach *can* get some slightly corrupt tallies, as we are
// grabbing values without the protection of a lock, but it has the advantage
@@ -582,18 +611,29 @@ void ThreadData::SnapshotAllExecutedTasks(
for (ThreadData* thread_data = my_list;
thread_data;
thread_data = thread_data->next()) {
- thread_data->SnapshotExecutedTasks(process_data_phase, birth_counts);
+ thread_data->SnapshotExecutedTasks(process_data_phase,
+ reset ? &death_reset_results : nullptr,
+ birth_counts);
+ }
+
+ if (reset) {
+ for (ThreadData* thread_data = my_list; thread_data;
+ thread_data = thread_data->next()) {
+ thread_data->SubtractDeathResultsFromBirths(death_reset_results);
+ }
}
}
// static
void ThreadData::SnapshotCurrentPhase(
+ bool reset,
ProcessDataPhaseSnapshot* process_data_phase) {
// Add births that have run to completion to |collected_data|.
// |birth_counts| tracks the total number of births recorded at each location
// for which we have not seen a death count.
BirthCountMap birth_counts;
- ThreadData::SnapshotAllExecutedTasks(process_data_phase, &birth_counts);
+ ThreadData::SnapshotAllExecutedTasks(reset, process_data_phase,
+ &birth_counts);
// Add births that are still active -- i.e. objects that have tallied a birth,
// but have not yet tallied a matching death, and hence must be either
@@ -608,18 +648,21 @@ void ThreadData::SnapshotCurrentPhase(
void ThreadData::SnapshotExecutedTasks(
ProcessDataPhaseSnapshot* process_data_phase,
+ DeathResetResults* death_reset_results,
BirthCountMap* birth_counts) {
// Get copy of data, so that the data will not change during the iterations
// and processing.
ThreadData::BirthMap birth_map;
ThreadData::DeathMap death_map;
ThreadData::ParentChildSet parent_child_set;
- SnapshotMaps(&birth_map, &death_map, &parent_child_set);
+ SnapshotMaps(&birth_map, &death_map, death_reset_results, &parent_child_set);
for (const auto& death : death_map) {
process_data_phase->tasks.push_back(
TaskSnapshot(*death.first, death.second, thread_name()));
- (*birth_counts)[death.first] -= death.first->birth_count();
+ // We don't populate birth_counts if a reset was requested.
+ if (death_reset_results == nullptr)
+ (*birth_counts)[death.first] -= death.first->birth_count();
}
for (const auto& birth : birth_map) {
@@ -638,12 +681,35 @@ void ThreadData::SnapshotExecutedTasks(
// This may be called from another thread.
void ThreadData::SnapshotMaps(BirthMap* birth_map,
DeathMap* death_map,
+ DeathResetResults* death_reset_results,
ParentChildSet* parent_child_set) {
base::AutoLock lock(map_lock_);
- for (const auto& birth : birth_map_)
- (*birth_map)[birth.first] = birth.second;
- for (const auto& death : death_map_)
+
+ if (death_reset_results == nullptr) {
+ // When reset is not requested, snapshot births.
+ for (const auto& birth : birth_map_)
+ (*birth_map)[birth.first] = birth.second;
+ }
+ for (auto& death : death_map_) {
+ // Don't snapshot deaths with 0 count. Deaths with 0 count can result from
+ // prior calls to SnapshotMaps with death_reset_results!=null param.
+ if (death.second.count() <= 0)
+ continue;
+
(*death_map)[death.first] = death.second;
+ if (death_reset_results != nullptr) {
+ // If resetting deaths is requested, store the current value of the death
+ // count in death_reset_results, and then clear the death.
+ const auto& death_reset_result = death_reset_results->find(death.first);
+
+ if (death_reset_result != death_reset_results->end())
+ death_reset_result->second += death.second.count();
+ else
+ (*death_reset_results)[death.first] = death.second.count();
+
+ death.second.Clear();
+ }
+ }
if (!kTrackParentChildLinks)
return;
@@ -652,6 +718,18 @@ void ThreadData::SnapshotMaps(BirthMap* birth_map,
parent_child_set->insert(parent_child);
}
+// This may be called from another thread.
+void ThreadData::SubtractDeathResultsFromBirths(
+ const DeathResetResults& death_reset_results) {
+ base::AutoLock lock(map_lock_);
+
+ for (const auto& births : birth_map_) {
+ const auto& death_reset_result = death_reset_results.find(births.second);
+ if (death_reset_result != death_reset_results.end())
+ births.second->SubtractBirths(death_reset_result->second);
+ }
+}
+
static void OptionallyInitializeAlternateTimer() {
NowFunction* alternate_time_source = GetAlternateTimeSource();
if (alternate_time_source)
@@ -833,6 +911,8 @@ void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
delete it->second; // Delete the Birth Records.
delete next_thread_data; // Includes all Death Records.
}
+
+ completed_phases_snapshots_.Get().clear();
}
//------------------------------------------------------------------------------

Powered by Google App Engine
This is Rietveld 408576698