Chromium Code Reviews

Side by Side Diff: base/tracked_objects.cc

Issue 2386123003: Add heap allocator usage to task profiler. (Closed)
Patch Set: Address Primiano's comments, re-enable that irksome DCHECK, now that the fix is in. Created 4 years ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/tracked_objects.h" 5 #include "base/tracked_objects.h"
6 6
7 #include <limits.h> 7 #include <limits.h>
8 #include <stdlib.h> 8 #include <stdlib.h>
9 9
10 #include "base/atomicops.h" 10 #include "base/atomicops.h"
11 #include "base/base_switches.h" 11 #include "base/base_switches.h"
12 #include "base/command_line.h" 12 #include "base/command_line.h"
13 #include "base/compiler_specific.h" 13 #include "base/compiler_specific.h"
14 #include "base/debug/leak_annotations.h" 14 #include "base/debug/leak_annotations.h"
15 #include "base/logging.h" 15 #include "base/logging.h"
16 #include "base/numerics/safe_conversions.h"
17 #include "base/numerics/safe_math.h"
16 #include "base/process/process_handle.h" 18 #include "base/process/process_handle.h"
17 #include "base/strings/stringprintf.h" 19 #include "base/strings/stringprintf.h"
18 #include "base/third_party/valgrind/memcheck.h" 20 #include "base/third_party/valgrind/memcheck.h"
19 #include "base/threading/worker_pool.h" 21 #include "base/threading/worker_pool.h"
20 #include "base/tracking_info.h" 22 #include "base/tracking_info.h"
21 #include "build/build_config.h" 23 #include "build/build_config.h"
22 24
23 using base::TimeDelta; 25 using base::TimeDelta;
24 26
25 namespace base { 27 namespace base {
(...skipping 41 matching lines...)
67 switches::kProfilerTiming) == 69 switches::kProfilerTiming) ==
68 switches::kProfilerTimingDisabledValue) 70 switches::kProfilerTimingDisabledValue)
69 ? DISABLED_TIMING 71 ? DISABLED_TIMING
70 : ENABLED_TIMING; 72 : ENABLED_TIMING;
71 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, 73 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled,
72 current_timing_enabled); 74 current_timing_enabled);
73 } 75 }
74 return current_timing_enabled == ENABLED_TIMING; 76 return current_timing_enabled == ENABLED_TIMING;
75 } 77 }
76 78
79 void SaturatingAdd(const uint32_t addend, base::subtle::Atomic32* sum) {
80 // Bail out early if there's no work or the sum is already saturated.
81 if (addend == 0U || *sum == INT_MAX)
82 return;
83
84 base::CheckedNumeric<int32_t> new_sum = *sum;
dcheng 2016/12/01 07:44:28 How come it's safe to skip using an atomic load operation here?
dcheng 2016/12/01 07:48:26 (Argh, upon looking again, I see that it's just on …)
Sigurður Ásgeirsson 2016/12/01 14:01:11 This is done for consistency with all other DeathData fields.
Sigurður Ásgeirsson 2016/12/01 14:01:11 Acknowledged.
85 new_sum += addend;
86 base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX));
87 }
88
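For reference, a minimal standalone sketch of the clamping behavior above, using only the standard library and leaving out the atomics (SaturatingAdd32 is an illustrative name, not part of the patch):

  #include <cassert>
  #include <climits>
  #include <cstdint>

  // Adds |addend| to |*sum|, clamping at INT_MAX instead of overflowing.
  void SaturatingAdd32(uint32_t addend, int32_t* sum) {
    if (addend == 0U || *sum == INT_MAX)
      return;  // No work, or already saturated.
    // Widen to 64 bits so the addition itself cannot overflow.
    const int64_t new_sum = static_cast<int64_t>(*sum) + addend;
    *sum = new_sum > INT_MAX ? INT_MAX : static_cast<int32_t>(new_sum);
  }

  int main() {
    int32_t sum = INT_MAX - 1;
    SaturatingAdd32(10U, &sum);
    assert(sum == INT_MAX);  // Clamped rather than wrapped.
    return 0;
  }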
77 } // namespace 89 } // namespace
78 90
79 //------------------------------------------------------------------------------ 91 //------------------------------------------------------------------------------
80 // DeathData tallies durations when a death takes place. 92 // DeathData tallies durations when a death takes place.
81 93
82 DeathData::DeathData() 94 DeathData::DeathData()
83 : count_(0), 95 : count_(0),
84 sample_probability_count_(0), 96 sample_probability_count_(0),
85 run_duration_sum_(0), 97 run_duration_sum_(0),
86 queue_duration_sum_(0), 98 queue_duration_sum_(0),
87 run_duration_max_(0), 99 run_duration_max_(0),
88 queue_duration_max_(0), 100 queue_duration_max_(0),
89 run_duration_sample_(0), 101 run_duration_sample_(0),
90 queue_duration_sample_(0), 102 queue_duration_sample_(0),
91 last_phase_snapshot_(nullptr) { 103 alloc_ops_(0),
92 } 104 free_ops_(0),
105 allocated_bytes_(0),
106 freed_bytes_(0),
107 alloc_overhead_bytes_(0),
108 max_allocated_bytes_(0),
109 last_phase_snapshot_(nullptr) {}
93 110
94 DeathData::DeathData(const DeathData& other) 111 DeathData::DeathData(const DeathData& other)
95 : count_(other.count_), 112 : count_(other.count_),
96 sample_probability_count_(other.sample_probability_count_), 113 sample_probability_count_(other.sample_probability_count_),
97 run_duration_sum_(other.run_duration_sum_), 114 run_duration_sum_(other.run_duration_sum_),
98 queue_duration_sum_(other.queue_duration_sum_), 115 queue_duration_sum_(other.queue_duration_sum_),
99 run_duration_max_(other.run_duration_max_), 116 run_duration_max_(other.run_duration_max_),
100 queue_duration_max_(other.queue_duration_max_), 117 queue_duration_max_(other.queue_duration_max_),
101 run_duration_sample_(other.run_duration_sample_), 118 run_duration_sample_(other.run_duration_sample_),
102 queue_duration_sample_(other.queue_duration_sample_), 119 queue_duration_sample_(other.queue_duration_sample_),
120 alloc_ops_(other.alloc_ops_),
121 free_ops_(other.free_ops_),
122 allocated_bytes_(other.allocated_bytes_),
123 freed_bytes_(other.freed_bytes_),
124 alloc_overhead_bytes_(other.alloc_overhead_bytes_),
125 max_allocated_bytes_(other.max_allocated_bytes_),
103 last_phase_snapshot_(nullptr) { 126 last_phase_snapshot_(nullptr) {
104 // This constructor will be used by std::map when adding new DeathData values 127 // This constructor will be used by std::map when adding new DeathData values
105 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't 128 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't
106 // need to worry about ownership transfer. 129 // need to worry about ownership transfer.
107 DCHECK(other.last_phase_snapshot_ == nullptr); 130 DCHECK(other.last_phase_snapshot_ == nullptr);
108 } 131 }
109 132
110 DeathData::~DeathData() { 133 DeathData::~DeathData() {
111 while (last_phase_snapshot_) { 134 while (last_phase_snapshot_) {
112 const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_; 135 const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_;
113 last_phase_snapshot_ = snapshot->prev; 136 last_phase_snapshot_ = snapshot->prev;
114 delete snapshot; 137 delete snapshot;
115 } 138 }
116 } 139 }
117 140
118 // TODO(jar): I need to see if this macro to optimize branching is worth using. 141 // TODO(jar): I need to see if this macro to optimize branching is worth using.
119 // 142 //
120 // This macro has no branching, so it is surely fast, and is equivalent to: 143 // This macro has no branching, so it is surely fast, and is equivalent to:
121 // if (assign_it) 144 // if (assign_it)
122 // target = source; 145 // target = source;
123 // We use a macro rather than a template to force this to inline. 146 // We use a macro rather than a template to force this to inline.
124 // Related code for calculating max is discussed on the web. 147 // Related code for calculating max is discussed on the web.
125 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ 148 #define CONDITIONAL_ASSIGN(assign_it, target, source) \
126 ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it)) 149 ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))
127 150
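The macro works because -static_cast<int32_t>(assign_it) is 0 when assign_it is false and all-ones when it is true, so the XOR mask either leaves target untouched or rewrites it to source. A small standalone check of the equivalence:

  #include <cassert>
  #include <cstdint>

  #define CONDITIONAL_ASSIGN(assign_it, target, source) \
    ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))

  int main() {
    int32_t target = 5;
    CONDITIONAL_ASSIGN(false, target, 9);  // Mask is 0: target unchanged.
    assert(target == 5);
    CONDITIONAL_ASSIGN(true, target, 9);   // Mask is ~0: target becomes source.
    assert(target == 9);
    return 0;
  }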
128 void DeathData::RecordDeath(const int32_t queue_duration, 151 void DeathData::RecordDurations(const int32_t queue_duration,
129 const int32_t run_duration, 152 const int32_t run_duration,
130 const uint32_t random_number) { 153 const uint32_t random_number) {
131 // We'll just clamp at INT_MAX, but we should note this in the UI as such. 154 // We'll just clamp at INT_MAX, but we should note this in the UI as such.
132 if (count_ < INT_MAX) 155 if (count_ < INT_MAX)
133 base::subtle::NoBarrier_Store(&count_, count_ + 1); 156 base::subtle::NoBarrier_Store(&count_, count_ + 1);
134 157
135 int sample_probability_count = 158 int sample_probability_count =
136 base::subtle::NoBarrier_Load(&sample_probability_count_); 159 base::subtle::NoBarrier_Load(&sample_probability_count_);
137 if (sample_probability_count < INT_MAX) 160 if (sample_probability_count < INT_MAX)
138 ++sample_probability_count; 161 ++sample_probability_count;
139 base::subtle::NoBarrier_Store(&sample_probability_count_, 162 base::subtle::NoBarrier_Store(&sample_probability_count_,
140 sample_probability_count); 163 sample_probability_count);
(...skipping 16 matching lines...)
157 // but that should be inconsequentially likely). We ignore the fact that we 180 // but that should be inconsequentially likely). We ignore the fact that we
158 // correlated our selection of a sample to the run and queue times (i.e., we 181 // correlated our selection of a sample to the run and queue times (i.e., we
159 // used them to generate random_number). 182 // used them to generate random_number).
160 CHECK_GT(sample_probability_count, 0); 183 CHECK_GT(sample_probability_count, 0);
161 if (0 == (random_number % sample_probability_count)) { 184 if (0 == (random_number % sample_probability_count)) {
162 base::subtle::NoBarrier_Store(&queue_duration_sample_, queue_duration); 185 base::subtle::NoBarrier_Store(&queue_duration_sample_, queue_duration);
163 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration); 186 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration);
164 } 187 }
165 } 188 }
166 189
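The 0 == (random_number % sample_probability_count) test above gives the n-th recorded death a 1/n chance of replacing the stored sample, so with an unbiased random_number every death is equally likely to be the one that survives: reservoir sampling with a reservoir of size one. A standalone sketch of the same scheme, with std::mt19937 standing in for the profiler's random_number:

  #include <cstdint>
  #include <cstdio>
  #include <random>

  // Keeps one uniformly chosen sample from a stream without storing the
  // stream, mirroring the 1/n replacement rule used above.
  class SingleSampleReservoir {
   public:
    void Record(int32_t value) {
      ++count_;
      std::uniform_int_distribution<int> dist(0, count_ - 1);
      if (dist(rng_) == 0)  // Probability 1/count_.
        sample_ = value;
    }
    int32_t sample() const { return sample_; }

   private:
    int count_ = 0;
    int32_t sample_ = 0;
    std::mt19937 rng_{std::random_device{}()};
  };

  int main() {
    SingleSampleReservoir reservoir;
    for (int32_t duration = 1; duration <= 100; ++duration)
      reservoir.Record(duration);
    std::printf("sampled duration: %d\n", reservoir.sample());
    return 0;
  }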
190 void DeathData::RecordAllocations(const uint32_t alloc_ops,
191 const uint32_t free_ops,
192 const uint32_t allocated_bytes,
193 const uint32_t freed_bytes,
194 const uint32_t alloc_overhead_bytes,
195 const uint32_t max_allocated_bytes) {
196 // Use saturating arithmetic.
197 SaturatingAdd(alloc_ops, &alloc_ops_);
198 SaturatingAdd(free_ops, &free_ops_);
199 SaturatingAdd(allocated_bytes, &allocated_bytes_);
200 SaturatingAdd(freed_bytes, &freed_bytes_);
201 SaturatingAdd(alloc_overhead_bytes, &alloc_overhead_bytes_);
202
203 int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes);
204 if (max > max_allocated_bytes_)
205 base::subtle::NoBarrier_Store(&max_allocated_bytes_, max);
206 }
207
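A hedged usage fragment for the method above (DeathData::RecordAllocations is from this patch; the literal counts are made up for illustration):

  // One task's tally: 4 allocations totalling 1024 bytes plus 64 bytes of
  // allocator overhead, 3 frees totalling 768 bytes, and a 512-byte
  // high-water mark while the task ran.
  death_data->RecordAllocations(4U, 3U, 1024U, 768U, 64U, 512U);
  // Repeated deaths accumulate: the counters clamp at INT_MAX rather than
  // wrapping, and max_allocated_bytes_ only moves upward, keeping the
  // largest high-water mark seen across deaths.
  death_data->RecordAllocations(1U, 1U, 128U, 128U, 8U, 256U);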
167 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { 208 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
168 // Snapshotting and storing current state. 209 // Snapshotting and storing current state.
169 last_phase_snapshot_ = new DeathDataPhaseSnapshot( 210 last_phase_snapshot_ =
170 profiling_phase, count(), run_duration_sum(), run_duration_max(), 211 new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_);
171 run_duration_sample(), queue_duration_sum(), queue_duration_max(),
172 queue_duration_sample(), last_phase_snapshot_);
173 212
174 // Not touching fields for which a delta can be computed by comparing with a 213 // Not touching fields for which a delta can be computed by comparing with a
175 // snapshot from the previous phase. Resetting other fields. Sample values 214 // snapshot from the previous phase. Resetting other fields. Sample values
176 // will be reset upon next death recording because sample_probability_count_ 215 // will be reset upon next death recording because sample_probability_count_
177 // is set to 0. 216 // is set to 0.
178 // We avoid resetting to 0 in favor of deltas whenever possible. The reason 217 // We avoid resetting to 0 in favor of deltas whenever possible. The reason
179 // is that for incrementable fields, resetting to 0 from the snapshot thread 218 // is that for incrementable fields, resetting to 0 from the snapshot thread
180 // potentially in parallel with incrementing in the death thread may result in 219 // potentially in parallel with incrementing in the death thread may result in
181 // significant data corruption that has a potential to grow with time. Not 220 // significant data corruption that has a potential to grow with time. Not
182 // resetting incrementable fields and using deltas will cause any 221 // resetting incrementable fields and using deltas will cause any
(...skipping 19 matching lines...)
202 } 241 }
203 242
204 //------------------------------------------------------------------------------ 243 //------------------------------------------------------------------------------
205 DeathDataSnapshot::DeathDataSnapshot() 244 DeathDataSnapshot::DeathDataSnapshot()
206 : count(-1), 245 : count(-1),
207 run_duration_sum(-1), 246 run_duration_sum(-1),
208 run_duration_max(-1), 247 run_duration_max(-1),
209 run_duration_sample(-1), 248 run_duration_sample(-1),
210 queue_duration_sum(-1), 249 queue_duration_sum(-1),
211 queue_duration_max(-1), 250 queue_duration_max(-1),
212 queue_duration_sample(-1) { 251 queue_duration_sample(-1),
213 } 252 alloc_ops(-1),
253 free_ops(-1),
254 allocated_bytes(-1),
255 freed_bytes(-1),
256 alloc_overhead_bytes(-1),
257 max_allocated_bytes(-1) {}
214 258
215 DeathDataSnapshot::DeathDataSnapshot(int count, 259 DeathDataSnapshot::DeathDataSnapshot(int count,
216 int32_t run_duration_sum, 260 int32_t run_duration_sum,
217 int32_t run_duration_max, 261 int32_t run_duration_max,
218 int32_t run_duration_sample, 262 int32_t run_duration_sample,
219 int32_t queue_duration_sum, 263 int32_t queue_duration_sum,
220 int32_t queue_duration_max, 264 int32_t queue_duration_max,
221 int32_t queue_duration_sample) 265 int32_t queue_duration_sample,
266 int32_t alloc_ops,
267 int32_t free_ops,
268 int32_t allocated_bytes,
269 int32_t freed_bytes,
270 int32_t alloc_overhead_bytes,
271 int32_t max_allocated_bytes)
222 : count(count), 272 : count(count),
223 run_duration_sum(run_duration_sum), 273 run_duration_sum(run_duration_sum),
224 run_duration_max(run_duration_max), 274 run_duration_max(run_duration_max),
225 run_duration_sample(run_duration_sample), 275 run_duration_sample(run_duration_sample),
226 queue_duration_sum(queue_duration_sum), 276 queue_duration_sum(queue_duration_sum),
227 queue_duration_max(queue_duration_max), 277 queue_duration_max(queue_duration_max),
228 queue_duration_sample(queue_duration_sample) {} 278 queue_duration_sample(queue_duration_sample),
279 alloc_ops(alloc_ops),
280 free_ops(free_ops),
281 allocated_bytes(allocated_bytes),
282 freed_bytes(freed_bytes),
283 alloc_overhead_bytes(alloc_overhead_bytes),
284 max_allocated_bytes(max_allocated_bytes) {}
285
286 DeathDataSnapshot::DeathDataSnapshot(const DeathData& death_data)
287 : count(death_data.count()),
288 run_duration_sum(death_data.run_duration_sum()),
289 run_duration_max(death_data.run_duration_max()),
290 run_duration_sample(death_data.run_duration_sample()),
291 queue_duration_sum(death_data.queue_duration_sum()),
292 queue_duration_max(death_data.queue_duration_max()),
293 queue_duration_sample(death_data.queue_duration_sample()),
294 alloc_ops(death_data.alloc_ops()),
295 free_ops(death_data.free_ops()),
296 allocated_bytes(death_data.allocated_bytes()),
297 freed_bytes(death_data.freed_bytes()),
298 alloc_overhead_bytes(death_data.alloc_overhead_bytes()),
299 max_allocated_bytes(death_data.max_allocated_bytes()) {}
300
301 DeathDataSnapshot::DeathDataSnapshot(const DeathDataSnapshot& death_data) =
302 default;
229 303
230 DeathDataSnapshot::~DeathDataSnapshot() { 304 DeathDataSnapshot::~DeathDataSnapshot() {
231 } 305 }
232 306
233 DeathDataSnapshot DeathDataSnapshot::Delta( 307 DeathDataSnapshot DeathDataSnapshot::Delta(
234 const DeathDataSnapshot& older) const { 308 const DeathDataSnapshot& older) const {
235 return DeathDataSnapshot(count - older.count, 309 return DeathDataSnapshot(
236 run_duration_sum - older.run_duration_sum, 310 count - older.count, run_duration_sum - older.run_duration_sum,
237 run_duration_max, run_duration_sample, 311 run_duration_max, run_duration_sample,
238 queue_duration_sum - older.queue_duration_sum, 312 queue_duration_sum - older.queue_duration_sum, queue_duration_max,
239 queue_duration_max, queue_duration_sample); 313 queue_duration_sample, alloc_ops - older.alloc_ops,
314 free_ops - older.free_ops, allocated_bytes - older.allocated_bytes,
315 freed_bytes - older.freed_bytes,
316 alloc_overhead_bytes - older.alloc_overhead_bytes, max_allocated_bytes);
240 } 317 }
241 318
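Note the asymmetry in Delta() above: count and the duration sums are monotonic, so subtracting the older snapshot recovers the per-phase value, while max and sample fields are not meaningful as differences and the newer snapshot's values are carried through unchanged. A reduced sketch of the idea (the three-field Counters struct is illustrative, not from the patch):

  #include <cstdint>

  struct Counters {
    int32_t count;         // Monotonic: a delta is meaningful.
    int32_t duration_sum;  // Monotonic: a delta is meaningful.
    int32_t duration_max;  // A per-phase max is not a difference.
  };

  Counters Delta(const Counters& newer, const Counters& older) {
    return {newer.count - older.count,
            newer.duration_sum - older.duration_sum,
            newer.duration_max};  // Carried through, as in Delta() above.
  }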
242 //------------------------------------------------------------------------------ 319 //------------------------------------------------------------------------------
243 BirthOnThread::BirthOnThread(const Location& location, 320 BirthOnThread::BirthOnThread(const Location& location,
244 const ThreadData& current) 321 const ThreadData& current)
245 : location_(location), 322 : location_(location),
246 birth_thread_(&current) { 323 birth_thread_(&current) {
247 } 324 }
248 325
249 //------------------------------------------------------------------------------ 326 //------------------------------------------------------------------------------
(...skipping 198 matching lines...)
448 525
449 // Add births that are still active -- i.e. objects that have tallied a birth, 526 // Add births that are still active -- i.e. objects that have tallied a birth,
450 // but have not yet tallied a matching death, and hence must be either 527 // but have not yet tallied a matching death, and hence must be either
451 // running, queued up, or being held in limbo for future posting. 528 // running, queued up, or being held in limbo for future posting.
452 auto* current_phase_tasks = 529 auto* current_phase_tasks =
453 &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks; 530 &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks;
454 for (const auto& birth_count : birth_counts) { 531 for (const auto& birth_count : birth_counts) {
455 if (birth_count.second > 0) { 532 if (birth_count.second > 0) {
456 current_phase_tasks->push_back( 533 current_phase_tasks->push_back(
457 TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first), 534 TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first),
458 DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0), 535 DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0,
536 0, 0, 0, 0, 0, 0),
459 "Still_Alive")); 537 "Still_Alive"));
460 } 538 }
461 } 539 }
462 } 540 }
463 541
464 // static 542 // static
465 void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) { 543 void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) {
466 // Get an unchanging copy of a ThreadData list. 544 // Get an unchanging copy of a ThreadData list.
467 ThreadData* my_list = ThreadData::first(); 545 ThreadData* my_list = ThreadData::first();
468 546
(...skipping 38 matching lines...)
507 static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0)); 585 static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0));
508 586
509 DeathMap::iterator it = death_map_.find(&births); 587 DeathMap::iterator it = death_map_.find(&births);
510 DeathData* death_data; 588 DeathData* death_data;
511 if (it != death_map_.end()) { 589 if (it != death_map_.end()) {
512 death_data = &it->second; 590 death_data = &it->second;
513 } else { 591 } else {
514 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. 592 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now.
515 death_data = &death_map_[&births]; 593 death_data = &death_map_[&births];
516 } // Release lock ASAP. 594 } // Release lock ASAP.
517 death_data->RecordDeath(queue_duration, run_duration, random_number_); 595 death_data->RecordDurations(queue_duration, run_duration, random_number_);
596
597 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
598 if (stopwatch.heap_tracking_enabled()) {
599 base::debug::ThreadHeapUsage heap_usage = stopwatch.heap_usage().usage();
600 // Saturate the 64 bit counts on conversion to 32 bit storage.
601 death_data->RecordAllocations(
602 base::saturated_cast<int32_t>(heap_usage.alloc_ops),
603 base::saturated_cast<int32_t>(heap_usage.free_ops),
604 base::saturated_cast<int32_t>(heap_usage.alloc_bytes),
605 base::saturated_cast<int32_t>(heap_usage.free_bytes),
606 base::saturated_cast<int32_t>(heap_usage.alloc_overhead_bytes),
607 base::saturated_cast<int32_t>(heap_usage.max_allocated_bytes));
608 }
609 #endif
518 } 610 }
519 611
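base::saturated_cast clamps on narrowing rather than truncating, so a 64-bit heap counter that exceeds INT_MAX records as INT_MAX instead of wrapping negative. A standard-library-only sketch of the conversion (SaturatedCast32 is an illustrative name):

  #include <cassert>
  #include <cstdint>
  #include <limits>

  int32_t SaturatedCast32(uint64_t value) {
    const uint64_t kMax =
        static_cast<uint64_t>(std::numeric_limits<int32_t>::max());
    return value > kMax ? std::numeric_limits<int32_t>::max()
                        : static_cast<int32_t>(value);
  }

  int main() {
    assert(SaturatedCast32(5000000000ULL) == 2147483647);  // Clamped.
    assert(SaturatedCast32(1024ULL) == 1024);              // Preserved.
    return 0;
  }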
520 // static 612 // static
521 Births* ThreadData::TallyABirthIfActive(const Location& location) { 613 Births* ThreadData::TallyABirthIfActive(const Location& location) {
522 if (!TrackingStatus()) 614 if (!TrackingStatus())
523 return NULL; 615 return NULL;
524 ThreadData* current_thread_data = Get(); 616 ThreadData* current_thread_data = Get();
525 if (!current_thread_data) 617 if (!current_thread_data)
526 return NULL; 618 return NULL;
527 return current_thread_data->TallyABirth(location); 619 return current_thread_data->TallyABirth(location);
(...skipping 118 matching lines...)
646 BirthMap* birth_map, 738 BirthMap* birth_map,
647 DeathsSnapshot* deaths) { 739 DeathsSnapshot* deaths) {
648 base::AutoLock lock(map_lock_); 740 base::AutoLock lock(map_lock_);
649 741
650 for (const auto& birth : birth_map_) 742 for (const auto& birth : birth_map_)
651 (*birth_map)[birth.first] = birth.second; 743 (*birth_map)[birth.first] = birth.second;
652 744
653 for (const auto& death : death_map_) { 745 for (const auto& death : death_map_) {
654 deaths->push_back(std::make_pair( 746 deaths->push_back(std::make_pair(
655 death.first, 747 death.first,
656 DeathDataPhaseSnapshot(profiling_phase, death.second.count(), 748 DeathDataPhaseSnapshot(profiling_phase, death.second,
657 death.second.run_duration_sum(),
658 death.second.run_duration_max(),
659 death.second.run_duration_sample(),
660 death.second.queue_duration_sum(),
661 death.second.queue_duration_max(),
662 death.second.queue_duration_sample(),
663 death.second.last_phase_snapshot()))); 749 death.second.last_phase_snapshot())));
664 } 750 }
665 } 751 }
666 752
667 void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) { 753 void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) {
668 base::AutoLock lock(map_lock_); 754 base::AutoLock lock(map_lock_);
669 755
670 for (auto& death : death_map_) { 756 for (auto& death : death_map_) {
671 death.second.OnProfilingPhaseCompleted(profiling_phase); 757 death.second.OnProfilingPhaseCompleted(profiling_phase);
672 } 758 }
(...skipping 25 matching lines...)
698 784
699 // Incarnation counter is only significant to testing, as it otherwise will 785 // Incarnation counter is only significant to testing, as it otherwise will
700 // never again change in this process. 786 // never again change in this process.
701 ++incarnation_counter_; 787 ++incarnation_counter_;
702 788
703 // The lock is not critical for setting status_, but it doesn't hurt. It also 789 // The lock is not critical for setting status_, but it doesn't hurt. It also
704 // ensures that if we have a racy initialization, that we'll bail as soon as 790 // ensures that if we have a racy initialization, that we'll bail as soon as
705 // we get the lock earlier in this method. 791 // we get the lock earlier in this method.
706 base::subtle::Release_Store(&status_, kInitialStartupState); 792 base::subtle::Release_Store(&status_, kInitialStartupState);
707 DCHECK(base::subtle::NoBarrier_Load(&status_) != UNINITIALIZED); 793 DCHECK(base::subtle::NoBarrier_Load(&status_) != UNINITIALIZED);
794
795 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
796 // Make sure heap tracking is enabled ASAP if the default state is active.
797 if (kInitialStartupState == PROFILING_ACTIVE &&
798 !base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled()) {
799 base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
800 }
801 #endif // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
708 } 802 }
709 803
710 // static 804 // static
711 void ThreadData::InitializeAndSetTrackingStatus(Status status) { 805 void ThreadData::InitializeAndSetTrackingStatus(Status status) {
712 DCHECK_GE(status, DEACTIVATED); 806 DCHECK_GE(status, DEACTIVATED);
713 DCHECK_LE(status, PROFILING_ACTIVE); 807 DCHECK_LE(status, PROFILING_ACTIVE);
714 808
715 EnsureTlsInitialization(); // No-op if already initialized. 809 EnsureTlsInitialization(); // No-op if already initialized.
716 810
717 if (status > DEACTIVATED) 811 if (status > DEACTIVATED) {
718 status = PROFILING_ACTIVE; 812 status = PROFILING_ACTIVE;
813
814 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
815 if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
816 base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
817 #endif // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
818 }
719 base::subtle::Release_Store(&status_, status); 819 base::subtle::Release_Store(&status_, status);
720 } 820 }
721 821
722 // static 822 // static
723 ThreadData::Status ThreadData::status() { 823 ThreadData::Status ThreadData::status() {
724 return static_cast<ThreadData::Status>(base::subtle::Acquire_Load(&status_)); 824 return static_cast<ThreadData::Status>(base::subtle::Acquire_Load(&status_));
725 } 825 }
726 826
727 // static 827 // static
728 bool ThreadData::TrackingStatus() { 828 bool ThreadData::TrackingStatus() {
(...skipping 87 matching lines...)
816 //------------------------------------------------------------------------------ 916 //------------------------------------------------------------------------------
817 TaskStopwatch::TaskStopwatch() 917 TaskStopwatch::TaskStopwatch()
818 : wallclock_duration_ms_(0), 918 : wallclock_duration_ms_(0),
819 current_thread_data_(NULL), 919 current_thread_data_(NULL),
820 excluded_duration_ms_(0), 920 excluded_duration_ms_(0),
821 parent_(NULL) { 921 parent_(NULL) {
822 #if DCHECK_IS_ON() 922 #if DCHECK_IS_ON()
823 state_ = CREATED; 923 state_ = CREATED;
824 child_ = NULL; 924 child_ = NULL;
825 #endif 925 #endif
926 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
927 heap_tracking_enabled_ =
928 base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled();
929 #endif
826 } 930 }
827 931
828 TaskStopwatch::~TaskStopwatch() { 932 TaskStopwatch::~TaskStopwatch() {
829 #if DCHECK_IS_ON() 933 #if DCHECK_IS_ON()
830 DCHECK(state_ != RUNNING); 934 DCHECK(state_ != RUNNING);
831 DCHECK(child_ == NULL); 935 DCHECK(child_ == NULL);
832 #endif 936 #endif
833 } 937 }
834 938
835 void TaskStopwatch::Start() { 939 void TaskStopwatch::Start() {
836 #if DCHECK_IS_ON() 940 #if DCHECK_IS_ON()
837 DCHECK(state_ == CREATED); 941 DCHECK(state_ == CREATED);
838 state_ = RUNNING; 942 state_ = RUNNING;
839 #endif 943 #endif
840 944
841 start_time_ = ThreadData::Now(); 945 start_time_ = ThreadData::Now();
946 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
947 if (heap_tracking_enabled_)
948 heap_usage_.Start();
949 #endif
842 950
843 current_thread_data_ = ThreadData::Get(); 951 current_thread_data_ = ThreadData::Get();
844 if (!current_thread_data_) 952 if (!current_thread_data_)
845 return; 953 return;
846 954
847 parent_ = current_thread_data_->current_stopwatch_; 955 parent_ = current_thread_data_->current_stopwatch_;
848 #if DCHECK_IS_ON() 956 #if DCHECK_IS_ON()
849 if (parent_) { 957 if (parent_) {
850 DCHECK(parent_->state_ == RUNNING); 958 DCHECK(parent_->state_ == RUNNING);
851 DCHECK(parent_->child_ == NULL); 959 DCHECK(parent_->child_ == NULL);
852 parent_->child_ = this; 960 parent_->child_ = this;
853 } 961 }
854 #endif 962 #endif
855 current_thread_data_->current_stopwatch_ = this; 963 current_thread_data_->current_stopwatch_ = this;
856 } 964 }
857 965
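Stopwatches nest: Start() records the thread's running stopwatch as parent_ and installs itself as current, so a nested task's wall-clock time can later be excluded from the outer task's own duration. A hedged usage fragment (TaskStopwatch is from this file; the task bodies are placeholders):

  tracked_objects::TaskStopwatch outer;
  outer.Start();
  {
    // The nested stopwatch records |outer| as its parent and becomes the
    // thread's current stopwatch while it runs.
    tracked_objects::TaskStopwatch inner;
    inner.Start();
    // ... run the nested task ...
    inner.Stop();  // Its duration is excluded from |outer|'s own time.
  }
  // ... run the remainder of the outer task ...
  outer.Stop();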
858 void TaskStopwatch::Stop() { 966 void TaskStopwatch::Stop() {
859 const TrackedTime end_time = ThreadData::Now(); 967 const TrackedTime end_time = ThreadData::Now();
860 #if DCHECK_IS_ON() 968 #if DCHECK_IS_ON()
861 DCHECK(state_ == RUNNING); 969 DCHECK(state_ == RUNNING);
862 state_ = STOPPED; 970 state_ = STOPPED;
863 DCHECK(child_ == NULL); 971 DCHECK(child_ == NULL);
864 #endif 972 #endif
973 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
974 if (heap_tracking_enabled_)
975 heap_usage_.Stop(true);
976 #endif
865 977
866 if (!start_time_.is_null() && !end_time.is_null()) { 978 if (!start_time_.is_null() && !end_time.is_null()) {
867 wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds(); 979 wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds();
868 } 980 }
869 981
870 if (!current_thread_data_) 982 if (!current_thread_data_)
871 return; 983 return;
872 984
873 DCHECK(current_thread_data_->current_stopwatch_ == this); 985 DCHECK(current_thread_data_->current_stopwatch_ == this);
874 current_thread_data_->current_stopwatch_ = parent_; 986 current_thread_data_->current_stopwatch_ = parent_;
(...skipping 31 matching lines...)
906 #endif 1018 #endif
907 1019
908 return current_thread_data_; 1020 return current_thread_data_;
909 } 1021 }
910 1022
911 //------------------------------------------------------------------------------ 1023 //------------------------------------------------------------------------------
912 // DeathDataPhaseSnapshot 1024 // DeathDataPhaseSnapshot
913 1025
914 DeathDataPhaseSnapshot::DeathDataPhaseSnapshot( 1026 DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(
915 int profiling_phase, 1027 int profiling_phase,
916 int count, 1028 const DeathData& death,
917 int32_t run_duration_sum,
918 int32_t run_duration_max,
919 int32_t run_duration_sample,
920 int32_t queue_duration_sum,
921 int32_t queue_duration_max,
922 int32_t queue_duration_sample,
923 const DeathDataPhaseSnapshot* prev) 1029 const DeathDataPhaseSnapshot* prev)
924 : profiling_phase(profiling_phase), 1030 : profiling_phase(profiling_phase), death_data(death), prev(prev) {}
925 death_data(count,
926 run_duration_sum,
927 run_duration_max,
928 run_duration_sample,
929 queue_duration_sum,
930 queue_duration_max,
931 queue_duration_sample),
932 prev(prev) {}
933 1031
934 //------------------------------------------------------------------------------ 1032 //------------------------------------------------------------------------------
935 // TaskSnapshot 1033 // TaskSnapshot
936 1034
937 TaskSnapshot::TaskSnapshot() { 1035 TaskSnapshot::TaskSnapshot() {
938 } 1036 }
939 1037
940 TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth, 1038 TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth,
941 const DeathDataSnapshot& death_data, 1039 const DeathDataSnapshot& death_data,
942 const std::string& death_thread_name) 1040 const std::string& death_thread_name)
(...skipping 28 matching lines...)
971 #endif 1069 #endif
972 } 1070 }
973 1071
974 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = 1072 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) =
975 default; 1073 default;
976 1074
977 ProcessDataSnapshot::~ProcessDataSnapshot() { 1075 ProcessDataSnapshot::~ProcessDataSnapshot() {
978 } 1076 }
979 1077
980 } // namespace tracked_objects 1078 } // namespace tracked_objects