Chromium Code Reviews

Unified Diff: base/tracked_objects.cc

Issue 2386123003: Add heap allocator usage to task profiler. (Closed)
Patch Set: Address Eric's comment. Created 4 years ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/tracked_objects.h"

 #include <limits.h>
 #include <stdlib.h>

 #include "base/atomicops.h"
 #include "base/base_switches.h"
 #include "base/command_line.h"
 #include "base/compiler_specific.h"
 #include "base/debug/leak_annotations.h"
 #include "base/logging.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math.h"
 #include "base/process/process_handle.h"
 #include "base/strings/stringprintf.h"
 #include "base/third_party/valgrind/memcheck.h"
 #include "base/threading/worker_pool.h"
 #include "base/tracking_info.h"
 #include "build/build_config.h"

 using base::TimeDelta;

 namespace base {
(...skipping 53 matching lines...)
 //------------------------------------------------------------------------------
 // DeathData tallies durations when a death takes place.

 DeathData::DeathData()
     : count_(0),
       sample_probability_count_(0),
       run_duration_sum_(0),
       queue_duration_sum_(0),
       run_duration_max_(0),
       queue_duration_max_(0),
+      alloc_ops_(0),
+      free_ops_(0),
+      allocated_bytes_(0),
+      freed_bytes_(0),
+      alloc_overhead_bytes_(0),
+      max_allocated_bytes_(0),
       run_duration_sample_(0),
       queue_duration_sample_(0),
-      last_phase_snapshot_(nullptr) {
-}
+      last_phase_snapshot_(nullptr) {}

 DeathData::DeathData(const DeathData& other)
     : count_(other.count_),
       sample_probability_count_(other.sample_probability_count_),
       run_duration_sum_(other.run_duration_sum_),
       queue_duration_sum_(other.queue_duration_sum_),
       run_duration_max_(other.run_duration_max_),
       queue_duration_max_(other.queue_duration_max_),
+      alloc_ops_(other.alloc_ops_),
+      free_ops_(other.free_ops_),
+      allocated_bytes_(other.allocated_bytes_),
+      freed_bytes_(other.freed_bytes_),
+      alloc_overhead_bytes_(other.alloc_overhead_bytes_),
+      max_allocated_bytes_(other.max_allocated_bytes_),
       run_duration_sample_(other.run_duration_sample_),
       queue_duration_sample_(other.queue_duration_sample_),
       last_phase_snapshot_(nullptr) {
   // This constructor will be used by std::map when adding new DeathData values
   // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't
   // need to worry about ownership transfer.
   DCHECK(other.last_phase_snapshot_ == nullptr);
 }

 DeathData::~DeathData() {
   while (last_phase_snapshot_) {
     const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_;
     last_phase_snapshot_ = snapshot->prev;
     delete snapshot;
   }
 }

 // TODO(jar): I need to see if this macro to optimize branching is worth using.
 //
 // This macro has no branching, so it is surely fast, and is equivalent to:
 //   if (assign_it)
 //     target = source;
 // We use a macro rather than a template to force this to inline.
 // Related code for calculating max is discussed on the web.
 #define CONDITIONAL_ASSIGN(assign_it, target, source) \
     ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))
-void DeathData::RecordDeath(const int32_t queue_duration,
-                            const int32_t run_duration,
-                            const uint32_t random_number) {
+void DeathData::RecordDurations(const int32_t queue_duration,
+                                const int32_t run_duration,
+                                const uint32_t random_number) {
   // We'll just clamp at INT_MAX, but we should note this in the UI as such.
   if (count_ < INT_MAX)
     base::subtle::NoBarrier_Store(&count_, count_ + 1);

   int sample_probability_count =
       base::subtle::NoBarrier_Load(&sample_probability_count_);
   if (sample_probability_count < INT_MAX)
     ++sample_probability_count;
   base::subtle::NoBarrier_Store(&sample_probability_count_,
                                 sample_probability_count);
(...skipping 16 matching lines...)
   // but that should be inconsequentially likely). We ignore the fact that we
   // correlated our selection of a sample to the run and queue times (i.e., we
   // used them to generate random_number).
   CHECK_GT(sample_probability_count, 0);
   if (0 == (random_number % sample_probability_count)) {
     base::subtle::NoBarrier_Store(&queue_duration_sample_, queue_duration);
     base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration);
   }
 }

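The modulo test above is single-slot reservoir sampling: the n-th death recorded in a phase overwrites the stored sample with probability 1/n, which leaves every death seen so far equally likely to be the one retained. A self-contained sketch of the same selection rule (illustrative only; the class and names are hypothetical):

// Keeps one uniformly chosen value from a stream, mirroring the
// random_number % sample_probability_count test in RecordDurations.
#include <cstdint>
#include <random>

class ReservoirOfOne {
 public:
  void Record(int32_t value) {
    ++seen_;
    // The n-th value replaces the sample with probability 1/n, so after N
    // values each one was retained with probability 1/N.
    if (rng_() % seen_ == 0)
      sample_ = value;
  }
  int32_t sample() const { return sample_; }

 private:
  uint32_t seen_ = 0;
  int32_t sample_ = 0;
  std::mt19937 rng_;
};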
+void DeathData::RecordAllocations(const uint32_t alloc_ops,
+                                  const uint32_t free_ops,
+                                  const uint32_t allocated_bytes,
+                                  const uint32_t freed_bytes,
+                                  const uint32_t alloc_overhead_bytes,
+                                  const uint32_t max_allocated_bytes) {
+  // Use saturating arithmetic.
+  SaturatingMemberAdd(alloc_ops, &alloc_ops_);
+  SaturatingMemberAdd(free_ops, &free_ops_);
+  SaturatingMemberAdd(allocated_bytes, &allocated_bytes_);
+  SaturatingMemberAdd(freed_bytes, &freed_bytes_);
+  SaturatingMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_);
+
+  int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes);
+  if (max > max_allocated_bytes_)
+    base::subtle::NoBarrier_Store(&max_allocated_bytes_, max);
+}
+
 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
   // Snapshotting and storing current state.
-  last_phase_snapshot_ = new DeathDataPhaseSnapshot(
-      profiling_phase, count(), run_duration_sum(), run_duration_max(),
-      run_duration_sample(), queue_duration_sum(), queue_duration_max(),
-      queue_duration_sample(), last_phase_snapshot_);
+  last_phase_snapshot_ =
+      new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_);

   // Not touching fields for which a delta can be computed by comparing with a
   // snapshot from the previous phase. Resetting other fields. Sample values
   // will be reset upon next death recording because sample_probability_count_
   // is set to 0.
   // We avoid resetting to 0 in favor of deltas whenever possible. The reason
   // is that for incrementable fields, resetting to 0 from the snapshot thread
   // potentially in parallel with incrementing in the death thread may result in
   // significant data corruption that has a potential to grow with time. Not
   // resetting incrementable fields and using deltas will cause any
(...skipping 11 matching lines...)
   // The damage is limited to selecting a wrong sample, which is not something
   // that can cause accumulating or cascading effects.
   // If there were no inconsistencies caused by race conditions, we never send a
   // sample for the previous phase in the next phase's snapshot because
   // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count.
   base::subtle::NoBarrier_Store(&sample_probability_count_, 0);
   base::subtle::NoBarrier_Store(&run_duration_max_, 0);
   base::subtle::NoBarrier_Store(&queue_duration_max_, 0);
 }

+void DeathData::SaturatingMemberAdd(const uint32_t addend,
+                                    base::subtle::Atomic32* sum) {
+  // Bail quick if no work or already saturated.
+  if (addend == 0U || *sum == INT_MAX)
+    return;
+
+  base::CheckedNumeric<int32_t> new_sum = *sum;
+  new_sum += addend;
+  base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX));
+}
+
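SaturatingMemberAdd relies on base::CheckedNumeric to detect overflow, and ValueOrDefault(INT_MAX) pins the stored total at INT_MAX instead of letting it wrap. The same clamping behavior, sketched without the base library (illustrative only):

// Adds addend to *sum, clamping at INT_MAX rather than wrapping; equivalent
// in effect to the CheckedNumeric version above (minus the atomic store).
#include <climits>
#include <cstdint>

void SaturatingAdd(uint32_t addend, int32_t* sum) {
  if (addend == 0U || *sum == INT_MAX)
    return;  // Nothing to add, or already saturated.
  int64_t wide = static_cast<int64_t>(*sum) + addend;  // Cannot overflow.
  *sum = wide > INT_MAX ? INT_MAX : static_cast<int32_t>(wide);
}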
 //------------------------------------------------------------------------------
 DeathDataSnapshot::DeathDataSnapshot()
     : count(-1),
       run_duration_sum(-1),
       run_duration_max(-1),
       run_duration_sample(-1),
       queue_duration_sum(-1),
       queue_duration_max(-1),
-      queue_duration_sample(-1) {
-}
+      queue_duration_sample(-1),
+      alloc_ops(-1),
+      free_ops(-1),
+      allocated_bytes(-1),
+      freed_bytes(-1),
+      alloc_overhead_bytes(-1),
+      max_allocated_bytes(-1) {}

 DeathDataSnapshot::DeathDataSnapshot(int count,
                                      int32_t run_duration_sum,
                                      int32_t run_duration_max,
                                      int32_t run_duration_sample,
                                      int32_t queue_duration_sum,
                                      int32_t queue_duration_max,
-                                     int32_t queue_duration_sample)
+                                     int32_t queue_duration_sample,
+                                     int32_t alloc_ops,
+                                     int32_t free_ops,
+                                     int32_t allocated_bytes,
+                                     int32_t freed_bytes,
+                                     int32_t alloc_overhead_bytes,
+                                     int32_t max_allocated_bytes)
     : count(count),
       run_duration_sum(run_duration_sum),
       run_duration_max(run_duration_max),
       run_duration_sample(run_duration_sample),
       queue_duration_sum(queue_duration_sum),
       queue_duration_max(queue_duration_max),
-      queue_duration_sample(queue_duration_sample) {}
+      queue_duration_sample(queue_duration_sample),
+      alloc_ops(alloc_ops),
+      free_ops(free_ops),
+      allocated_bytes(allocated_bytes),
+      freed_bytes(freed_bytes),
+      alloc_overhead_bytes(alloc_overhead_bytes),
+      max_allocated_bytes(max_allocated_bytes) {}
+
+DeathDataSnapshot::DeathDataSnapshot(const DeathData& death_data)
+    : count(death_data.count()),
+      run_duration_sum(death_data.run_duration_sum()),
+      run_duration_max(death_data.run_duration_max()),
+      run_duration_sample(death_data.run_duration_sample()),
+      queue_duration_sum(death_data.queue_duration_sum()),
+      queue_duration_max(death_data.queue_duration_max()),
+      queue_duration_sample(death_data.queue_duration_sample()),
+      alloc_ops(death_data.alloc_ops()),
+      free_ops(death_data.free_ops()),
+      allocated_bytes(death_data.allocated_bytes()),
+      freed_bytes(death_data.freed_bytes()),
+      alloc_overhead_bytes(death_data.alloc_overhead_bytes()),
+      max_allocated_bytes(death_data.max_allocated_bytes()) {}
+
+DeathDataSnapshot::DeathDataSnapshot(const DeathDataSnapshot& death_data) =
+    default;

 DeathDataSnapshot::~DeathDataSnapshot() {
 }

 DeathDataSnapshot DeathDataSnapshot::Delta(
     const DeathDataSnapshot& older) const {
-  return DeathDataSnapshot(count - older.count,
-                           run_duration_sum - older.run_duration_sum,
-                           run_duration_max, run_duration_sample,
-                           queue_duration_sum - older.queue_duration_sum,
-                           queue_duration_max, queue_duration_sample);
+  return DeathDataSnapshot(
+      count - older.count, run_duration_sum - older.run_duration_sum,
+      run_duration_max, run_duration_sample,
+      queue_duration_sum - older.queue_duration_sum, queue_duration_max,
+      queue_duration_sample, alloc_ops - older.alloc_ops,
+      free_ops - older.free_ops, allocated_bytes - older.allocated_bytes,
+      freed_bytes - older.freed_bytes,
+      alloc_overhead_bytes - older.alloc_overhead_bytes, max_allocated_bytes);
 }
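
Note on Delta: only the cumulative fields are differenced -- count, the duration sums, and the new operation/byte tallies -- while run_duration_max, queue_duration_max, and the samples pass through unchanged because OnProfilingPhaseCompleted resets them at each phase boundary. max_allocated_bytes is likewise carried over rather than differenced, since subtracting two peaks is not meaningful. For example, if the previous phase ended with allocated_bytes at 1000 and the current snapshot reads 1600, the delta reports 600 bytes allocated during this phase.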

 //------------------------------------------------------------------------------
 BirthOnThread::BirthOnThread(const Location& location,
                              const ThreadData& current)
     : location_(location),
       birth_thread_(&current) {
 }

 //------------------------------------------------------------------------------
(...skipping 198 matching lines...)

   // Add births that are still active -- i.e. objects that have tallied a birth,
   // but have not yet tallied a matching death, and hence must be either
   // running, queued up, or being held in limbo for future posting.
   auto* current_phase_tasks =
       &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks;
   for (const auto& birth_count : birth_counts) {
     if (birth_count.second > 0) {
       current_phase_tasks->push_back(
           TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first),
-                       DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0),
+                       DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0,
+                                         0, 0, 0, 0, 0, 0),
                        "Still_Alive"));
     }
   }
 }

 // static
 void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) {
   // Get an unchanging copy of a ThreadData list.
   ThreadData* my_list = ThreadData::first();

(...skipping 38 matching lines...)
       static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0));

   DeathMap::iterator it = death_map_.find(&births);
   DeathData* death_data;
   if (it != death_map_.end()) {
     death_data = &it->second;
   } else {
     base::AutoLock lock(map_lock_);  // Lock as the map may get relocated now.
     death_data = &death_map_[&births];
   }  // Release lock ASAP.
-  death_data->RecordDeath(queue_duration, run_duration, random_number_);
+  death_data->RecordDurations(queue_duration, run_duration, random_number_);
+
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+  if (stopwatch.heap_tracking_enabled()) {
+    base::debug::ThreadHeapUsage heap_usage = stopwatch.heap_usage().usage();
+    // Saturate the 64 bit counts on conversion to 32 bit storage.
+    death_data->RecordAllocations(
+        base::saturated_cast<int32_t>(heap_usage.alloc_ops),
+        base::saturated_cast<int32_t>(heap_usage.free_ops),
+        base::saturated_cast<int32_t>(heap_usage.alloc_bytes),
+        base::saturated_cast<int32_t>(heap_usage.free_bytes),
+        base::saturated_cast<int32_t>(heap_usage.alloc_overhead_bytes),
+        base::saturated_cast<int32_t>(heap_usage.max_allocated_bytes));
+  }
+#endif
 }

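base::saturated_cast clamps rather than truncates, so a 64-bit tally above INT32_MAX arrives as INT32_MAX instead of wrapping to a negative value. For the nonnegative counters involved here, the conversion behaves like this standalone sketch (illustrative only):

// Clamping 64-bit -> 32-bit conversion, matching what base::saturated_cast
// does for these nonnegative heap counters.
#include <cstdint>
#include <limits>

int32_t SaturatedCast32(uint64_t value) {
  const uint64_t kMax =
      static_cast<uint64_t>(std::numeric_limits<int32_t>::max());
  return value > kMax ? std::numeric_limits<int32_t>::max()
                      : static_cast<int32_t>(value);
}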
 // static
 Births* ThreadData::TallyABirthIfActive(const Location& location) {
   if (!TrackingStatus())
     return NULL;
   ThreadData* current_thread_data = Get();
   if (!current_thread_data)
     return NULL;
   return current_thread_data->TallyABirth(location);
(...skipping 118 matching lines...)
                               BirthMap* birth_map,
                               DeathsSnapshot* deaths) {
   base::AutoLock lock(map_lock_);

   for (const auto& birth : birth_map_)
     (*birth_map)[birth.first] = birth.second;

   for (const auto& death : death_map_) {
     deaths->push_back(std::make_pair(
         death.first,
-        DeathDataPhaseSnapshot(profiling_phase, death.second.count(),
-                               death.second.run_duration_sum(),
-                               death.second.run_duration_max(),
-                               death.second.run_duration_sample(),
-                               death.second.queue_duration_sum(),
-                               death.second.queue_duration_max(),
-                               death.second.queue_duration_sample(),
-                               death.second.last_phase_snapshot())));
+        DeathDataPhaseSnapshot(profiling_phase, death.second,
+                               death.second.last_phase_snapshot())));
   }
 }

 void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) {
   base::AutoLock lock(map_lock_);

   for (auto& death : death_map_) {
     death.second.OnProfilingPhaseCompleted(profiling_phase);
   }
(...skipping 25 matching lines...)

   // Incarnation counter is only significant to testing, as it otherwise will
   // never again change in this process.
   ++incarnation_counter_;

   // The lock is not critical for setting status_, but it doesn't hurt. It also
   // ensures that if we have a racy initialization, that we'll bail as soon as
   // we get the lock earlier in this method.
   base::subtle::Release_Store(&status_, kInitialStartupState);
   DCHECK(base::subtle::NoBarrier_Load(&status_) != UNINITIALIZED);
+
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+  // Make sure heap tracking is enabled ASAP if the default state is active.
+  if (kInitialStartupState == PROFILING_ACTIVE &&
+      !base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled()) {
+    base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
+  }
+#endif  // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
 }

 // static
 void ThreadData::InitializeAndSetTrackingStatus(Status status) {
   DCHECK_GE(status, DEACTIVATED);
   DCHECK_LE(status, PROFILING_ACTIVE);

   EnsureTlsInitialization();  // No-op if already initialized.

-  if (status > DEACTIVATED)
+  if (status > DEACTIVATED) {
     status = PROFILING_ACTIVE;
+
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+    if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
+      base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
+#endif  // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+  }
   base::subtle::Release_Store(&status_, status);
 }

 // static
 ThreadData::Status ThreadData::status() {
   return static_cast<ThreadData::Status>(base::subtle::Acquire_Load(&status_));
 }

 // static
 bool ThreadData::TrackingStatus() {
(...skipping 87 matching lines...)
 //------------------------------------------------------------------------------
 TaskStopwatch::TaskStopwatch()
     : wallclock_duration_ms_(0),
       current_thread_data_(NULL),
       excluded_duration_ms_(0),
       parent_(NULL) {
 #if DCHECK_IS_ON()
   state_ = CREATED;
   child_ = NULL;
 #endif
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+  heap_tracking_enabled_ =
+      base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled();
+#endif
 }

 TaskStopwatch::~TaskStopwatch() {
 #if DCHECK_IS_ON()
   DCHECK(state_ != RUNNING);
   DCHECK(child_ == NULL);
 #endif
 }

 void TaskStopwatch::Start() {
 #if DCHECK_IS_ON()
   DCHECK(state_ == CREATED);
   state_ = RUNNING;
 #endif

   start_time_ = ThreadData::Now();
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+  if (heap_tracking_enabled_)
+    heap_usage_.Start();
+#endif

   current_thread_data_ = ThreadData::Get();
   if (!current_thread_data_)
     return;

   parent_ = current_thread_data_->current_stopwatch_;
 #if DCHECK_IS_ON()
   if (parent_) {
     DCHECK(parent_->state_ == RUNNING);
     DCHECK(parent_->child_ == NULL);
     parent_->child_ = this;
   }
 #endif
   current_thread_data_->current_stopwatch_ = this;
 }

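Start() and Stop() bracket a task so that wall-clock time and, when heap tracking is enabled, per-thread allocation counters are attributed to that task and read back in TallyADeath. The shape is plain scoped before/after sampling; a self-contained sketch with a hypothetical thread-local byte counter standing in for ThreadHeapUsageTracker's state:

// Scoped before/after sampling, the same shape TaskStopwatch uses for heap
// usage. g_thread_alloc_bytes is a hypothetical stand-in for the per-thread
// counters that ThreadHeapUsageTracker maintains.
#include <cstdint>

thread_local uint64_t g_thread_alloc_bytes = 0;

class ScopedAllocMeter {
 public:
  void Start() { start_bytes_ = g_thread_alloc_bytes; }
  void Stop() { allocated_bytes_ = g_thread_alloc_bytes - start_bytes_; }
  uint64_t allocated_bytes() const { return allocated_bytes_; }

 private:
  uint64_t start_bytes_ = 0;
  uint64_t allocated_bytes_ = 0;
};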
 void TaskStopwatch::Stop() {
   const TrackedTime end_time = ThreadData::Now();
 #if DCHECK_IS_ON()
   DCHECK(state_ == RUNNING);
   state_ = STOPPED;
   DCHECK(child_ == NULL);
 #endif
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+  if (heap_tracking_enabled_)
+    heap_usage_.Stop(true);
+#endif

   if (!start_time_.is_null() && !end_time.is_null()) {
     wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds();
   }

   if (!current_thread_data_)
     return;

   DCHECK(current_thread_data_->current_stopwatch_ == this);
   current_thread_data_->current_stopwatch_ = parent_;
(...skipping 31 matching lines...)
 #endif

   return current_thread_data_;
 }

 //------------------------------------------------------------------------------
 // DeathDataPhaseSnapshot

 DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(
     int profiling_phase,
-    int count,
-    int32_t run_duration_sum,
-    int32_t run_duration_max,
-    int32_t run_duration_sample,
-    int32_t queue_duration_sum,
-    int32_t queue_duration_max,
-    int32_t queue_duration_sample,
+    const DeathData& death,
     const DeathDataPhaseSnapshot* prev)
-    : profiling_phase(profiling_phase),
-      death_data(count,
-                 run_duration_sum,
-                 run_duration_max,
-                 run_duration_sample,
-                 queue_duration_sum,
-                 queue_duration_max,
-                 queue_duration_sample),
-      prev(prev) {}
+    : profiling_phase(profiling_phase), death_data(death), prev(prev) {}

 //------------------------------------------------------------------------------
 // TaskSnapshot

 TaskSnapshot::TaskSnapshot() {
 }

 TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth,
                            const DeathDataSnapshot& death_data,
                            const std::string& death_thread_name)
(...skipping 28 matching lines...)
 #endif
 }

 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) =
     default;

 ProcessDataSnapshot::~ProcessDataSnapshot() {
 }

 }  // namespace tracked_objects
