Chromium Code Reviews

Side by Side Diff: base/tracked_objects.cc

Issue 2386123003: Add heap allocator usage to task profiler. (Closed)
Patch Set: Fix remaining clang compile errors. Created 4 years, 1 month ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/tracked_objects.h" 5 #include "base/tracked_objects.h"
6 6
7 #include <limits.h> 7 #include <limits.h>
8 #include <stdlib.h> 8 #include <stdlib.h>
9 9
10 #include "base/atomicops.h" 10 #include "base/atomicops.h"
11 #include "base/base_switches.h" 11 #include "base/base_switches.h"
12 #include "base/command_line.h" 12 #include "base/command_line.h"
13 #include "base/compiler_specific.h" 13 #include "base/compiler_specific.h"
14 #include "base/debug/leak_annotations.h" 14 #include "base/debug/leak_annotations.h"
15 #include "base/logging.h" 15 #include "base/logging.h"
16 #include "base/numerics/safe_conversions.h"
16 #include "base/process/process_handle.h" 17 #include "base/process/process_handle.h"
17 #include "base/strings/stringprintf.h" 18 #include "base/strings/stringprintf.h"
18 #include "base/third_party/valgrind/memcheck.h" 19 #include "base/third_party/valgrind/memcheck.h"
19 #include "base/threading/worker_pool.h" 20 #include "base/threading/worker_pool.h"
20 #include "base/tracking_info.h" 21 #include "base/tracking_info.h"
21 #include "build/build_config.h" 22 #include "build/build_config.h"
22 23
23 using base::TimeDelta; 24 using base::TimeDelta;
24 25
25 namespace base { 26 namespace base {
(...skipping 41 matching lines...)
67 switches::kProfilerTiming) == 68 switches::kProfilerTiming) ==
68 switches::kProfilerTimingDisabledValue) 69 switches::kProfilerTimingDisabledValue)
69 ? DISABLED_TIMING 70 ? DISABLED_TIMING
70 : ENABLED_TIMING; 71 : ENABLED_TIMING;
71 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, 72 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled,
72 current_timing_enabled); 73 current_timing_enabled);
73 } 74 }
74 return current_timing_enabled == ENABLED_TIMING; 75 return current_timing_enabled == ENABLED_TIMING;
75 } 76 }
76 77
78 void SaturatingAdd(const uint32_t addend, base::subtle::Atomic32* sum) {
79 // Bail quick if no work or already saturated.
80 if (addend == 0U || *sum == INT_MAX)
Primiano Tucci (use gerrit) 2016/11/16 16:39:09 I think you are supposed to use NoBarrier_Load and NoBarrier_Store for all accesses to these Atomic32 fields.
Sigurður Ásgeirsson 2016/11/16 21:30:28 This is modeled after all other Atomic32 (member) usage in this file.
Primiano Tucci (use gerrit) 2016/11/28 18:38:31 what I am saying is that all the rest of the code uses NoBarrier_Load when reading these values.
Sigurður Ásgeirsson 2016/11/30 14:41:13 No, this is consistent with all other ThreadData members.
Primiano Tucci (use gerrit) 2016/11/30 16:22:09 Uh! This is quite inconsistent, but at this point let's stay consistent with the rest of the file.
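[Editor's note: for readers following this thread, a minimal sketch of the variant being suggested, where the reads of |sum| also go through the atomicops helpers. The name SaturatingAddWithLoads is illustrative and not part of the CL; the widening to int64_t sidesteps the signed-overflow issue raised in the next thread.]

    #include <limits.h>
    #include <stdint.h>

    #include "base/atomicops.h"

    // Illustrative only: same saturating semantics as the CL's SaturatingAdd,
    // but every access to |sum| uses NoBarrier_Load/NoBarrier_Store.
    void SaturatingAddWithLoads(uint32_t addend, base::subtle::Atomic32* sum) {
      int32_t current = base::subtle::NoBarrier_Load(sum);
      if (addend == 0U || current == INT_MAX)
        return;  // No work, or already saturated.
      // Do the addition in 64 bits so the overflow check itself is well defined.
      int64_t new_sum = static_cast<int64_t>(current) + addend;
      if (new_sum > INT_MAX)
        new_sum = INT_MAX;
      base::subtle::NoBarrier_Store(sum, static_cast<int32_t>(new_sum));
    }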
81 return;
82
83 // Check for overflow.
84 int32_t new_sum = *sum + addend;
85 if (new_sum < *sum)
Primiano Tucci (use gerrit) 2016/11/16 16:39:09 I think that integer overflow is still considered undefined behavior for signed integers in C++.
Sigurður Ásgeirsson 2016/11/16 21:30:28 Ah, thanks. Went to base::CheckedNumeric - it seems to do what's needed here.
Primiano Tucci (use gerrit) 2016/11/28 18:38:31 ah perfect thanks.
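[Editor's note: a minimal sketch of the base::CheckedNumeric route mentioned above, assuming base/numerics/safe_math.h; SaturatedSum is an illustrative name, not part of the CL. The checked addition detects overflow instead of relying on signed wrap-around, and falls back to INT_MAX.]

    #include <limits.h>
    #include <stdint.h>

    #include "base/numerics/safe_math.h"

    // Illustrative only: overflow-checked addition that saturates at INT_MAX.
    int32_t SaturatedSum(int32_t sum, uint32_t addend) {
      base::CheckedNumeric<int32_t> checked = sum;
      checked += addend;  // Marks the result invalid on overflow.
      return checked.ValueOrDefault(INT_MAX);
    }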
86 new_sum = INT_MAX;
87
88 base::subtle::NoBarrier_Store(sum, new_sum);
89 }
90
77 } // namespace 91 } // namespace
78 92
79 //------------------------------------------------------------------------------ 93 //------------------------------------------------------------------------------
80 // DeathData tallies durations when a death takes place. 94 // DeathData tallies durations when a death takes place.
81 95
82 DeathData::DeathData() 96 DeathData::DeathData()
83 : count_(0), 97 : count_(0),
84 sample_probability_count_(0), 98 sample_probability_count_(0),
85 run_duration_sum_(0), 99 run_duration_sum_(0),
86 queue_duration_sum_(0), 100 queue_duration_sum_(0),
87 run_duration_max_(0), 101 run_duration_max_(0),
88 queue_duration_max_(0), 102 queue_duration_max_(0),
89 run_duration_sample_(0), 103 run_duration_sample_(0),
90 queue_duration_sample_(0), 104 queue_duration_sample_(0),
91 last_phase_snapshot_(nullptr) { 105 alloc_ops_(0),
92 } 106 free_ops_(0),
107 allocated_bytes_(0),
108 freed_bytes_(0),
109 alloc_overhead_bytes_(0),
110 max_allocated_bytes_(0),
111 last_phase_snapshot_(nullptr) {}
93 112
94 DeathData::DeathData(const DeathData& other) 113 DeathData::DeathData(const DeathData& other)
95 : count_(other.count_), 114 : count_(other.count_),
96 sample_probability_count_(other.sample_probability_count_), 115 sample_probability_count_(other.sample_probability_count_),
97 run_duration_sum_(other.run_duration_sum_), 116 run_duration_sum_(other.run_duration_sum_),
98 queue_duration_sum_(other.queue_duration_sum_), 117 queue_duration_sum_(other.queue_duration_sum_),
99 run_duration_max_(other.run_duration_max_), 118 run_duration_max_(other.run_duration_max_),
100 queue_duration_max_(other.queue_duration_max_), 119 queue_duration_max_(other.queue_duration_max_),
101 run_duration_sample_(other.run_duration_sample_), 120 run_duration_sample_(other.run_duration_sample_),
102 queue_duration_sample_(other.queue_duration_sample_), 121 queue_duration_sample_(other.queue_duration_sample_),
122 alloc_ops_(other.alloc_ops_),
123 free_ops_(other.free_ops_),
124 allocated_bytes_(other.allocated_bytes_),
125 freed_bytes_(other.freed_bytes_),
126 alloc_overhead_bytes_(other.alloc_overhead_bytes_),
127 max_allocated_bytes_(other.max_allocated_bytes_),
103 last_phase_snapshot_(nullptr) { 128 last_phase_snapshot_(nullptr) {
104 // This constructor will be used by std::map when adding new DeathData values 129 // This constructor will be used by std::map when adding new DeathData values
105 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't 130 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't
106 // need to worry about ownership transfer. 131 // need to worry about ownership transfer.
107 DCHECK(other.last_phase_snapshot_ == nullptr); 132 DCHECK(other.last_phase_snapshot_ == nullptr);
108 } 133 }
109 134
110 DeathData::~DeathData() { 135 DeathData::~DeathData() {
111 while (last_phase_snapshot_) { 136 while (last_phase_snapshot_) {
112 const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_; 137 const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_;
113 last_phase_snapshot_ = snapshot->prev; 138 last_phase_snapshot_ = snapshot->prev;
114 delete snapshot; 139 delete snapshot;
115 } 140 }
116 } 141 }
117 142
118 // TODO(jar): I need to see if this macro to optimize branching is worth using. 143 // TODO(jar): I need to see if this macro to optimize branching is worth using.
119 // 144 //
120 // This macro has no branching, so it is surely fast, and is equivalent to: 145 // This macro has no branching, so it is surely fast, and is equivalent to:
121 // if (assign_it) 146 // if (assign_it)
122 // target = source; 147 // target = source;
123 // We use a macro rather than a template to force this to inline. 148 // We use a macro rather than a template to force this to inline.
124 // Related code for calculating max is discussed on the web. 149 // Related code for calculating max is discussed on the web.
125 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ 150 #define CONDITIONAL_ASSIGN(assign_it, target, source) \
126 ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it)) 151 ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))
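[Editor's note: a standalone illustration of why the XOR trick above is equivalent to the guarded assignment. When assign_it is 1, -static_cast<int32_t>(1) is an all-ones mask and target takes on source; when it is 0, the mask is 0 and target is unchanged. Sketch only; not part of the CL.]

    #include <assert.h>
    #include <stdint.h>

    int main() {
      int32_t target = 5;
      int32_t source = 9;
      // assign_it == 1: mask is all ones, so target ^ (target ^ source) == source.
      target ^= (target ^ source) & -static_cast<int32_t>(1);
      assert(target == 9);
      // assign_it == 0: mask is 0, so the XOR is a no-op and target is unchanged.
      target ^= (target ^ 12345) & -static_cast<int32_t>(0);
      assert(target == 9);
      return 0;
    }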
127 152
128 void DeathData::RecordDeath(const int32_t queue_duration, 153 void DeathData::RecordDurations(const int32_t queue_duration,
129 const int32_t run_duration, 154 const int32_t run_duration,
130 const uint32_t random_number) { 155 const uint32_t random_number) {
131 // We'll just clamp at INT_MAX, but we should note this in the UI as such. 156 // We'll just clamp at INT_MAX, but we should note this in the UI as such.
132 if (count_ < INT_MAX) 157 if (count_ < INT_MAX)
133 base::subtle::NoBarrier_Store(&count_, count_ + 1); 158 base::subtle::NoBarrier_Store(&count_, count_ + 1);
134 159
135 int sample_probability_count = 160 int sample_probability_count =
136 base::subtle::NoBarrier_Load(&sample_probability_count_); 161 base::subtle::NoBarrier_Load(&sample_probability_count_);
137 if (sample_probability_count < INT_MAX) 162 if (sample_probability_count < INT_MAX)
138 ++sample_probability_count; 163 ++sample_probability_count;
139 base::subtle::NoBarrier_Store(&sample_probability_count_, 164 base::subtle::NoBarrier_Store(&sample_probability_count_,
140 sample_probability_count); 165 sample_probability_count);
(...skipping 16 matching lines...)
157 // but that should be inconsequentially likely). We ignore the fact that we 182 // but that should be inconsequentially likely). We ignore the fact that we
158 // correlated our selection of a sample to the run and queue times (i.e., we 183 // correlated our selection of a sample to the run and queue times (i.e., we
159 // used them to generate random_number). 184 // used them to generate random_number).
160 CHECK_GT(sample_probability_count, 0); 185 CHECK_GT(sample_probability_count, 0);
161 if (0 == (random_number % sample_probability_count)) { 186 if (0 == (random_number % sample_probability_count)) {
162 base::subtle::NoBarrier_Store(&queue_duration_sample_, queue_duration); 187 base::subtle::NoBarrier_Store(&queue_duration_sample_, queue_duration);
163 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration); 188 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration);
164 } 189 }
165 } 190 }
166 191
192 void DeathData::RecordAllocations(const uint32_t alloc_ops,
193 const uint32_t free_ops,
194 const uint32_t allocated_bytes,
195 const uint32_t freed_bytes,
196 const uint32_t alloc_overhead_bytes,
197 const uint32_t max_allocated_bytes) {
198 // Use saturating arithmetic.
199 SaturatingAdd(alloc_ops, &alloc_ops_);
200 SaturatingAdd(free_ops, &free_ops_);
201 SaturatingAdd(allocated_bytes, &allocated_bytes_);
202 SaturatingAdd(freed_bytes, &freed_bytes_);
203 SaturatingAdd(alloc_overhead_bytes, &alloc_overhead_bytes_);
204
205 int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes);
206 if (max > max_allocated_bytes_)
207 base::subtle::NoBarrier_Store(&max_allocated_bytes_, max);
208 }
209
167 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { 210 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
168 // Snapshotting and storing current state. 211 // Snapshotting and storing current state.
169 last_phase_snapshot_ = new DeathDataPhaseSnapshot( 212 last_phase_snapshot_ =
170 profiling_phase, count(), run_duration_sum(), run_duration_max(), 213 new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_);
171 run_duration_sample(), queue_duration_sum(), queue_duration_max(),
172 queue_duration_sample(), last_phase_snapshot_);
173 214
174 // Not touching fields for which a delta can be computed by comparing with a 215 // Not touching fields for which a delta can be computed by comparing with a
175 // snapshot from the previous phase. Resetting other fields. Sample values 216 // snapshot from the previous phase. Resetting other fields. Sample values
176 // will be reset upon next death recording because sample_probability_count_ 217 // will be reset upon next death recording because sample_probability_count_
177 // is set to 0. 218 // is set to 0.
178 // We avoid resetting to 0 in favor of deltas whenever possible. The reason 219 // We avoid resetting to 0 in favor of deltas whenever possible. The reason
179 // is that for incrementable fields, resetting to 0 from the snapshot thread 220 // is that for incrementable fields, resetting to 0 from the snapshot thread
180 // potentially in parallel with incrementing in the death thread may result in 221 // potentially in parallel with incrementing in the death thread may result in
181 // significant data corruption that has a potential to grow with time. Not 222 // significant data corruption that has a potential to grow with time. Not
182 // resetting incrementable fields and using deltas will cause any 223 // resetting incrementable fields and using deltas will cause any
(...skipping 19 matching lines...)
202 } 243 }
203 244
204 //------------------------------------------------------------------------------ 245 //------------------------------------------------------------------------------
205 DeathDataSnapshot::DeathDataSnapshot() 246 DeathDataSnapshot::DeathDataSnapshot()
206 : count(-1), 247 : count(-1),
207 run_duration_sum(-1), 248 run_duration_sum(-1),
208 run_duration_max(-1), 249 run_duration_max(-1),
209 run_duration_sample(-1), 250 run_duration_sample(-1),
210 queue_duration_sum(-1), 251 queue_duration_sum(-1),
211 queue_duration_max(-1), 252 queue_duration_max(-1),
212 queue_duration_sample(-1) { 253 queue_duration_sample(-1),
213 } 254 alloc_ops(-1),
255 free_ops(-1),
256 allocated_bytes(-1),
257 freed_bytes(-1),
258 alloc_overhead_bytes(-1),
259 max_allocated_bytes(-1) {}
214 260
215 DeathDataSnapshot::DeathDataSnapshot(int count, 261 DeathDataSnapshot::DeathDataSnapshot(int count,
216 int32_t run_duration_sum, 262 int32_t run_duration_sum,
217 int32_t run_duration_max, 263 int32_t run_duration_max,
218 int32_t run_duration_sample, 264 int32_t run_duration_sample,
219 int32_t queue_duration_sum, 265 int32_t queue_duration_sum,
220 int32_t queue_duration_max, 266 int32_t queue_duration_max,
221 int32_t queue_duration_sample) 267 int32_t queue_duration_sample,
268 int32_t alloc_ops,
269 int32_t free_ops,
270 int32_t allocated_bytes,
271 int32_t freed_bytes,
272 int32_t alloc_overhead_bytes,
273 int32_t max_allocated_bytes)
222 : count(count), 274 : count(count),
223 run_duration_sum(run_duration_sum), 275 run_duration_sum(run_duration_sum),
224 run_duration_max(run_duration_max), 276 run_duration_max(run_duration_max),
225 run_duration_sample(run_duration_sample), 277 run_duration_sample(run_duration_sample),
226 queue_duration_sum(queue_duration_sum), 278 queue_duration_sum(queue_duration_sum),
227 queue_duration_max(queue_duration_max), 279 queue_duration_max(queue_duration_max),
228 queue_duration_sample(queue_duration_sample) {} 280 queue_duration_sample(queue_duration_sample),
281 alloc_ops(alloc_ops),
282 free_ops(free_ops),
283 allocated_bytes(allocated_bytes),
284 freed_bytes(freed_bytes),
285 alloc_overhead_bytes(alloc_overhead_bytes),
286 max_allocated_bytes(max_allocated_bytes) {}
287
288 DeathDataSnapshot::DeathDataSnapshot(const DeathData& death_data)
289 : count(death_data.count()),
290 run_duration_sum(death_data.run_duration_sum()),
291 run_duration_max(death_data.run_duration_max()),
292 run_duration_sample(death_data.run_duration_sample()),
293 queue_duration_sum(death_data.queue_duration_sum()),
294 queue_duration_max(death_data.queue_duration_max()),
295 queue_duration_sample(death_data.queue_duration_sample()),
296 alloc_ops(death_data.alloc_ops()),
297 free_ops(death_data.free_ops()),
298 allocated_bytes(death_data.allocated_bytes()),
299 freed_bytes(death_data.freed_bytes()),
300 alloc_overhead_bytes(death_data.alloc_overhead_bytes()),
301 max_allocated_bytes(death_data.max_allocated_bytes()) {}
302
303 DeathDataSnapshot::DeathDataSnapshot(const DeathDataSnapshot& death_data) =
304 default;
229 305
230 DeathDataSnapshot::~DeathDataSnapshot() { 306 DeathDataSnapshot::~DeathDataSnapshot() {
231 } 307 }
232 308
233 DeathDataSnapshot DeathDataSnapshot::Delta( 309 DeathDataSnapshot DeathDataSnapshot::Delta(
234 const DeathDataSnapshot& older) const { 310 const DeathDataSnapshot& older) const {
235 return DeathDataSnapshot(count - older.count, 311 return DeathDataSnapshot(
236 run_duration_sum - older.run_duration_sum, 312 count - older.count, run_duration_sum - older.run_duration_sum,
237 run_duration_max, run_duration_sample, 313 run_duration_max, run_duration_sample,
238 queue_duration_sum - older.queue_duration_sum, 314 queue_duration_sum - older.queue_duration_sum, queue_duration_max,
239 queue_duration_max, queue_duration_sample); 315 queue_duration_sample, alloc_ops - older.alloc_ops,
316 free_ops - older.free_ops, allocated_bytes - older.allocated_bytes,
317 freed_bytes - older.freed_bytes,
318 alloc_overhead_bytes - older.alloc_overhead_bytes, max_allocated_bytes);
240 } 319 }
241 320
242 //------------------------------------------------------------------------------ 321 //------------------------------------------------------------------------------
243 BirthOnThread::BirthOnThread(const Location& location, 322 BirthOnThread::BirthOnThread(const Location& location,
244 const ThreadData& current) 323 const ThreadData& current)
245 : location_(location), 324 : location_(location),
246 birth_thread_(&current) { 325 birth_thread_(&current) {
247 } 326 }
248 327
249 //------------------------------------------------------------------------------ 328 //------------------------------------------------------------------------------
(...skipping 198 matching lines...)
448 527
449 // Add births that are still active -- i.e. objects that have tallied a birth, 528 // Add births that are still active -- i.e. objects that have tallied a birth,
450 // but have not yet tallied a matching death, and hence must be either 529 // but have not yet tallied a matching death, and hence must be either
451 // running, queued up, or being held in limbo for future posting. 530 // running, queued up, or being held in limbo for future posting.
452 auto* current_phase_tasks = 531 auto* current_phase_tasks =
453 &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks; 532 &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks;
454 for (const auto& birth_count : birth_counts) { 533 for (const auto& birth_count : birth_counts) {
455 if (birth_count.second > 0) { 534 if (birth_count.second > 0) {
456 current_phase_tasks->push_back( 535 current_phase_tasks->push_back(
457 TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first), 536 TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first),
458 DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0), 537 DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0,
538 0, 0, 0, 0, 0, 0),
459 "Still_Alive")); 539 "Still_Alive"));
460 } 540 }
461 } 541 }
462 } 542 }
463 543
464 // static 544 // static
465 void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) { 545 void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) {
466 // Get an unchanging copy of a ThreadData list. 546 // Get an unchanging copy of a ThreadData list.
467 ThreadData* my_list = ThreadData::first(); 547 ThreadData* my_list = ThreadData::first();
468 548
(...skipping 38 matching lines...)
507 static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0)); 587 static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0));
508 588
509 DeathMap::iterator it = death_map_.find(&births); 589 DeathMap::iterator it = death_map_.find(&births);
510 DeathData* death_data; 590 DeathData* death_data;
511 if (it != death_map_.end()) { 591 if (it != death_map_.end()) {
512 death_data = &it->second; 592 death_data = &it->second;
513 } else { 593 } else {
514 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. 594 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now.
515 death_data = &death_map_[&births]; 595 death_data = &death_map_[&births];
516 } // Release lock ASAP. 596 } // Release lock ASAP.
517 death_data->RecordDeath(queue_duration, run_duration, random_number_); 597 death_data->RecordDurations(queue_duration, run_duration, random_number_);
598
599 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
600 if (stopwatch.heap_tracking_enabled()) {
601 base::debug::ThreadHeapUsage heap_usage = stopwatch.heap_usage().usage();
602 // Saturate the 64 bit counts on conversion to 32 bit storage.
603 death_data->RecordAllocations(
604 base::saturated_cast<int32_t>(heap_usage.alloc_ops),
605 base::saturated_cast<int32_t>(heap_usage.free_ops),
606 base::saturated_cast<int32_t>(heap_usage.alloc_bytes),
607 base::saturated_cast<int32_t>(heap_usage.free_bytes),
608 base::saturated_cast<int32_t>(heap_usage.alloc_overhead_bytes),
609 base::saturated_cast<int32_t>(heap_usage.max_allocated_bytes));
610 }
611 #endif
518 } 612 }
519 613
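[Editor's note: for reference on the saturated_cast calls above, base::saturated_cast clamps an out-of-range value to the destination type's limits rather than truncating bits. A minimal sketch; values are arbitrary.]

    #include <stdint.h>

    #include "base/numerics/safe_conversions.h"

    void SaturatedCastExamples() {
      // A 64-bit count too large for int32_t clamps to INT32_MAX...
      int32_t big = base::saturated_cast<int32_t>(UINT64_C(1) << 40);
      // ...and an in-range value converts unchanged.
      int32_t small = base::saturated_cast<int32_t>(UINT64_C(42));
      (void)big;    // big == 2147483647
      (void)small;  // small == 42
    }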
520 // static 614 // static
521 Births* ThreadData::TallyABirthIfActive(const Location& location) { 615 Births* ThreadData::TallyABirthIfActive(const Location& location) {
522 if (!TrackingStatus()) 616 if (!TrackingStatus())
523 return NULL; 617 return NULL;
524 ThreadData* current_thread_data = Get(); 618 ThreadData* current_thread_data = Get();
525 if (!current_thread_data) 619 if (!current_thread_data)
526 return NULL; 620 return NULL;
527 return current_thread_data->TallyABirth(location); 621 return current_thread_data->TallyABirth(location);
(...skipping 118 matching lines...)
646 BirthMap* birth_map, 740 BirthMap* birth_map,
647 DeathsSnapshot* deaths) { 741 DeathsSnapshot* deaths) {
648 base::AutoLock lock(map_lock_); 742 base::AutoLock lock(map_lock_);
649 743
650 for (const auto& birth : birth_map_) 744 for (const auto& birth : birth_map_)
651 (*birth_map)[birth.first] = birth.second; 745 (*birth_map)[birth.first] = birth.second;
652 746
653 for (const auto& death : death_map_) { 747 for (const auto& death : death_map_) {
654 deaths->push_back(std::make_pair( 748 deaths->push_back(std::make_pair(
655 death.first, 749 death.first,
656 DeathDataPhaseSnapshot(profiling_phase, death.second.count(), 750 DeathDataPhaseSnapshot(profiling_phase, death.second,
657 death.second.run_duration_sum(),
658 death.second.run_duration_max(),
659 death.second.run_duration_sample(),
660 death.second.queue_duration_sum(),
661 death.second.queue_duration_max(),
662 death.second.queue_duration_sample(),
663 death.second.last_phase_snapshot()))); 751 death.second.last_phase_snapshot())));
664 } 752 }
665 } 753 }
666 754
667 void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) { 755 void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) {
668 base::AutoLock lock(map_lock_); 756 base::AutoLock lock(map_lock_);
669 757
670 for (auto& death : death_map_) { 758 for (auto& death : death_map_) {
671 death.second.OnProfilingPhaseCompleted(profiling_phase); 759 death.second.OnProfilingPhaseCompleted(profiling_phase);
672 } 760 }
(...skipping 25 matching lines...)
698 786
699 // Incarnation counter is only significant to testing, as it otherwise will 787 // Incarnation counter is only significant to testing, as it otherwise will
700 // never again change in this process. 788 // never again change in this process.
701 ++incarnation_counter_; 789 ++incarnation_counter_;
702 790
703 // The lock is not critical for setting status_, but it doesn't hurt. It also 791 // The lock is not critical for setting status_, but it doesn't hurt. It also
704 // ensures that if we have a racy initialization, that we'll bail as soon as 792 // ensures that if we have a racy initialization, that we'll bail as soon as
705 // we get the lock earlier in this method. 793 // we get the lock earlier in this method.
706 base::subtle::Release_Store(&status_, kInitialStartupState); 794 base::subtle::Release_Store(&status_, kInitialStartupState);
707 DCHECK(base::subtle::NoBarrier_Load(&status_) != UNINITIALIZED); 795 DCHECK(base::subtle::NoBarrier_Load(&status_) != UNINITIALIZED);
796
797 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
798 // Make sure heap tracking is enabled ASAP if the default state is active.
799 if (kInitialStartupState == PROFILING_ACTIVE &&
800 !base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled()) {
801 base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
802 }
803 #endif // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
708 } 804 }
709 805
710 // static 806 // static
711 void ThreadData::InitializeAndSetTrackingStatus(Status status) { 807 void ThreadData::InitializeAndSetTrackingStatus(Status status) {
712 DCHECK_GE(status, DEACTIVATED); 808 DCHECK_GE(status, DEACTIVATED);
713 DCHECK_LE(status, PROFILING_ACTIVE); 809 DCHECK_LE(status, PROFILING_ACTIVE);
714 810
715 EnsureTlsInitialization(); // No-op if already initialized. 811 EnsureTlsInitialization(); // No-op if already initialized.
716 812
717 if (status > DEACTIVATED) 813 if (status > DEACTIVATED) {
718 status = PROFILING_ACTIVE; 814 status = PROFILING_ACTIVE;
815
816 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
817 if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
818 base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
819 #endif // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
820 }
719 base::subtle::Release_Store(&status_, status); 821 base::subtle::Release_Store(&status_, status);
720 } 822 }
721 823
722 // static 824 // static
723 ThreadData::Status ThreadData::status() { 825 ThreadData::Status ThreadData::status() {
724 return static_cast<ThreadData::Status>(base::subtle::Acquire_Load(&status_)); 826 return static_cast<ThreadData::Status>(base::subtle::Acquire_Load(&status_));
725 } 827 }
726 828
727 // static 829 // static
728 bool ThreadData::TrackingStatus() { 830 bool ThreadData::TrackingStatus() {
(...skipping 87 matching lines...)
816 //------------------------------------------------------------------------------ 918 //------------------------------------------------------------------------------
817 TaskStopwatch::TaskStopwatch() 919 TaskStopwatch::TaskStopwatch()
818 : wallclock_duration_ms_(0), 920 : wallclock_duration_ms_(0),
819 current_thread_data_(NULL), 921 current_thread_data_(NULL),
820 excluded_duration_ms_(0), 922 excluded_duration_ms_(0),
821 parent_(NULL) { 923 parent_(NULL) {
822 #if DCHECK_IS_ON() 924 #if DCHECK_IS_ON()
823 state_ = CREATED; 925 state_ = CREATED;
824 child_ = NULL; 926 child_ = NULL;
825 #endif 927 #endif
928 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
929 heap_tracking_enabled_ =
930 base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled();
931 #endif
826 } 932 }
827 933
828 TaskStopwatch::~TaskStopwatch() { 934 TaskStopwatch::~TaskStopwatch() {
829 #if DCHECK_IS_ON() 935 #if DCHECK_IS_ON()
830 DCHECK(state_ != RUNNING); 936 DCHECK(state_ != RUNNING);
831 DCHECK(child_ == NULL); 937 DCHECK(child_ == NULL);
832 #endif 938 #endif
833 } 939 }
834 940
835 void TaskStopwatch::Start() { 941 void TaskStopwatch::Start() {
836 #if DCHECK_IS_ON() 942 #if DCHECK_IS_ON()
837 DCHECK(state_ == CREATED); 943 DCHECK(state_ == CREATED);
838 state_ = RUNNING; 944 state_ = RUNNING;
839 #endif 945 #endif
840 946
841 start_time_ = ThreadData::Now(); 947 start_time_ = ThreadData::Now();
948 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
949 if (heap_tracking_enabled_)
950 heap_usage_.Start();
951 #endif
842 952
843 current_thread_data_ = ThreadData::Get(); 953 current_thread_data_ = ThreadData::Get();
844 if (!current_thread_data_) 954 if (!current_thread_data_)
845 return; 955 return;
846 956
847 parent_ = current_thread_data_->current_stopwatch_; 957 parent_ = current_thread_data_->current_stopwatch_;
848 #if DCHECK_IS_ON() 958 #if DCHECK_IS_ON()
849 if (parent_) { 959 if (parent_) {
850 DCHECK(parent_->state_ == RUNNING); 960 DCHECK(parent_->state_ == RUNNING);
851 DCHECK(parent_->child_ == NULL); 961 DCHECK(parent_->child_ == NULL);
852 parent_->child_ = this; 962 parent_->child_ = this;
853 } 963 }
854 #endif 964 #endif
855 current_thread_data_->current_stopwatch_ = this; 965 current_thread_data_->current_stopwatch_ = this;
856 } 966 }
857 967
858 void TaskStopwatch::Stop() { 968 void TaskStopwatch::Stop() {
859 const TrackedTime end_time = ThreadData::Now(); 969 const TrackedTime end_time = ThreadData::Now();
860 #if DCHECK_IS_ON() 970 #if DCHECK_IS_ON()
861 DCHECK(state_ == RUNNING); 971 DCHECK(state_ == RUNNING);
862 state_ = STOPPED; 972 state_ = STOPPED;
863 DCHECK(child_ == NULL); 973 DCHECK(child_ == NULL);
864 #endif 974 #endif
975 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
976 if (heap_tracking_enabled_)
977 heap_usage_.Stop(true);
978 #endif
865 979
866 if (!start_time_.is_null() && !end_time.is_null()) { 980 if (!start_time_.is_null() && !end_time.is_null()) {
867 wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds(); 981 wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds();
868 } 982 }
869 983
870 if (!current_thread_data_) 984 if (!current_thread_data_)
871 return; 985 return;
872 986
873 DCHECK(current_thread_data_->current_stopwatch_ == this); 987 DCHECK(current_thread_data_->current_stopwatch_ == this);
874 current_thread_data_->current_stopwatch_ = parent_; 988 current_thread_data_->current_stopwatch_ = parent_;
(...skipping 18 matching lines...)
893 } 1007 }
894 1008
895 int32_t TaskStopwatch::RunDurationMs() const { 1009 int32_t TaskStopwatch::RunDurationMs() const {
896 #if DCHECK_IS_ON() 1010 #if DCHECK_IS_ON()
897 DCHECK(state_ == STOPPED); 1011 DCHECK(state_ == STOPPED);
898 #endif 1012 #endif
899 1013
900 return wallclock_duration_ms_ - excluded_duration_ms_; 1014 return wallclock_duration_ms_ - excluded_duration_ms_;
901 } 1015 }
902 1016
1017 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
1018 const base::debug::ThreadHeapUsageTracker& TaskStopwatch::heap_usage() const {
1019 return heap_usage_;
1020 }
1021 #endif
1022
903 ThreadData* TaskStopwatch::GetThreadData() const { 1023 ThreadData* TaskStopwatch::GetThreadData() const {
904 #if DCHECK_IS_ON() 1024 #if DCHECK_IS_ON()
905 DCHECK(state_ != CREATED); 1025 DCHECK(state_ != CREATED);
906 #endif 1026 #endif
907 1027
908 return current_thread_data_; 1028 return current_thread_data_;
909 } 1029 }
910 1030
911 //------------------------------------------------------------------------------ 1031 //------------------------------------------------------------------------------
912 // DeathDataPhaseSnapshot 1032 // DeathDataPhaseSnapshot
913 1033
914 DeathDataPhaseSnapshot::DeathDataPhaseSnapshot( 1034 DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(
915 int profiling_phase, 1035 int profiling_phase,
916 int count, 1036 const DeathData& death,
917 int32_t run_duration_sum,
918 int32_t run_duration_max,
919 int32_t run_duration_sample,
920 int32_t queue_duration_sum,
921 int32_t queue_duration_max,
922 int32_t queue_duration_sample,
923 const DeathDataPhaseSnapshot* prev) 1037 const DeathDataPhaseSnapshot* prev)
924 : profiling_phase(profiling_phase), 1038 : profiling_phase(profiling_phase), death_data(death), prev(prev) {}
925 death_data(count,
926 run_duration_sum,
927 run_duration_max,
928 run_duration_sample,
929 queue_duration_sum,
930 queue_duration_max,
931 queue_duration_sample),
932 prev(prev) {}
933 1039
934 //------------------------------------------------------------------------------ 1040 //------------------------------------------------------------------------------
935 // TaskSnapshot 1041 // TaskSnapshot
936 1042
937 TaskSnapshot::TaskSnapshot() { 1043 TaskSnapshot::TaskSnapshot() {
938 } 1044 }
939 1045
940 TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth, 1046 TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth,
941 const DeathDataSnapshot& death_data, 1047 const DeathDataSnapshot& death_data,
942 const std::string& death_thread_name) 1048 const std::string& death_thread_name)
(...skipping 28 matching lines...)
971 #endif 1077 #endif
972 } 1078 }
973 1079
974 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = 1080 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) =
975 default; 1081 default;
976 1082
977 ProcessDataSnapshot::~ProcessDataSnapshot() { 1083 ProcessDataSnapshot::~ProcessDataSnapshot() {
978 } 1084 }
979 1085
980 } // namespace tracked_objects 1086 } // namespace tracked_objects