Chromium Code Reviews

Side by Side Diff: base/tracked_objects.cc

Issue 2386123003: Add heap allocator usage to task profiler. (Closed)
Patch Set: Figure out where the @#$%! corruption is coming from. Move heap tracking to TaskStopwatch. Created 4 years, 2 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/tracked_objects.h" 5 #include "base/tracked_objects.h"
6 6
7 #include <limits.h> 7 #include <limits.h>
8 #include <stdlib.h> 8 #include <stdlib.h>
9 9
10 #include "base/atomicops.h" 10 #include "base/atomicops.h"
11 #include "base/base_switches.h" 11 #include "base/base_switches.h"
12 #include "base/command_line.h" 12 #include "base/command_line.h"
13 #include "base/compiler_specific.h" 13 #include "base/compiler_specific.h"
14 #include "base/debug/leak_annotations.h" 14 #include "base/debug/leak_annotations.h"
15 #include "base/debug/scoped_thread_heap_usage.h"
15 #include "base/logging.h" 16 #include "base/logging.h"
16 #include "base/process/process_handle.h" 17 #include "base/process/process_handle.h"
17 #include "base/strings/stringprintf.h" 18 #include "base/strings/stringprintf.h"
18 #include "base/third_party/valgrind/memcheck.h" 19 #include "base/third_party/valgrind/memcheck.h"
19 #include "base/threading/worker_pool.h" 20 #include "base/threading/worker_pool.h"
20 #include "base/tracking_info.h" 21 #include "base/tracking_info.h"
21 #include "build/build_config.h" 22 #include "build/build_config.h"
22 23
23 using base::TimeDelta; 24 using base::TimeDelta;
24 25
(...skipping 42 matching lines...)
67 switches::kProfilerTiming) == 68 switches::kProfilerTiming) ==
68 switches::kProfilerTimingDisabledValue) 69 switches::kProfilerTimingDisabledValue)
69 ? DISABLED_TIMING 70 ? DISABLED_TIMING
70 : ENABLED_TIMING; 71 : ENABLED_TIMING;
71 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, 72 base::subtle::NoBarrier_Store(&g_profiler_timing_enabled,
72 current_timing_enabled); 73 current_timing_enabled);
73 } 74 }
74 return current_timing_enabled == ENABLED_TIMING; 75 return current_timing_enabled == ENABLED_TIMING;
75 } 76 }
76 77
78 void SaturatingAdd(const uint32_t addend, base::subtle::Atomic32* sum) {
79 // Bail quick if no work or alreadu saturated.
Primiano Tucci (use gerrit) 2016/10/13 20:47:44 typo: alreadu
Sigurður Ásgeirsson 2016/10/14 20:11:35 Done.
80 if (addend == 0U || *sum == INT_MAX)
Primiano Tucci (use gerrit) 2016/10/13 20:47:44 I think you are supposed to use at least NoBarrier…
Sigurður Ásgeirsson 2016/10/14 20:11:36 I'm modeling on what's done elsewhere in this file…
81 return;
82
 83 // Check for overflow.
84 int32_t new_sum = *sum + addend;
85 if (new_sum < *sum)
86 new_sum = INT_MAX;
87
88 base::subtle::NoBarrier_Store(sum, new_sum);
Primiano Tucci (use gerrit) 2016/10/13 20:47:44 this is not atomic anymore now? Don't you want a N…
Sigurður Ásgeirsson 2016/10/14 20:11:35 This thread is the only writer, and in the interes…
89 }
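
The thread above raises an atomicity concern: the plain *sum reads can race with the NoBarrier_Store, and the author's reply relies on each DeathData having a single writer thread. For contrast, here is a minimal sketch of a fully atomic variant built on the compare-and-swap primitive from base/atomicops.h (the retry loop is an illustration, not part of this patch):

  // Hypothetical alternative, not in this patch: a lock-free saturating add
  // that tolerates multiple concurrent writers.
  void AtomicSaturatingAdd(uint32_t addend, base::subtle::Atomic32* sum) {
    if (addend == 0U)
      return;
    base::subtle::Atomic32 old_sum = base::subtle::NoBarrier_Load(sum);
    for (;;) {
      if (old_sum == INT_MAX)
        return;  // Already saturated.
      int32_t new_sum = old_sum + static_cast<int32_t>(addend);
      if (new_sum < old_sum)
        new_sum = INT_MAX;  // Clamp on wraparound, as the patch does.
      // Publish only if no other writer raced us; otherwise retry against
      // the value that won.
      base::subtle::Atomic32 prev =
          base::subtle::NoBarrier_CompareAndSwap(sum, old_sum, new_sum);
      if (prev == old_sum)
        return;
      old_sum = prev;
    }
  }

The patch's single-writer version avoids the loop entirely, trading strict atomicity for a cheaper hot path when recording deaths.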
90
77 } // namespace 91 } // namespace
78 92
79 //------------------------------------------------------------------------------ 93 //------------------------------------------------------------------------------
80 // DeathData tallies durations when a death takes place. 94 // DeathData tallies durations when a death takes place.
81 95
82 DeathData::DeathData() 96 DeathData::DeathData()
83 : count_(0), 97 : count_(0),
84 sample_probability_count_(0), 98 sample_probability_count_(0),
85 run_duration_sum_(0), 99 run_duration_sum_(0),
86 queue_duration_sum_(0), 100 queue_duration_sum_(0),
87 run_duration_max_(0), 101 run_duration_max_(0),
88 queue_duration_max_(0), 102 queue_duration_max_(0),
89 run_duration_sample_(0), 103 run_duration_sample_(0),
90 queue_duration_sample_(0), 104 queue_duration_sample_(0),
91 last_phase_snapshot_(nullptr) { 105 alloc_ops_(0),
92 } 106 free_ops_(0),
107 allocated_bytes_(0),
108 freed_bytes_(0),
109 alloc_overhead_bytes_(0),
110 max_allocated_bytes_(0),
111 last_phase_snapshot_(nullptr) {}
93 112
94 DeathData::DeathData(const DeathData& other) 113 DeathData::DeathData(const DeathData& other)
95 : count_(other.count_), 114 : count_(other.count_),
96 sample_probability_count_(other.sample_probability_count_), 115 sample_probability_count_(other.sample_probability_count_),
97 run_duration_sum_(other.run_duration_sum_), 116 run_duration_sum_(other.run_duration_sum_),
98 queue_duration_sum_(other.queue_duration_sum_), 117 queue_duration_sum_(other.queue_duration_sum_),
99 run_duration_max_(other.run_duration_max_), 118 run_duration_max_(other.run_duration_max_),
100 queue_duration_max_(other.queue_duration_max_), 119 queue_duration_max_(other.queue_duration_max_),
101 run_duration_sample_(other.run_duration_sample_), 120 run_duration_sample_(other.run_duration_sample_),
102 queue_duration_sample_(other.queue_duration_sample_), 121 queue_duration_sample_(other.queue_duration_sample_),
122 alloc_ops_(other.alloc_ops_),
123 free_ops_(other.free_ops_),
124 allocated_bytes_(other.allocated_bytes_),
125 freed_bytes_(other.freed_bytes_),
126 alloc_overhead_bytes_(other.alloc_overhead_bytes_),
127 max_allocated_bytes_(other.max_allocated_bytes_),
103 last_phase_snapshot_(nullptr) { 128 last_phase_snapshot_(nullptr) {
104 // This constructor will be used by std::map when adding new DeathData values 129 // This constructor will be used by std::map when adding new DeathData values
105 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't 130 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't
106 // need to worry about ownership transfer. 131 // need to worry about ownership transfer.
107 DCHECK(other.last_phase_snapshot_ == nullptr); 132 DCHECK(other.last_phase_snapshot_ == nullptr);
108 } 133 }
109 134
110 DeathData::~DeathData() { 135 DeathData::~DeathData() {
111 while (last_phase_snapshot_) { 136 while (last_phase_snapshot_) {
112 const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_; 137 const DeathDataPhaseSnapshot* snapshot = last_phase_snapshot_;
113 last_phase_snapshot_ = snapshot->prev; 138 last_phase_snapshot_ = snapshot->prev;
114 delete snapshot; 139 delete snapshot;
115 } 140 }
116 } 141 }
117 142
118 // TODO(jar): I need to see if this macro to optimize branching is worth using. 143 // TODO(jar): I need to see if this macro to optimize branching is worth using.
119 // 144 //
120 // This macro has no branching, so it is surely fast, and is equivalent to: 145 // This macro has no branching, so it is surely fast, and is equivalent to:
121 // if (assign_it) 146 // if (assign_it)
122 // target = source; 147 // target = source;
123 // We use a macro rather than a template to force this to inline. 148 // We use a macro rather than a template to force this to inline.
124 // Related code for calculating max is discussed on the web. 149 // Related code for calculating max is discussed on the web.
125 #define CONDITIONAL_ASSIGN(assign_it, target, source) \ 150 #define CONDITIONAL_ASSIGN(assign_it, target, source) \
126 ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it)) 151 ((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))
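
A quick worked illustration of the mask trick (not part of the patch): when assign_it is 1, -static_cast<int32_t>(1) is all ones, so the masked XOR collapses to target = source; when assign_it is 0, the mask is all zeros and target is left untouched.

  int32_t target = 5;
  int32_t source = 9;
  CONDITIONAL_ASSIGN(1, target, source);  // Mask is ~0; target becomes 9.
  CONDITIONAL_ASSIGN(0, target, source);  // Mask is 0; target stays 9.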
127 152
128 void DeathData::RecordDeath(const int32_t queue_duration, 153 void DeathData::RecordDurations(const int32_t queue_duration,
129 const int32_t run_duration, 154 const int32_t run_duration,
130 const uint32_t random_number) { 155 const uint32_t random_number) {
131 // We'll just clamp at INT_MAX, but we should note this in the UI as such. 156 // We'll just clamp at INT_MAX, but we should note this in the UI as such.
132 if (count_ < INT_MAX) 157 if (count_ < INT_MAX)
133 base::subtle::NoBarrier_Store(&count_, count_ + 1); 158 base::subtle::NoBarrier_Store(&count_, count_ + 1);
134 159
135 int sample_probability_count = 160 int sample_probability_count =
136 base::subtle::NoBarrier_Load(&sample_probability_count_); 161 base::subtle::NoBarrier_Load(&sample_probability_count_);
137 if (sample_probability_count < INT_MAX) 162 if (sample_probability_count < INT_MAX)
138 ++sample_probability_count; 163 ++sample_probability_count;
139 base::subtle::NoBarrier_Store(&sample_probability_count_, 164 base::subtle::NoBarrier_Store(&sample_probability_count_,
140 sample_probability_count); 165 sample_probability_count);
(...skipping 16 matching lines...)
157 // but that should be inconsequentially likely). We ignore the fact that we 182 // but that should be inconsequentially likely). We ignore the fact that we
158 // correlated our selection of a sample to the run and queue times (i.e., we 183 // correlated our selection of a sample to the run and queue times (i.e., we
159 // used them to generate random_number). 184 // used them to generate random_number).
160 CHECK_GT(sample_probability_count, 0); 185 CHECK_GT(sample_probability_count, 0);
161 if (0 == (random_number % sample_probability_count)) { 186 if (0 == (random_number % sample_probability_count)) {
162 base::subtle::NoBarrier_Store(&queue_duration_sample_, queue_duration); 187 base::subtle::NoBarrier_Store(&queue_duration_sample_, queue_duration);
163 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration); 188 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration);
164 } 189 }
165 } 190 }
166 191
192 void DeathData::RecordAllocations(const uint32_t alloc_ops,
193 const uint32_t free_ops,
194 const uint32_t allocated_bytes,
195 const uint32_t freed_bytes,
196 const uint32_t alloc_overhead_bytes,
197 const uint32_t max_allocated_bytes) {
198 // Use saturating arithmetic.
199 SaturatingAdd(alloc_ops, &alloc_ops_);
200 SaturatingAdd(free_ops, &free_ops_);
201 SaturatingAdd(allocated_bytes, &allocated_bytes_);
202 SaturatingAdd(freed_bytes, &freed_bytes_);
203 SaturatingAdd(alloc_overhead_bytes, &alloc_overhead_bytes_);
204
205 if (max_allocated_bytes > INT_MAX)
Primiano Tucci (use gerrit) 2016/10/13 20:47:44 I think base::saturated_cast might help here?
Sigurður Ásgeirsson 2016/10/14 20:11:35 Thanks, neat! I didn't know of that.
206 base::subtle::NoBarrier_Store(&max_allocated_bytes_, INT_MAX);
207 else if (static_cast<int32_t>(max_allocated_bytes) > max_allocated_bytes_)
208 base::subtle::NoBarrier_Store(&max_allocated_bytes_, max_allocated_bytes);
209 }
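
Per the base::saturated_cast suggestion in the thread above: saturated_cast<> (from base/numerics/safe_conversions.h) clamps a value to the destination type's range, which would fold the explicit INT_MAX branch into a single call. A sketch of how the max-tracking tail could read, as an illustration rather than the committed code:

  #include "base/numerics/safe_conversions.h"

  // Clamp the incoming uint32_t into int32_t range, then keep the maximum.
  int32_t new_max = base::saturated_cast<int32_t>(max_allocated_bytes);
  if (new_max > max_allocated_bytes_)
    base::subtle::NoBarrier_Store(&max_allocated_bytes_, new_max);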
210
167 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { 211 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
168 // Snapshotting and storing current state. 212 // Snapshotting and storing current state.
169 last_phase_snapshot_ = new DeathDataPhaseSnapshot( 213 last_phase_snapshot_ =
170 profiling_phase, count(), run_duration_sum(), run_duration_max(), 214 new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_);
171 run_duration_sample(), queue_duration_sum(), queue_duration_max(),
172 queue_duration_sample(), last_phase_snapshot_);
173 215
174 // Not touching fields for which a delta can be computed by comparing with a 216 // Not touching fields for which a delta can be computed by comparing with a
175 // snapshot from the previous phase. Resetting other fields. Sample values 217 // snapshot from the previous phase. Resetting other fields. Sample values
176 // will be reset upon next death recording because sample_probability_count_ 218 // will be reset upon next death recording because sample_probability_count_
177 // is set to 0. 219 // is set to 0.
178 // We avoid resetting to 0 in favor of deltas whenever possible. The reason 220 // We avoid resetting to 0 in favor of deltas whenever possible. The reason
179 // is that for incrementable fields, resetting to 0 from the snapshot thread 221 // is that for incrementable fields, resetting to 0 from the snapshot thread
180 // potentially in parallel with incrementing in the death thread may result in 222 // potentially in parallel with incrementing in the death thread may result in
181 // significant data corruption that has a potential to grow with time. Not 223 // significant data corruption that has a potential to grow with time. Not
182 // resetting incrementable fields and using deltas will cause any 224 // resetting incrementable fields and using deltas will cause any
(...skipping 19 matching lines...)
202 } 244 }
203 245
204 //------------------------------------------------------------------------------ 246 //------------------------------------------------------------------------------
205 DeathDataSnapshot::DeathDataSnapshot() 247 DeathDataSnapshot::DeathDataSnapshot()
206 : count(-1), 248 : count(-1),
207 run_duration_sum(-1), 249 run_duration_sum(-1),
208 run_duration_max(-1), 250 run_duration_max(-1),
209 run_duration_sample(-1), 251 run_duration_sample(-1),
210 queue_duration_sum(-1), 252 queue_duration_sum(-1),
211 queue_duration_max(-1), 253 queue_duration_max(-1),
212 queue_duration_sample(-1) { 254 queue_duration_sample(-1),
213 } 255 alloc_ops(-1),
256 free_ops(-1),
257 allocated_bytes(-1),
258 freed_bytes(-1),
259 alloc_overhead_bytes(-1),
260 max_allocated_bytes(-1) {}
214 261
215 DeathDataSnapshot::DeathDataSnapshot(int count, 262 DeathDataSnapshot::DeathDataSnapshot(int count,
216 int32_t run_duration_sum, 263 int32_t run_duration_sum,
217 int32_t run_duration_max, 264 int32_t run_duration_max,
218 int32_t run_duration_sample, 265 int32_t run_duration_sample,
219 int32_t queue_duration_sum, 266 int32_t queue_duration_sum,
220 int32_t queue_duration_max, 267 int32_t queue_duration_max,
221 int32_t queue_duration_sample) 268 int32_t queue_duration_sample,
269 int32_t alloc_ops,
270 int32_t free_ops,
271 int32_t allocated_bytes,
272 int32_t freed_bytes,
273 int32_t alloc_overhead_bytes,
274 int32_t max_allocated_bytes)
222 : count(count), 275 : count(count),
223 run_duration_sum(run_duration_sum), 276 run_duration_sum(run_duration_sum),
224 run_duration_max(run_duration_max), 277 run_duration_max(run_duration_max),
225 run_duration_sample(run_duration_sample), 278 run_duration_sample(run_duration_sample),
226 queue_duration_sum(queue_duration_sum), 279 queue_duration_sum(queue_duration_sum),
227 queue_duration_max(queue_duration_max), 280 queue_duration_max(queue_duration_max),
228 queue_duration_sample(queue_duration_sample) {} 281 queue_duration_sample(queue_duration_sample),
282 alloc_ops(alloc_ops),
283 free_ops(free_ops),
284 allocated_bytes(allocated_bytes),
285 freed_bytes(freed_bytes),
286 alloc_overhead_bytes(alloc_overhead_bytes),
287 max_allocated_bytes(max_allocated_bytes) {}
288
289 DeathDataSnapshot::DeathDataSnapshot(const DeathData& death_data)
290 : count(death_data.count()),
291 run_duration_sum(death_data.run_duration_sum()),
292 run_duration_max(death_data.run_duration_max()),
293 run_duration_sample(death_data.run_duration_sample()),
294 queue_duration_sum(death_data.queue_duration_sum()),
295 queue_duration_max(death_data.queue_duration_max()),
296 queue_duration_sample(death_data.queue_duration_sample()),
297 alloc_ops(death_data.alloc_ops()),
298 free_ops(death_data.free_ops()),
299 allocated_bytes(death_data.allocated_bytes()),
300 freed_bytes(death_data.freed_bytes()),
301 alloc_overhead_bytes(death_data.alloc_overhead_bytes()),
302 max_allocated_bytes(death_data.max_allocated_bytes()) {}
229 303
230 DeathDataSnapshot::~DeathDataSnapshot() { 304 DeathDataSnapshot::~DeathDataSnapshot() {
231 } 305 }
232 306
233 DeathDataSnapshot DeathDataSnapshot::Delta( 307 DeathDataSnapshot DeathDataSnapshot::Delta(
234 const DeathDataSnapshot& older) const { 308 const DeathDataSnapshot& older) const {
235 return DeathDataSnapshot(count - older.count, 309 return DeathDataSnapshot(
236 run_duration_sum - older.run_duration_sum, 310 count - older.count, run_duration_sum - older.run_duration_sum,
237 run_duration_max, run_duration_sample, 311 run_duration_max, run_duration_sample,
238 queue_duration_sum - older.queue_duration_sum, 312 queue_duration_sum - older.queue_duration_sum, queue_duration_max,
239 queue_duration_max, queue_duration_sample); 313 queue_duration_sample, alloc_ops - older.alloc_ops,
314 free_ops - older.free_ops, allocated_bytes - older.allocated_bytes,
315 freed_bytes - older.freed_bytes,
316 alloc_overhead_bytes - older.alloc_overhead_bytes, max_allocated_bytes);
240 } 317 }
241 318
242 //------------------------------------------------------------------------------ 319 //------------------------------------------------------------------------------
243 BirthOnThread::BirthOnThread(const Location& location, 320 BirthOnThread::BirthOnThread(const Location& location,
244 const ThreadData& current) 321 const ThreadData& current)
245 : location_(location), 322 : location_(location),
246 birth_thread_(&current) { 323 birth_thread_(&current) {
247 } 324 }
248 325
249 //------------------------------------------------------------------------------ 326 //------------------------------------------------------------------------------
(...skipping 198 matching lines...)
448 525
449 // Add births that are still active -- i.e. objects that have tallied a birth, 526 // Add births that are still active -- i.e. objects that have tallied a birth,
450 // but have not yet tallied a matching death, and hence must be either 527 // but have not yet tallied a matching death, and hence must be either
451 // running, queued up, or being held in limbo for future posting. 528 // running, queued up, or being held in limbo for future posting.
452 auto* current_phase_tasks = 529 auto* current_phase_tasks =
453 &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks; 530 &process_data_snapshot->phased_snapshots[current_profiling_phase].tasks;
454 for (const auto& birth_count : birth_counts) { 531 for (const auto& birth_count : birth_counts) {
455 if (birth_count.second > 0) { 532 if (birth_count.second > 0) {
456 current_phase_tasks->push_back( 533 current_phase_tasks->push_back(
457 TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first), 534 TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first),
458 DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0), 535 DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0,
536 0, 0, 0, 0, 0, 0),
459 "Still_Alive")); 537 "Still_Alive"));
460 } 538 }
461 } 539 }
462 } 540 }
463 541
464 // static 542 // static
465 void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) { 543 void ThreadData::OnProfilingPhaseCompleted(int profiling_phase) {
466 // Get an unchanging copy of a ThreadData list. 544 // Get an unchanging copy of a ThreadData list.
467 ThreadData* my_list = ThreadData::first(); 545 ThreadData* my_list = ThreadData::first();
468 546
(...skipping 38 matching lines...)
507 static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0)); 585 static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0));
508 586
509 DeathMap::iterator it = death_map_.find(&births); 587 DeathMap::iterator it = death_map_.find(&births);
510 DeathData* death_data; 588 DeathData* death_data;
511 if (it != death_map_.end()) { 589 if (it != death_map_.end()) {
512 death_data = &it->second; 590 death_data = &it->second;
513 } else { 591 } else {
514 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now. 592 base::AutoLock lock(map_lock_); // Lock as the map may get relocated now.
515 death_data = &death_map_[&births]; 593 death_data = &death_map_[&births];
516 } // Release lock ASAP. 594 } // Release lock ASAP.
517 death_data->RecordDeath(queue_duration, run_duration, random_number_); 595 death_data->RecordDurations(queue_duration, run_duration, random_number_);
596
597 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
598 // TODO(siggi): Make this conditional on whether heap tracking is enabled.
599 // TODO(siggi): Should these be passed as uint64_t perhaps?
600 // DO NOT SUBMIT
601 base::debug::ThreadAllocatorUsage heap_usage = stopwatch.heap_usage().usage();
602 death_data->RecordAllocations(
603 static_cast<int32_t>(heap_usage.alloc_ops),
604 static_cast<int32_t>(heap_usage.free_ops),
605 static_cast<int32_t>(heap_usage.alloc_bytes),
606 static_cast<int32_t>(heap_usage.free_bytes),
607 static_cast<int32_t>(heap_usage.alloc_overhead_bytes),
608 static_cast<int32_t>(heap_usage.max_allocated_bytes));
609 #endif
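
The saturated_cast suggestion from the earlier thread would also apply at this call site: assuming, as the TODO implies, that the ThreadAllocatorUsage counters are wider than 32 bits, a plain static_cast truncates on overflow where a clamp is likely intended. A hedged variant (illustrative only; the DO NOT SUBMIT marker shows these types were still in flux):

  death_data->RecordAllocations(
      base::saturated_cast<uint32_t>(heap_usage.alloc_ops),
      base::saturated_cast<uint32_t>(heap_usage.free_ops),
      base::saturated_cast<uint32_t>(heap_usage.alloc_bytes),
      base::saturated_cast<uint32_t>(heap_usage.free_bytes),
      base::saturated_cast<uint32_t>(heap_usage.alloc_overhead_bytes),
      base::saturated_cast<uint32_t>(heap_usage.max_allocated_bytes));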
518 } 610 }
519 611
520 // static 612 // static
521 Births* ThreadData::TallyABirthIfActive(const Location& location) { 613 Births* ThreadData::TallyABirthIfActive(const Location& location) {
522 if (!TrackingStatus()) 614 if (!TrackingStatus())
523 return NULL; 615 return NULL;
524 ThreadData* current_thread_data = Get(); 616 ThreadData* current_thread_data = Get();
525 if (!current_thread_data) 617 if (!current_thread_data)
526 return NULL; 618 return NULL;
527 return current_thread_data->TallyABirth(location); 619 return current_thread_data->TallyABirth(location);
(...skipping 118 matching lines...)
646 BirthMap* birth_map, 738 BirthMap* birth_map,
647 DeathsSnapshot* deaths) { 739 DeathsSnapshot* deaths) {
648 base::AutoLock lock(map_lock_); 740 base::AutoLock lock(map_lock_);
649 741
650 for (const auto& birth : birth_map_) 742 for (const auto& birth : birth_map_)
651 (*birth_map)[birth.first] = birth.second; 743 (*birth_map)[birth.first] = birth.second;
652 744
653 for (const auto& death : death_map_) { 745 for (const auto& death : death_map_) {
654 deaths->push_back(std::make_pair( 746 deaths->push_back(std::make_pair(
655 death.first, 747 death.first,
656 DeathDataPhaseSnapshot(profiling_phase, death.second.count(), 748 DeathDataPhaseSnapshot(profiling_phase, death.second,
657 death.second.run_duration_sum(),
658 death.second.run_duration_max(),
659 death.second.run_duration_sample(),
660 death.second.queue_duration_sum(),
661 death.second.queue_duration_max(),
662 death.second.queue_duration_sample(),
663 death.second.last_phase_snapshot()))); 749 death.second.last_phase_snapshot())));
664 } 750 }
665 } 751 }
666 752
667 void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) { 753 void ThreadData::OnProfilingPhaseCompletedOnThread(int profiling_phase) {
668 base::AutoLock lock(map_lock_); 754 base::AutoLock lock(map_lock_);
669 755
670 for (auto& death : death_map_) { 756 for (auto& death : death_map_) {
671 death.second.OnProfilingPhaseCompleted(profiling_phase); 757 death.second.OnProfilingPhaseCompleted(profiling_phase);
672 } 758 }
(...skipping 159 matching lines...)
832 #endif 918 #endif
833 } 919 }
834 920
835 void TaskStopwatch::Start() { 921 void TaskStopwatch::Start() {
836 #if DCHECK_IS_ON() 922 #if DCHECK_IS_ON()
837 DCHECK(state_ == CREATED); 923 DCHECK(state_ == CREATED);
838 state_ = RUNNING; 924 state_ = RUNNING;
839 #endif 925 #endif
840 926
841 start_time_ = ThreadData::Now(); 927 start_time_ = ThreadData::Now();
928 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
929 heap_usage_.Start();
930 #endif
842 931
843 current_thread_data_ = ThreadData::Get(); 932 current_thread_data_ = ThreadData::Get();
844 if (!current_thread_data_) 933 if (!current_thread_data_)
845 return; 934 return;
846 935
847 parent_ = current_thread_data_->current_stopwatch_; 936 parent_ = current_thread_data_->current_stopwatch_;
848 #if DCHECK_IS_ON() 937 #if DCHECK_IS_ON()
849 if (parent_) { 938 if (parent_) {
850 DCHECK(parent_->state_ == RUNNING); 939 DCHECK(parent_->state_ == RUNNING);
851 DCHECK(parent_->child_ == NULL); 940 DCHECK(parent_->child_ == NULL);
852 parent_->child_ = this; 941 parent_->child_ = this;
853 } 942 }
854 #endif 943 #endif
855 current_thread_data_->current_stopwatch_ = this; 944 current_thread_data_->current_stopwatch_ = this;
856 } 945 }
857 946
858 void TaskStopwatch::Stop() { 947 void TaskStopwatch::Stop() {
859 const TrackedTime end_time = ThreadData::Now(); 948 const TrackedTime end_time = ThreadData::Now();
860 #if DCHECK_IS_ON() 949 #if DCHECK_IS_ON()
861 DCHECK(state_ == RUNNING); 950 DCHECK(state_ == RUNNING);
862 state_ = STOPPED; 951 state_ = STOPPED;
863 DCHECK(child_ == NULL); 952 DCHECK(child_ == NULL);
953
954 #endif
955 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
956 heap_usage_.Stop(true);
864 #endif 957 #endif
865 958
866 if (!start_time_.is_null() && !end_time.is_null()) { 959 if (!start_time_.is_null() && !end_time.is_null()) {
867 wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds(); 960 wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds();
868 } 961 }
869 962
870 if (!current_thread_data_) 963 if (!current_thread_data_)
871 return; 964 return;
872 965
873 DCHECK(current_thread_data_->current_stopwatch_ == this); 966 DCHECK(current_thread_data_->current_stopwatch_ == this);
(...skipping 19 matching lines...)
893 } 986 }
894 987
895 int32_t TaskStopwatch::RunDurationMs() const { 988 int32_t TaskStopwatch::RunDurationMs() const {
896 #if DCHECK_IS_ON() 989 #if DCHECK_IS_ON()
897 DCHECK(state_ == STOPPED); 990 DCHECK(state_ == STOPPED);
898 #endif 991 #endif
899 992
900 return wallclock_duration_ms_ - excluded_duration_ms_; 993 return wallclock_duration_ms_ - excluded_duration_ms_;
901 } 994 }
902 995
996 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
997 const base::debug::HeapUsageTracker& TaskStopwatch::heap_usage() const {
998 return heap_usage_;
999 }
1000 #endif
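
Together with Start() and Stop() above, this accessor completes the wiring: each stopwatch now brackets a task with a heap-usage measurement that TallyADeath folds into DeathData. A minimal usage sketch relying only on interfaces visible in this patch (RunTask is a hypothetical task body):

  TaskStopwatch stopwatch;
  stopwatch.Start();
  RunTask();  // Hypothetical: heap operations on this thread are tracked.
  stopwatch.Stop();

  #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
  const base::debug::ThreadAllocatorUsage usage =
      stopwatch.heap_usage().usage();
  // Fields consumed by TallyADeath: usage.alloc_ops, usage.free_ops,
  // usage.alloc_bytes, usage.free_bytes, usage.alloc_overhead_bytes,
  // usage.max_allocated_bytes.
  #endif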
1001
903 ThreadData* TaskStopwatch::GetThreadData() const { 1002 ThreadData* TaskStopwatch::GetThreadData() const {
904 #if DCHECK_IS_ON() 1003 #if DCHECK_IS_ON()
905 DCHECK(state_ != CREATED); 1004 DCHECK(state_ != CREATED);
906 #endif 1005 #endif
907 1006
908 return current_thread_data_; 1007 return current_thread_data_;
909 } 1008 }
910 1009
911 //------------------------------------------------------------------------------ 1010 //------------------------------------------------------------------------------
912 // DeathDataPhaseSnapshot 1011 // DeathDataPhaseSnapshot
913 1012
914 DeathDataPhaseSnapshot::DeathDataPhaseSnapshot( 1013 DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(
915 int profiling_phase, 1014 int profiling_phase,
916 int count, 1015 const DeathData& death,
917 int32_t run_duration_sum,
918 int32_t run_duration_max,
919 int32_t run_duration_sample,
920 int32_t queue_duration_sum,
921 int32_t queue_duration_max,
922 int32_t queue_duration_sample,
923 const DeathDataPhaseSnapshot* prev) 1016 const DeathDataPhaseSnapshot* prev)
924 : profiling_phase(profiling_phase), 1017 : profiling_phase(profiling_phase), death_data(death), prev(prev) {}
925 death_data(count,
926 run_duration_sum,
927 run_duration_max,
928 run_duration_sample,
929 queue_duration_sum,
930 queue_duration_max,
931 queue_duration_sample),
932 prev(prev) {}
933 1018
934 //------------------------------------------------------------------------------ 1019 //------------------------------------------------------------------------------
935 // TaskSnapshot 1020 // TaskSnapshot
936 1021
937 TaskSnapshot::TaskSnapshot() { 1022 TaskSnapshot::TaskSnapshot() {
938 } 1023 }
939 1024
940 TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth, 1025 TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth,
941 const DeathDataSnapshot& death_data, 1026 const DeathDataSnapshot& death_data,
942 const std::string& death_thread_name) 1027 const std::string& death_thread_name)
(...skipping 28 matching lines...)
971 #endif 1056 #endif
972 } 1057 }
973 1058
974 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = 1059 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) =
975 default; 1060 default;
976 1061
977 ProcessDataSnapshot::~ProcessDataSnapshot() { 1062 ProcessDataSnapshot::~ProcessDataSnapshot() {
978 } 1063 }
979 1064
980 } // namespace tracked_objects 1065 } // namespace tracked_objects