Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(74)

Side by Side Diff: base/tracked_objects.cc

Issue 2859493002: Tracked objects: Bump cumulative byte count storage to 64 bits to avoid saturation (Closed)
Patch Set: Fix 64 bit compile, doofus!. Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/tracked_objects.h ('k') | base/tracked_objects_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/tracked_objects.h" 5 #include "base/tracked_objects.h"
6 6
7 #include <ctype.h> 7 #include <ctype.h>
8 #include <limits.h> 8 #include <limits.h>
9 #include <stdlib.h> 9 #include <stdlib.h>
10 10
11 #include <limits>
12
11 #include "base/atomicops.h" 13 #include "base/atomicops.h"
12 #include "base/base_switches.h" 14 #include "base/base_switches.h"
13 #include "base/command_line.h" 15 #include "base/command_line.h"
14 #include "base/compiler_specific.h" 16 #include "base/compiler_specific.h"
15 #include "base/debug/leak_annotations.h" 17 #include "base/debug/leak_annotations.h"
16 #include "base/logging.h" 18 #include "base/logging.h"
17 #include "base/metrics/histogram_macros.h" 19 #include "base/metrics/histogram_macros.h"
18 #include "base/numerics/safe_conversions.h" 20 #include "base/numerics/safe_conversions.h"
19 #include "base/numerics/safe_math.h" 21 #include "base/numerics/safe_math.h"
20 #include "base/process/process_handle.h" 22 #include "base/process/process_handle.h"
21 #include "base/third_party/valgrind/memcheck.h" 23 #include "base/third_party/valgrind/memcheck.h"
24 #include "base/threading/platform_thread.h"
22 #include "base/threading/worker_pool.h" 25 #include "base/threading/worker_pool.h"
23 #include "base/tracking_info.h" 26 #include "base/tracking_info.h"
24 #include "build/build_config.h" 27 #include "build/build_config.h"
25 28
26 using base::TimeDelta; 29 using base::TimeDelta;
27 30
28 namespace base { 31 namespace base {
29 class TimeDelta; 32 class TimeDelta;
30 } 33 }
31 34
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after
103 106
104 DeathData::DeathData() 107 DeathData::DeathData()
105 : count_(0), 108 : count_(0),
106 sample_probability_count_(0), 109 sample_probability_count_(0),
107 run_duration_sum_(0), 110 run_duration_sum_(0),
108 queue_duration_sum_(0), 111 queue_duration_sum_(0),
109 run_duration_max_(0), 112 run_duration_max_(0),
110 queue_duration_max_(0), 113 queue_duration_max_(0),
111 alloc_ops_(0), 114 alloc_ops_(0),
112 free_ops_(0), 115 free_ops_(0),
113 allocated_bytes_(0), 116 #if !defined(ARCH_CPU_64_BITS)
114 freed_bytes_(0), 117 byte_update_counter_(0),
115 alloc_overhead_bytes_(0), 118 #endif
119 allocated_bytes_(),
120 freed_bytes_(),
121 alloc_overhead_bytes_(),
116 max_allocated_bytes_(0), 122 max_allocated_bytes_(0),
117 run_duration_sample_(0), 123 run_duration_sample_(0),
118 queue_duration_sample_(0), 124 queue_duration_sample_(0),
119 last_phase_snapshot_(nullptr) {} 125 last_phase_snapshot_(nullptr) {
126 }
120 127
121 DeathData::DeathData(const DeathData& other) 128 DeathData::DeathData(const DeathData& other)
122 : count_(other.count_), 129 : count_(other.count_),
123 sample_probability_count_(other.sample_probability_count_), 130 sample_probability_count_(other.sample_probability_count_),
124 run_duration_sum_(other.run_duration_sum_), 131 run_duration_sum_(other.run_duration_sum_),
125 queue_duration_sum_(other.queue_duration_sum_), 132 queue_duration_sum_(other.queue_duration_sum_),
126 run_duration_max_(other.run_duration_max_), 133 run_duration_max_(other.run_duration_max_),
127 queue_duration_max_(other.queue_duration_max_), 134 queue_duration_max_(other.queue_duration_max_),
128 alloc_ops_(other.alloc_ops_), 135 alloc_ops_(other.alloc_ops_),
129 free_ops_(other.free_ops_), 136 free_ops_(other.free_ops_),
137 #if !defined(ARCH_CPU_64_BITS)
138 byte_update_counter_(0),
139 #endif
130 allocated_bytes_(other.allocated_bytes_), 140 allocated_bytes_(other.allocated_bytes_),
131 freed_bytes_(other.freed_bytes_), 141 freed_bytes_(other.freed_bytes_),
132 alloc_overhead_bytes_(other.alloc_overhead_bytes_), 142 alloc_overhead_bytes_(other.alloc_overhead_bytes_),
133 max_allocated_bytes_(other.max_allocated_bytes_), 143 max_allocated_bytes_(other.max_allocated_bytes_),
134 run_duration_sample_(other.run_duration_sample_), 144 run_duration_sample_(other.run_duration_sample_),
135 queue_duration_sample_(other.queue_duration_sample_), 145 queue_duration_sample_(other.queue_duration_sample_),
136 last_phase_snapshot_(nullptr) { 146 last_phase_snapshot_(nullptr) {
137 // This constructor will be used by std::map when adding new DeathData values 147 // This constructor will be used by std::map when adding new DeathData values
138 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't 148 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't
139 // need to worry about ownership transfer. 149 // need to worry about ownership transfer.
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after
196 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration); 206 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration);
197 } 207 }
198 } 208 }
199 209
200 void DeathData::RecordAllocations(const uint32_t alloc_ops, 210 void DeathData::RecordAllocations(const uint32_t alloc_ops,
201 const uint32_t free_ops, 211 const uint32_t free_ops,
202 const uint32_t allocated_bytes, 212 const uint32_t allocated_bytes,
203 const uint32_t freed_bytes, 213 const uint32_t freed_bytes,
204 const uint32_t alloc_overhead_bytes, 214 const uint32_t alloc_overhead_bytes,
205 const uint32_t max_allocated_bytes) { 215 const uint32_t max_allocated_bytes) {
216 #if !defined(ARCH_CPU_64_BITS)
217 // On 32 bit systems, we use an even/odd locking scheme to make it possible to
218 // read 64 bit sums consistently. Note that since writes are bound to the
219 // thread owning this DeathData, there's no race on these writes.
220 int32_t counter_val =
221 base::subtle::Barrier_AtomicIncrement(&byte_update_counter_, 1);
222 // The counter must be odd.
223 DCHECK_EQ(1, counter_val & 1);
224 #endif
225
206 // Use saturating arithmetic. 226 // Use saturating arithmetic.
207 SaturatingMemberAdd(alloc_ops, &alloc_ops_); 227 SaturatingMemberAdd(alloc_ops, &alloc_ops_);
208 SaturatingMemberAdd(free_ops, &free_ops_); 228 SaturatingMemberAdd(free_ops, &free_ops_);
209 SaturatingMemberAdd(allocated_bytes, &allocated_bytes_); 229 SaturatingByteCountMemberAdd(allocated_bytes, &allocated_bytes_);
210 SaturatingMemberAdd(freed_bytes, &freed_bytes_); 230 SaturatingByteCountMemberAdd(freed_bytes, &freed_bytes_);
211 SaturatingMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_); 231 SaturatingByteCountMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_);
212 232
213 int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes); 233 int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes);
214 if (max > max_allocated_bytes_) 234 if (max > max_allocated_bytes_)
215 base::subtle::NoBarrier_Store(&max_allocated_bytes_, max); 235 base::subtle::NoBarrier_Store(&max_allocated_bytes_, max);
236
237 #if !defined(ARCH_CPU_64_BITS)
238 // Now release the value while rolling to even.
239 counter_val = base::subtle::Barrier_AtomicIncrement(&byte_update_counter_, 1);
240 DCHECK_EQ(0, counter_val & 1);
241 #endif
216 } 242 }
217 243
218 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { 244 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
219 // Snapshotting and storing current state. 245 // Snapshotting and storing current state.
220 last_phase_snapshot_ = 246 last_phase_snapshot_ =
221 new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_); 247 new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_);
222 248
223 // Not touching fields for which a delta can be computed by comparing with a 249 // Not touching fields for which a delta can be computed by comparing with a
224 // snapshot from the previous phase. Resetting other fields. Sample values 250 // snapshot from the previous phase. Resetting other fields. Sample values
225 // will be reset upon next death recording because sample_probability_count_ 251 // will be reset upon next death recording because sample_probability_count_
(...skipping 17 matching lines...) Expand all
243 // The damage is limited to selecting a wrong sample, which is not something 269 // The damage is limited to selecting a wrong sample, which is not something
244 // that can cause accumulating or cascading effects. 270 // that can cause accumulating or cascading effects.
245 // If there were no inconsistencies caused by race conditions, we never send a 271 // If there were no inconsistencies caused by race conditions, we never send a
246 // sample for the previous phase in the next phase's snapshot because 272 // sample for the previous phase in the next phase's snapshot because
247 // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count. 273 // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count.
248 base::subtle::NoBarrier_Store(&sample_probability_count_, 0); 274 base::subtle::NoBarrier_Store(&sample_probability_count_, 0);
249 base::subtle::NoBarrier_Store(&run_duration_max_, 0); 275 base::subtle::NoBarrier_Store(&run_duration_max_, 0);
250 base::subtle::NoBarrier_Store(&queue_duration_max_, 0); 276 base::subtle::NoBarrier_Store(&queue_duration_max_, 0);
251 } 277 }
252 278
279 // static
280 int64_t DeathData::UnsafeCumulativeByteCountRead(
281 const CumulativeByteCount* count) {
282 #if defined(ARCH_CPU_64_BITS)
283 return base::subtle::NoBarrier_Load(count);
284 #else
285 return static_cast<int64_t>(base::subtle::NoBarrier_Load(&count->hi_word))
286 << 32 |
287 static_cast<uint32_t>(base::subtle::NoBarrier_Load(&count->lo_word));
288 #endif
289 }
290
291 int64_t DeathData::ConsistentCumulativeByteCountRead(
292 const CumulativeByteCount* count) const {
293 #if defined(ARCH_CPU_64_BITS)
294 return base::subtle::NoBarrier_Load(count);
295 #else
296 // We're on a 32 bit system, so this is going to be complicated.
297 while (true) {
298 int32_t update_counter = 0;
299 // Acquire the starting count, spin until it's even.
300
301 // The value of |kYieldProcessorTries| is cargo culted from the page
302 // allocator, TCMalloc, Windows critical section defaults, and various other
303 // recommendations.
304 // This is not performance critical here, as the reads are vanishingly rare
305 // and only happen under the --enable-heap-profiling=task-profiler flag.
306 constexpr size_t kYieldProcessorTries = 1000;
307 size_t lock_attempts = 0;
308 do {
309 ++lock_attempts;
310 if (lock_attempts == kYieldProcessorTries) {
311 // Yield the current thread periodically to avoid writer starvation.
312 base::PlatformThread::YieldCurrentThread();
313 lock_attempts = 0;
314 }
315
316 update_counter = base::subtle::NoBarrier_Load(&byte_update_counter_);
317 } while (update_counter & 1);
318
319 // Make sure the reads below see all changes before the update counter.
320 base::subtle::MemoryBarrier();
321
322 DCHECK_EQ(update_counter & 1, 0);
323
324 int64_t value =
325 static_cast<int64_t>(base::subtle::NoBarrier_Load(&count->hi_word))
326 << 32 |
327 static_cast<uint32_t>(base::subtle::NoBarrier_Load(&count->lo_word));
328
329 // Release_Load() semantics here ensure that the |byte_update_counter_|
330 // value seen is at least as old as the |hi_word|/|lo_word| values seen
331 // above, which means that if it's still equal to |update_counter|, the read
332 // is consistent, since the above MemoryBarrier() ensures they're at least
333 // as new as the afore-obtained |update_counter|'s value.
334 if (update_counter == base::subtle::Release_Load(&byte_update_counter_))
335 return value;
336 }
337 #endif
338 }
339
340 // static
253 void DeathData::SaturatingMemberAdd(const uint32_t addend, 341 void DeathData::SaturatingMemberAdd(const uint32_t addend,
254 base::subtle::Atomic32* sum) { 342 base::subtle::Atomic32* sum) {
343 constexpr int32_t kInt32Max = std::numeric_limits<int32_t>::max();
255 // Bail quick if no work or already saturated. 344 // Bail quick if no work or already saturated.
256 if (addend == 0U || *sum == INT_MAX) 345 if (addend == 0U || *sum == kInt32Max)
257 return; 346 return;
258 347
259 base::CheckedNumeric<int32_t> new_sum = *sum; 348 base::CheckedNumeric<int32_t> new_sum = *sum;
260 new_sum += addend; 349 new_sum += addend;
261 base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX)); 350 base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(kInt32Max));
351 }
352
353 void DeathData::SaturatingByteCountMemberAdd(const uint32_t addend,
354 CumulativeByteCount* sum) {
355 constexpr int64_t kInt64Max = std::numeric_limits<int64_t>::max();
356 // Bail quick if no work or already saturated.
357 if (addend == 0U || UnsafeCumulativeByteCountRead(sum) == kInt64Max)
358 return;
359
360 base::CheckedNumeric<int64_t> new_sum = UnsafeCumulativeByteCountRead(sum);
361 new_sum += addend;
362 int64_t new_value = new_sum.ValueOrDefault(kInt64Max);
363 // Update our value.
364 #if defined(ARCH_CPU_64_BITS)
365 base::subtle::NoBarrier_Store(sum, new_value);
366 #else
367 // This must only be called while the update counter is "locked" (i.e. odd).
368 DCHECK_EQ(base::subtle::NoBarrier_Load(&byte_update_counter_) & 1, 1);
369
370 base::subtle::NoBarrier_Store(&sum->hi_word,
371 static_cast<int32_t>(new_value >> 32));
372 base::subtle::NoBarrier_Store(&sum->lo_word,
373 static_cast<int32_t>(new_value & 0xFFFFFFFF));
374 #endif
262 } 375 }
263 376
264 //------------------------------------------------------------------------------ 377 //------------------------------------------------------------------------------
265 DeathDataSnapshot::DeathDataSnapshot() 378 DeathDataSnapshot::DeathDataSnapshot()
266 : count(-1), 379 : count(-1),
267 run_duration_sum(-1), 380 run_duration_sum(-1),
268 run_duration_max(-1), 381 run_duration_max(-1),
269 run_duration_sample(-1), 382 run_duration_sample(-1),
270 queue_duration_sum(-1), 383 queue_duration_sum(-1),
271 queue_duration_max(-1), 384 queue_duration_max(-1),
272 queue_duration_sample(-1), 385 queue_duration_sample(-1),
273 alloc_ops(-1), 386 alloc_ops(-1),
274 free_ops(-1), 387 free_ops(-1),
275 allocated_bytes(-1), 388 allocated_bytes(-1),
276 freed_bytes(-1), 389 freed_bytes(-1),
277 alloc_overhead_bytes(-1), 390 alloc_overhead_bytes(-1),
278 max_allocated_bytes(-1) {} 391 max_allocated_bytes(-1) {}
279 392
280 DeathDataSnapshot::DeathDataSnapshot(int count, 393 DeathDataSnapshot::DeathDataSnapshot(int count,
281 int32_t run_duration_sum, 394 int32_t run_duration_sum,
282 int32_t run_duration_max, 395 int32_t run_duration_max,
283 int32_t run_duration_sample, 396 int32_t run_duration_sample,
284 int32_t queue_duration_sum, 397 int32_t queue_duration_sum,
285 int32_t queue_duration_max, 398 int32_t queue_duration_max,
286 int32_t queue_duration_sample, 399 int32_t queue_duration_sample,
287 int32_t alloc_ops, 400 int32_t alloc_ops,
288 int32_t free_ops, 401 int32_t free_ops,
289 int32_t allocated_bytes, 402 int64_t allocated_bytes,
290 int32_t freed_bytes, 403 int64_t freed_bytes,
291 int32_t alloc_overhead_bytes, 404 int64_t alloc_overhead_bytes,
292 int32_t max_allocated_bytes) 405 int32_t max_allocated_bytes)
293 : count(count), 406 : count(count),
294 run_duration_sum(run_duration_sum), 407 run_duration_sum(run_duration_sum),
295 run_duration_max(run_duration_max), 408 run_duration_max(run_duration_max),
296 run_duration_sample(run_duration_sample), 409 run_duration_sample(run_duration_sample),
297 queue_duration_sum(queue_duration_sum), 410 queue_duration_sum(queue_duration_sum),
298 queue_duration_max(queue_duration_max), 411 queue_duration_max(queue_duration_max),
299 queue_duration_sample(queue_duration_sample), 412 queue_duration_sample(queue_duration_sample),
300 alloc_ops(alloc_ops), 413 alloc_ops(alloc_ops),
301 free_ops(free_ops), 414 free_ops(free_ops),
(...skipping 774 matching lines...) Expand 10 before | Expand all | Expand 10 after
1076 #endif 1189 #endif
1077 } 1190 }
1078 1191
1079 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = 1192 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) =
1080 default; 1193 default;
1081 1194
1082 ProcessDataSnapshot::~ProcessDataSnapshot() { 1195 ProcessDataSnapshot::~ProcessDataSnapshot() {
1083 } 1196 }
1084 1197
1085 } // namespace tracked_objects 1198 } // namespace tracked_objects
OLDNEW
« no previous file with comments | « base/tracked_objects.h ('k') | base/tracked_objects_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698