Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(14)

Side by Side Diff: base/tracked_objects.cc

Issue 2859493002: Tracked objects: Bump cumulative byte count storage to 64 bits to avoid saturation (Closed)
Patch Set: Address Gab's comments. Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/tracked_objects.h" 5 #include "base/tracked_objects.h"
6 6
7 #include <ctype.h> 7 #include <ctype.h>
8 #include <limits.h> 8 #include <limits.h>
9 #include <stdlib.h> 9 #include <stdlib.h>
10 10
11 #include <limits>
12
11 #include "base/atomicops.h" 13 #include "base/atomicops.h"
12 #include "base/base_switches.h" 14 #include "base/base_switches.h"
13 #include "base/command_line.h" 15 #include "base/command_line.h"
14 #include "base/compiler_specific.h" 16 #include "base/compiler_specific.h"
15 #include "base/debug/leak_annotations.h" 17 #include "base/debug/leak_annotations.h"
16 #include "base/logging.h" 18 #include "base/logging.h"
17 #include "base/metrics/histogram_macros.h" 19 #include "base/metrics/histogram_macros.h"
18 #include "base/numerics/safe_conversions.h" 20 #include "base/numerics/safe_conversions.h"
19 #include "base/numerics/safe_math.h" 21 #include "base/numerics/safe_math.h"
20 #include "base/process/process_handle.h" 22 #include "base/process/process_handle.h"
21 #include "base/third_party/valgrind/memcheck.h" 23 #include "base/third_party/valgrind/memcheck.h"
24 #include "base/threading/platform_thread.h"
22 #include "base/threading/worker_pool.h" 25 #include "base/threading/worker_pool.h"
23 #include "base/tracking_info.h" 26 #include "base/tracking_info.h"
24 #include "build/build_config.h" 27 #include "build/build_config.h"
25 28
26 using base::TimeDelta; 29 using base::TimeDelta;
27 30
28 namespace base { 31 namespace base {
29 class TimeDelta; 32 class TimeDelta;
30 } 33 }
31 34
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after
103 106
104 DeathData::DeathData() 107 DeathData::DeathData()
105 : count_(0), 108 : count_(0),
106 sample_probability_count_(0), 109 sample_probability_count_(0),
107 run_duration_sum_(0), 110 run_duration_sum_(0),
108 queue_duration_sum_(0), 111 queue_duration_sum_(0),
109 run_duration_max_(0), 112 run_duration_max_(0),
110 queue_duration_max_(0), 113 queue_duration_max_(0),
111 alloc_ops_(0), 114 alloc_ops_(0),
112 free_ops_(0), 115 free_ops_(0),
113 allocated_bytes_(0), 116 #if !defined(ARCH_CPU_64_BITS)
114 freed_bytes_(0), 117 byte_update_counter_(0),
115 alloc_overhead_bytes_(0), 118 #endif
119 allocated_bytes_(),
120 freed_bytes_(),
121 alloc_overhead_bytes_(),
116 max_allocated_bytes_(0), 122 max_allocated_bytes_(0),
117 run_duration_sample_(0), 123 run_duration_sample_(0),
118 queue_duration_sample_(0), 124 queue_duration_sample_(0),
119 last_phase_snapshot_(nullptr) {} 125 last_phase_snapshot_(nullptr) {
126 }
120 127
121 DeathData::DeathData(const DeathData& other) 128 DeathData::DeathData(const DeathData& other)
122 : count_(other.count_), 129 : count_(other.count_),
123 sample_probability_count_(other.sample_probability_count_), 130 sample_probability_count_(other.sample_probability_count_),
124 run_duration_sum_(other.run_duration_sum_), 131 run_duration_sum_(other.run_duration_sum_),
125 queue_duration_sum_(other.queue_duration_sum_), 132 queue_duration_sum_(other.queue_duration_sum_),
126 run_duration_max_(other.run_duration_max_), 133 run_duration_max_(other.run_duration_max_),
127 queue_duration_max_(other.queue_duration_max_), 134 queue_duration_max_(other.queue_duration_max_),
128 alloc_ops_(other.alloc_ops_), 135 alloc_ops_(other.alloc_ops_),
129 free_ops_(other.free_ops_), 136 free_ops_(other.free_ops_),
137 #if !defined(ARCH_CPU_64_BITS)
138 byte_update_counter_(0),
139 #endif
130 allocated_bytes_(other.allocated_bytes_), 140 allocated_bytes_(other.allocated_bytes_),
131 freed_bytes_(other.freed_bytes_), 141 freed_bytes_(other.freed_bytes_),
132 alloc_overhead_bytes_(other.alloc_overhead_bytes_), 142 alloc_overhead_bytes_(other.alloc_overhead_bytes_),
133 max_allocated_bytes_(other.max_allocated_bytes_), 143 max_allocated_bytes_(other.max_allocated_bytes_),
134 run_duration_sample_(other.run_duration_sample_), 144 run_duration_sample_(other.run_duration_sample_),
135 queue_duration_sample_(other.queue_duration_sample_), 145 queue_duration_sample_(other.queue_duration_sample_),
136 last_phase_snapshot_(nullptr) { 146 last_phase_snapshot_(nullptr) {
137 // This constructor will be used by std::map when adding new DeathData values 147 // This constructor will be used by std::map when adding new DeathData values
138 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't 148 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't
139 // need to worry about ownership transfer. 149 // need to worry about ownership transfer.
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after
196 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration); 206 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration);
197 } 207 }
198 } 208 }
199 209
200 void DeathData::RecordAllocations(const uint32_t alloc_ops, 210 void DeathData::RecordAllocations(const uint32_t alloc_ops,
201 const uint32_t free_ops, 211 const uint32_t free_ops,
202 const uint32_t allocated_bytes, 212 const uint32_t allocated_bytes,
203 const uint32_t freed_bytes, 213 const uint32_t freed_bytes,
204 const uint32_t alloc_overhead_bytes, 214 const uint32_t alloc_overhead_bytes,
205 const uint32_t max_allocated_bytes) { 215 const uint32_t max_allocated_bytes) {
216 #if !defined(ARCH_CPU_64_BITS)
 217 // On 32 bit systems, we use an even/odd locking scheme to make it possible
218 // read 64 bit sums consistently. Note that since writes are bound to the
219 // thread owning this DeathData, there's no race on these writes.
220 int32_t counter_val =
221 base::subtle::Barrier_AtomicIncrement(&byte_update_counter_, 1);
222 // The counter must be odd.
223 DCHECK_EQ(1, counter_val & 1);
224 #endif
225
206 // Use saturating arithmetic. 226 // Use saturating arithmetic.
207 SaturatingMemberAdd(alloc_ops, &alloc_ops_); 227 SaturatingMemberAdd(alloc_ops, &alloc_ops_);
208 SaturatingMemberAdd(free_ops, &free_ops_); 228 SaturatingMemberAdd(free_ops, &free_ops_);
209 SaturatingMemberAdd(allocated_bytes, &allocated_bytes_); 229 SaturatingByteCountMemberAdd(allocated_bytes, &allocated_bytes_);
210 SaturatingMemberAdd(freed_bytes, &freed_bytes_); 230 SaturatingByteCountMemberAdd(freed_bytes, &freed_bytes_);
211 SaturatingMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_); 231 SaturatingByteCountMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_);
212 232
213 int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes); 233 int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes);
214 if (max > max_allocated_bytes_) 234 if (max > max_allocated_bytes_)
215 base::subtle::NoBarrier_Store(&max_allocated_bytes_, max); 235 base::subtle::NoBarrier_Store(&max_allocated_bytes_, max);
236
237 #if !defined(ARCH_CPU_64_BITS)
238 // Now release the value while rolling to even.
239 counter_val = base::subtle::Barrier_AtomicIncrement(&byte_update_counter_, 1);
240 DCHECK_EQ(0, counter_val & 1);
241 #endif
216 } 242 }
217 243
218 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { 244 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
219 // Snapshotting and storing current state. 245 // Snapshotting and storing current state.
220 last_phase_snapshot_ = 246 last_phase_snapshot_ =
221 new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_); 247 new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_);
222 248
223 // Not touching fields for which a delta can be computed by comparing with a 249 // Not touching fields for which a delta can be computed by comparing with a
224 // snapshot from the previous phase. Resetting other fields. Sample values 250 // snapshot from the previous phase. Resetting other fields. Sample values
225 // will be reset upon next death recording because sample_probability_count_ 251 // will be reset upon next death recording because sample_probability_count_
(...skipping 17 matching lines...) Expand all
243 // The damage is limited to selecting a wrong sample, which is not something 269 // The damage is limited to selecting a wrong sample, which is not something
244 // that can cause accumulating or cascading effects. 270 // that can cause accumulating or cascading effects.
245 // If there were no inconsistencies caused by race conditions, we never send a 271 // If there were no inconsistencies caused by race conditions, we never send a
246 // sample for the previous phase in the next phase's snapshot because 272 // sample for the previous phase in the next phase's snapshot because
247 // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count. 273 // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count.
248 base::subtle::NoBarrier_Store(&sample_probability_count_, 0); 274 base::subtle::NoBarrier_Store(&sample_probability_count_, 0);
249 base::subtle::NoBarrier_Store(&run_duration_max_, 0); 275 base::subtle::NoBarrier_Store(&run_duration_max_, 0);
250 base::subtle::NoBarrier_Store(&queue_duration_max_, 0); 276 base::subtle::NoBarrier_Store(&queue_duration_max_, 0);
251 } 277 }
252 278
279 int64_t DeathData::CumulativeByteCountRead(const CumulativeByteCount* count) {
gab 2017/05/03 19:40:33 // static
Sigurður Ásgeirsson 2017/05/03 20:10:10 Done.
280 #if defined(ARCH_CPU_64_BITS)
281 return *count;
282 #else
283 return static_cast<int64_t>(count->hi_word) << 32 |
284 static_cast<uint32_t>(count->lo_word);
gab 2017/05/03 19:40:33 Atomics should always be read using atomic ops (i.
Sigurður Ásgeirsson 2017/05/03 20:10:10 I did this for consistency with the rest of this c
gab 2017/05/03 20:24:21 Most of that method uses atomic ops, I'm seeing
285 #endif
286 }
287
288 int64_t DeathData::ConsistentCumulativeByteCountRead(
289 const CumulativeByteCount* count) const {
290 #if defined(ARCH_CPU_64_BITS)
291 return base::subtle::NoBarrier_Load(count);
292 #else
293 // We're on a 32 bit system, this is going to be complicated.
294 while (true) {
295 int32_t update_counter = 0;
296 // Acquire the starting count, spin until it's even.
297
298 // The value of |kYieldProcessorTries| is cargo culted from the page
 299 // allocator, TCMalloc, Windows critical section defaults, and various other
300 // recommendations.
301 // This is not performance critical here, as the reads are vanishingly rare
302 // and only happen under the --enable-heap-profiling=task-profiler flag.
303 constexpr size_t kYieldProcessorTries = 1000;
304 size_t lock_attempts = 0;
305 do {
306 ++lock_attempts;
307 if (lock_attempts == kYieldProcessorTries) {
308 // Yield the current thread periodically to avoid writer starvation.
309 base::PlatformThread::YieldCurrentThread();
310 lock_attempts = 0;
311 }
312
313 update_counter = base::subtle::NoBarrier_Load(&byte_update_counter_);
314 } while (update_counter & 1);
315
316 // Make sure the reads below see all changes before the update counter.
317 base::subtle::MemoryBarrier();
318
319 DCHECK_EQ(update_counter & 1, 0);
320
321 int64_t value =
322 static_cast<int64_t>(base::subtle::NoBarrier_Load(&count->hi_word))
323 << 32 |
324 static_cast<uint32_t>(base::subtle::NoBarrier_Load(&count->lo_word));
325
326 // If the count has not changed, the read is consistent.
327 // Otherwise go around and try again.
gab 2017/05/03 19:40:33 // Release_Load() semantics here ensure that the |
Sigurður Ásgeirsson 2017/05/03 20:10:10 Done.
328 if (update_counter == base::subtle::Release_Load(&byte_update_counter_))
329 return value;
330 }
331 #endif
332 }
333
253 void DeathData::SaturatingMemberAdd(const uint32_t addend, 334 void DeathData::SaturatingMemberAdd(const uint32_t addend,
gab 2017/05/03 19:40:33 // static
Sigurður Ásgeirsson 2017/05/03 20:10:10 Done.
254 base::subtle::Atomic32* sum) { 335 base::subtle::Atomic32* sum) {
336 constexpr int32_t kInt32Max = std::numeric_limits<int32_t>::max();
255 // Bail quick if no work or already saturated. 337 // Bail quick if no work or already saturated.
256 if (addend == 0U || *sum == INT_MAX) 338 if (addend == 0U || *sum == kInt32Max)
257 return; 339 return;
258 340
259 base::CheckedNumeric<int32_t> new_sum = *sum; 341 base::CheckedNumeric<int32_t> new_sum = *sum;
260 new_sum += addend; 342 new_sum += addend;
261 base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX)); 343 base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(kInt32Max));
344 }
345
346 void DeathData::SaturatingByteCountMemberAdd(const uint32_t addend,
347 CumulativeByteCount* sum) {
348 constexpr int64_t kInt64Max = std::numeric_limits<int64_t>::max();
349 // Bail quick if no work or already saturated.
350 if (addend == 0U || CumulativeByteCountRead(sum) == kInt64Max)
351 return;
352
353 base::CheckedNumeric<int64_t> new_sum = CumulativeByteCountRead(sum);
354 new_sum += addend;
355 int64_t new_value = new_sum.ValueOrDefault(kInt64Max);
356 // Update our value.
357 #if defined(ARCH_CPU_64_BITS)
358 base::subtle::NoBarrier_Store(sum, new_value);
359 #else
gab 2017/05/03 19:40:33 For documentation: // This must only be called wh
Sigurður Ásgeirsson 2017/05/03 20:10:10 Done.
360 base::subtle::NoBarrier_Store(&sum->hi_word,
361 static_cast<int32_t>(new_value >> 32));
362 base::subtle::NoBarrier_Store(&sum->lo_word,
363 static_cast<int32_t>(new_value & 0xFFFFFFFF));
364 #endif
262 } 365 }
263 366
264 //------------------------------------------------------------------------------ 367 //------------------------------------------------------------------------------
265 DeathDataSnapshot::DeathDataSnapshot() 368 DeathDataSnapshot::DeathDataSnapshot()
266 : count(-1), 369 : count(-1),
267 run_duration_sum(-1), 370 run_duration_sum(-1),
268 run_duration_max(-1), 371 run_duration_max(-1),
269 run_duration_sample(-1), 372 run_duration_sample(-1),
270 queue_duration_sum(-1), 373 queue_duration_sum(-1),
271 queue_duration_max(-1), 374 queue_duration_max(-1),
272 queue_duration_sample(-1), 375 queue_duration_sample(-1),
273 alloc_ops(-1), 376 alloc_ops(-1),
274 free_ops(-1), 377 free_ops(-1),
275 allocated_bytes(-1), 378 allocated_bytes(-1),
276 freed_bytes(-1), 379 freed_bytes(-1),
277 alloc_overhead_bytes(-1), 380 alloc_overhead_bytes(-1),
278 max_allocated_bytes(-1) {} 381 max_allocated_bytes(-1) {}
279 382
280 DeathDataSnapshot::DeathDataSnapshot(int count, 383 DeathDataSnapshot::DeathDataSnapshot(int count,
281 int32_t run_duration_sum, 384 int32_t run_duration_sum,
282 int32_t run_duration_max, 385 int32_t run_duration_max,
283 int32_t run_duration_sample, 386 int32_t run_duration_sample,
284 int32_t queue_duration_sum, 387 int32_t queue_duration_sum,
285 int32_t queue_duration_max, 388 int32_t queue_duration_max,
286 int32_t queue_duration_sample, 389 int32_t queue_duration_sample,
287 int32_t alloc_ops, 390 int32_t alloc_ops,
288 int32_t free_ops, 391 int32_t free_ops,
289 int32_t allocated_bytes, 392 int64_t allocated_bytes,
290 int32_t freed_bytes, 393 int64_t freed_bytes,
291 int32_t alloc_overhead_bytes, 394 int64_t alloc_overhead_bytes,
292 int32_t max_allocated_bytes) 395 int32_t max_allocated_bytes)
293 : count(count), 396 : count(count),
294 run_duration_sum(run_duration_sum), 397 run_duration_sum(run_duration_sum),
295 run_duration_max(run_duration_max), 398 run_duration_max(run_duration_max),
296 run_duration_sample(run_duration_sample), 399 run_duration_sample(run_duration_sample),
297 queue_duration_sum(queue_duration_sum), 400 queue_duration_sum(queue_duration_sum),
298 queue_duration_max(queue_duration_max), 401 queue_duration_max(queue_duration_max),
299 queue_duration_sample(queue_duration_sample), 402 queue_duration_sample(queue_duration_sample),
300 alloc_ops(alloc_ops), 403 alloc_ops(alloc_ops),
301 free_ops(free_ops), 404 free_ops(free_ops),
(...skipping 774 matching lines...) Expand 10 before | Expand all | Expand 10 after
1076 #endif 1179 #endif
1077 } 1180 }
1078 1181
1079 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = 1182 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) =
1080 default; 1183 default;
1081 1184
1082 ProcessDataSnapshot::~ProcessDataSnapshot() { 1185 ProcessDataSnapshot::~ProcessDataSnapshot() {
1083 } 1186 }
1084 1187
1085 } // namespace tracked_objects 1188 } // namespace tracked_objects
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698