Chromium Code Reviews| Index: base/tracked_objects.cc |
| diff --git a/base/tracked_objects.cc b/base/tracked_objects.cc |
| index 1507c0986c23c2d4d5e82503023caac68e043ffe..71bca8200cc0a269d6cba2cb0ae2c2b71240c8ab 100644 |
| --- a/base/tracked_objects.cc |
| +++ b/base/tracked_objects.cc |
| @@ -110,13 +110,17 @@ DeathData::DeathData() |
| queue_duration_max_(0), |
| alloc_ops_(0), |
| free_ops_(0), |
| - allocated_bytes_(0), |
| - freed_bytes_(0), |
| - alloc_overhead_bytes_(0), |
| +#if !defined(ARCH_CPU_64_BITS) |
| + byte_update_counter_(0), |
| +#endif |
| + allocated_bytes_(), |
| + freed_bytes_(), |
| + alloc_overhead_bytes_(), |
| max_allocated_bytes_(0), |
| run_duration_sample_(0), |
| queue_duration_sample_(0), |
| - last_phase_snapshot_(nullptr) {} |
| + last_phase_snapshot_(nullptr) { |
| +} |
| DeathData::DeathData(const DeathData& other) |
| : count_(other.count_), |
| @@ -127,6 +131,9 @@ DeathData::DeathData(const DeathData& other) |
| queue_duration_max_(other.queue_duration_max_), |
| alloc_ops_(other.alloc_ops_), |
| free_ops_(other.free_ops_), |
| +#if !defined(ARCH_CPU_64_BITS) |
| + byte_update_counter_(0), |
| +#endif |
| allocated_bytes_(other.allocated_bytes_), |
| freed_bytes_(other.freed_bytes_), |
| alloc_overhead_bytes_(other.alloc_overhead_bytes_), |
| @@ -203,16 +210,32 @@ void DeathData::RecordAllocations(const uint32_t alloc_ops, |
| const uint32_t freed_bytes, |
| const uint32_t alloc_overhead_bytes, |
| const uint32_t max_allocated_bytes) { |
| +#if !defined(ARCH_CPU_64_BITS) |
| + // On 32 bit systems, we use an even/odd locking scheme to make it |
| + // possible to read 64 bit sums consistently. |
|
chrisha
2017/05/02 18:48:32
Maybe a small repeat of the fact that *writing* ca
Sigurður Ásgeirsson
2017/05/02 18:53:28
Done.
|
| + int32_t counter_val = |
| + base::subtle::NoBarrier_AtomicIncrement(&byte_update_counter_, 1); |
| + // The counter must be odd. |
| + DCHECK_EQ(1, counter_val & 1); |
| +#endif |
| + |
| // Use saturating arithmetic. |
| SaturatingMemberAdd(alloc_ops, &alloc_ops_); |
| SaturatingMemberAdd(free_ops, &free_ops_); |
| - SaturatingMemberAdd(allocated_bytes, &allocated_bytes_); |
| - SaturatingMemberAdd(freed_bytes, &freed_bytes_); |
| - SaturatingMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_); |
| + SaturatingByteCountMemberAdd(allocated_bytes, &allocated_bytes_); |
| + SaturatingByteCountMemberAdd(freed_bytes, &freed_bytes_); |
| + SaturatingByteCountMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_); |
| int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes); |
| if (max > max_allocated_bytes_) |
| base::subtle::NoBarrier_Store(&max_allocated_bytes_, max); |
| + |
| +#if !defined(ARCH_CPU_64_BITS) |
| + // Now release the value while rolling to even. |
| + counter_val = |
| + base::subtle::Barrier_AtomicIncrement(&byte_update_counter_, -1); |
| + DCHECK_EQ(0, counter_val & 1); |
| +#endif |
| } |
| void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { |
| @@ -250,6 +273,43 @@ void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { |
| base::subtle::NoBarrier_Store(&queue_duration_max_, 0); |
| } |
| +int64_t DeathData::CumulativeByteCountRead(const CumulativeByteCount* count) { |
| +#if defined(ARCH_CPU_64_BITS) |
| + return count; |
| +#else |
| + return static_cast<int64_t>(count->hi_word) << 32 | |
| + static_cast<uint32_t>(count->lo_word); |
| +#endif |
| +} |
| + |
int64_t DeathData::ConsistentCumulativeByteCountRead(
    const CumulativeByteCount* count) const {
#if defined(ARCH_CPU_64_BITS)
  // On 64 bit systems the count is a single word that can be read in one
  // atomic load - no coordination with the writer is needed.
  return base::subtle::NoBarrier_Load(count);
#else
  // On 32 bit systems the count is split across two words, so a consistent
  // read requires cooperation with the writer: the writer holds
  // |byte_update_counter_| at an odd value for the duration of an update
  // (see RecordAllocations), so a value read while the counter is even and
  // unchanged across the read cannot be torn.
  while (true) {
    int32_t update_counter = 0;
    // Spin until the counter is even, i.e. no update is in progress.
    do {
      update_counter = base::subtle::NoBarrier_Load(&byte_update_counter_);
    } while (update_counter & 1);

    DCHECK_EQ(update_counter & 1, 0);

    // Assemble the 64 bit value from its two halves.
    // NOTE(review): these are NoBarrier loads; whether they can be reordered
    // relative to the counter re-read below should be confirmed against the
    // ordering guarantees documented in base/atomicops.h.
    int64_t value =
        static_cast<int64_t>(base::subtle::NoBarrier_Load(&count->hi_word))
            << 32 |
        static_cast<uint32_t>(base::subtle::NoBarrier_Load(&count->lo_word));

    // If the counter has not moved, no writer ran during the two word reads
    // and the assembled value is consistent. Otherwise go around and retry.
    if (update_counter == base::subtle::NoBarrier_Load(&byte_update_counter_))
      return value;
  }
#endif
}
| + |
| void DeathData::SaturatingMemberAdd(const uint32_t addend, |
| base::subtle::Atomic32* sum) { |
| // Bail quick if no work or already saturated. |
| @@ -261,6 +321,26 @@ void DeathData::SaturatingMemberAdd(const uint32_t addend, |
| base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX)); |
| } |
| +void DeathData::SaturatingByteCountMemberAdd(const uint32_t addend, |
| + CumulativeByteCount* sum) { |
| + // Bail quick if no work or already saturated. |
| + if (addend == 0U || CumulativeByteCountRead(sum) == LONG_MAX) |
| + return; |
| + |
| + base::CheckedNumeric<int64_t> new_sum = CumulativeByteCountRead(sum); |
| + new_sum += addend; |
| + int64_t new_value = new_sum.ValueOrDefault(LONG_MAX); |
| +// Update our value. |
| +#if defined(ARCH_CPU_64_BITS) |
| + base::subtle::NoBarrier_Store(sum, new_value); |
| +#else |
| + base::subtle::NoBarrier_Store(&sum->hi_word, |
| + static_cast<int32_t>(new_value >> 32)); |
| + base::subtle::NoBarrier_Store(&sum->lo_word, |
| + static_cast<int32_t>(new_value & 0xFFFFFFFF)); |
| +#endif |
| +} |
| + |
| //------------------------------------------------------------------------------ |
| DeathDataSnapshot::DeathDataSnapshot() |
| : count(-1), |
| @@ -286,9 +366,9 @@ DeathDataSnapshot::DeathDataSnapshot(int count, |
| int32_t queue_duration_sample, |
| int32_t alloc_ops, |
| int32_t free_ops, |
| - int32_t allocated_bytes, |
| - int32_t freed_bytes, |
| - int32_t alloc_overhead_bytes, |
| + int64_t allocated_bytes, |
| + int64_t freed_bytes, |
| + int64_t alloc_overhead_bytes, |
| int32_t max_allocated_bytes) |
| : count(count), |
| run_duration_sum(run_duration_sum), |