Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
| 6 | 6 |
| 7 #include <ctype.h> | 7 #include <ctype.h> |
| 8 #include <limits.h> | 8 #include <limits.h> |
| 9 #include <stdlib.h> | 9 #include <stdlib.h> |
| 10 | 10 |
| (...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 103 | 103 |
| 104 DeathData::DeathData() | 104 DeathData::DeathData() |
| 105 : count_(0), | 105 : count_(0), |
| 106 sample_probability_count_(0), | 106 sample_probability_count_(0), |
| 107 run_duration_sum_(0), | 107 run_duration_sum_(0), |
| 108 queue_duration_sum_(0), | 108 queue_duration_sum_(0), |
| 109 run_duration_max_(0), | 109 run_duration_max_(0), |
| 110 queue_duration_max_(0), | 110 queue_duration_max_(0), |
| 111 alloc_ops_(0), | 111 alloc_ops_(0), |
| 112 free_ops_(0), | 112 free_ops_(0), |
| 113 allocated_bytes_(0), | 113 #if !defined(ARCH_CPU_64_BITS) |
| 114 freed_bytes_(0), | 114 byte_update_counter_(0), |
| 115 alloc_overhead_bytes_(0), | 115 #endif |
| 116 allocated_bytes_(), | |
| 117 freed_bytes_(), | |
| 118 alloc_overhead_bytes_(), | |
| 116 max_allocated_bytes_(0), | 119 max_allocated_bytes_(0), |
| 117 run_duration_sample_(0), | 120 run_duration_sample_(0), |
| 118 queue_duration_sample_(0), | 121 queue_duration_sample_(0), |
| 119 last_phase_snapshot_(nullptr) {} | 122 last_phase_snapshot_(nullptr) { |
| 123 } | |
| 120 | 124 |
| 121 DeathData::DeathData(const DeathData& other) | 125 DeathData::DeathData(const DeathData& other) |
| 122 : count_(other.count_), | 126 : count_(other.count_), |
| 123 sample_probability_count_(other.sample_probability_count_), | 127 sample_probability_count_(other.sample_probability_count_), |
| 124 run_duration_sum_(other.run_duration_sum_), | 128 run_duration_sum_(other.run_duration_sum_), |
| 125 queue_duration_sum_(other.queue_duration_sum_), | 129 queue_duration_sum_(other.queue_duration_sum_), |
| 126 run_duration_max_(other.run_duration_max_), | 130 run_duration_max_(other.run_duration_max_), |
| 127 queue_duration_max_(other.queue_duration_max_), | 131 queue_duration_max_(other.queue_duration_max_), |
| 128 alloc_ops_(other.alloc_ops_), | 132 alloc_ops_(other.alloc_ops_), |
| 129 free_ops_(other.free_ops_), | 133 free_ops_(other.free_ops_), |
| 134 #if !defined(ARCH_CPU_64_BITS) | |
| 135 byte_update_counter_(0), | |
| 136 #endif | |
| 130 allocated_bytes_(other.allocated_bytes_), | 137 allocated_bytes_(other.allocated_bytes_), |
| 131 freed_bytes_(other.freed_bytes_), | 138 freed_bytes_(other.freed_bytes_), |
| 132 alloc_overhead_bytes_(other.alloc_overhead_bytes_), | 139 alloc_overhead_bytes_(other.alloc_overhead_bytes_), |
| 133 max_allocated_bytes_(other.max_allocated_bytes_), | 140 max_allocated_bytes_(other.max_allocated_bytes_), |
| 134 run_duration_sample_(other.run_duration_sample_), | 141 run_duration_sample_(other.run_duration_sample_), |
| 135 queue_duration_sample_(other.queue_duration_sample_), | 142 queue_duration_sample_(other.queue_duration_sample_), |
| 136 last_phase_snapshot_(nullptr) { | 143 last_phase_snapshot_(nullptr) { |
| 137 // This constructor will be used by std::map when adding new DeathData values | 144 // This constructor will be used by std::map when adding new DeathData values |
| 138 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't | 145 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't |
| 139 // need to worry about ownership transfer. | 146 // need to worry about ownership transfer. |
| (...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 196 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration); | 203 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration); |
| 197 } | 204 } |
| 198 } | 205 } |
| 199 | 206 |
| 200 void DeathData::RecordAllocations(const uint32_t alloc_ops, | 207 void DeathData::RecordAllocations(const uint32_t alloc_ops, |
| 201 const uint32_t free_ops, | 208 const uint32_t free_ops, |
| 202 const uint32_t allocated_bytes, | 209 const uint32_t allocated_bytes, |
| 203 const uint32_t freed_bytes, | 210 const uint32_t freed_bytes, |
| 204 const uint32_t alloc_overhead_bytes, | 211 const uint32_t alloc_overhead_bytes, |
| 205 const uint32_t max_allocated_bytes) { | 212 const uint32_t max_allocated_bytes) { |
| 213 #if !defined(ARCH_CPU_64_BITS) | |
| 214 // On 32 bit systems, we use an even/odd locking scheme to make it possible to |
| 215 // read 64 bit sums consistently. Note that since writes are bound to the | |
| 216 // thread owning this DeathData, there's no race on these writes. | |
|
gab
2017/05/02 21:17:12
Can we have a base::ThreadChecker check confirm th
Sigurður Ásgeirsson
2017/05/03 15:23:44
I looked into this, and it's not straightforward t
| |
| 217 int32_t counter_val = | |
| 218 base::subtle::NoBarrier_AtomicIncrement(&byte_update_counter_, 1); | |
| 219 // The counter must be odd. | |
| 220 DCHECK_EQ(1, counter_val & 1); | |
| 221 #endif | |
| 222 | |
| 206 // Use saturating arithmetic. | 223 // Use saturating arithmetic. |
| 207 SaturatingMemberAdd(alloc_ops, &alloc_ops_); | 224 SaturatingMemberAdd(alloc_ops, &alloc_ops_); |
| 208 SaturatingMemberAdd(free_ops, &free_ops_); | 225 SaturatingMemberAdd(free_ops, &free_ops_); |
| 209 SaturatingMemberAdd(allocated_bytes, &allocated_bytes_); | 226 SaturatingByteCountMemberAdd(allocated_bytes, &allocated_bytes_); |
| 210 SaturatingMemberAdd(freed_bytes, &freed_bytes_); | 227 SaturatingByteCountMemberAdd(freed_bytes, &freed_bytes_); |
| 211 SaturatingMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_); | 228 SaturatingByteCountMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_); |
| 212 | 229 |
| 213 int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes); | 230 int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes); |
| 214 if (max > max_allocated_bytes_) | 231 if (max > max_allocated_bytes_) |
| 215 base::subtle::NoBarrier_Store(&max_allocated_bytes_, max); | 232 base::subtle::NoBarrier_Store(&max_allocated_bytes_, max); |
| 233 | |
| 234 #if !defined(ARCH_CPU_64_BITS) | |
| 235 // Now release the value while rolling to even. | |
| 236 counter_val = | |
| 237 base::subtle::Barrier_AtomicIncrement(&byte_update_counter_, -1); | |
| 238 DCHECK_EQ(0, counter_val & 1); | |
| 239 #endif | |
| 216 } | 240 } |
| 217 | 241 |
| 218 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { | 242 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { |
| 219 // Snapshotting and storing current state. | 243 // Snapshotting and storing current state. |
| 220 last_phase_snapshot_ = | 244 last_phase_snapshot_ = |
| 221 new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_); | 245 new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_); |
| 222 | 246 |
| 223 // Not touching fields for which a delta can be computed by comparing with a | 247 // Not touching fields for which a delta can be computed by comparing with a |
| 224 // snapshot from the previous phase. Resetting other fields. Sample values | 248 // snapshot from the previous phase. Resetting other fields. Sample values |
| 225 // will be reset upon next death recording because sample_probability_count_ | 249 // will be reset upon next death recording because sample_probability_count_ |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 243 // The damage is limited to selecting a wrong sample, which is not something | 267 // The damage is limited to selecting a wrong sample, which is not something |
| 244 // that can cause accumulating or cascading effects. | 268 // that can cause accumulating or cascading effects. |
| 245 // If there were no inconsistencies caused by race conditions, we never send a | 269 // If there were no inconsistencies caused by race conditions, we never send a |
| 246 // sample for the previous phase in the next phase's snapshot because | 270 // sample for the previous phase in the next phase's snapshot because |
| 247 // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count. | 271 // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count. |
| 248 base::subtle::NoBarrier_Store(&sample_probability_count_, 0); | 272 base::subtle::NoBarrier_Store(&sample_probability_count_, 0); |
| 249 base::subtle::NoBarrier_Store(&run_duration_max_, 0); | 273 base::subtle::NoBarrier_Store(&run_duration_max_, 0); |
| 250 base::subtle::NoBarrier_Store(&queue_duration_max_, 0); | 274 base::subtle::NoBarrier_Store(&queue_duration_max_, 0); |
| 251 } | 275 } |
| 252 | 276 |
| 277 int64_t DeathData::CumulativeByteCountRead(const CumulativeByteCount* count) { | |
| 278 #if defined(ARCH_CPU_64_BITS) | |
| 279 return *count; | |
| 280 #else | |
| 281 return static_cast<int64_t>(count->hi_word) << 32 | | |
| 282 static_cast<uint32_t>(count->lo_word); | |
| 283 #endif | |
| 284 } | |
| 285 | |
| 286 int64_t DeathData::ConsistentCumulativeByteCountRead( | |
| 287 const CumulativeByteCount* count) const { | |
| 288 #if defined(ARCH_CPU_64_BITS) | |
| 289 return base::subtle::NoBarrier_Load(count); | |
| 290 #else | |
| 291 // We're on a 32 bit system; this is going to be complicated. | |
| 292 while (true) { | |
|
gab
2017/05/02 21:17:12
This can theoretically starve readers. I assume th
Sigurður Ásgeirsson
2017/05/03 14:10:10
I don't know where you'd see this documented, but
| |
| 293 int32_t update_counter = 0; | |
| 294 // Acquire the starting count, spin until it's even. | |
| 295 do { | |
| 296 update_counter = base::subtle::NoBarrier_Load(&byte_update_counter_); | |
| 297 } while (update_counter & 1); | |
|
gab
2017/05/02 21:17:12
PlatformThread::YieldCurrentThread() after each fa
Sigurður Ásgeirsson
2017/05/03 14:10:10
Mmmm, I added a spin count to yield. These reads h
| |
| 298 | |
| 299 DCHECK_EQ(update_counter & 1, 0); | |
| 300 | |
| 301 int64_t value = | |
| 302 static_cast<int64_t>(base::subtle::NoBarrier_Load(&count->hi_word)) | |
| 303 << 32 | | |
| 304 static_cast<uint32_t>(base::subtle::NoBarrier_Load(&count->lo_word)); | |
|
gab
2017/05/02 21:17:12
I'm not convinced that NoBarrier is what you want
Sigurður Ásgeirsson
2017/05/03 14:10:10
The final (now-)increment to the guarding counter
| |
| 305 | |
| 306 // If the count has not changed, the read is consistent. | |
| 307 // Otherwise go around and try again. | |
| 308 if (update_counter == base::subtle::NoBarrier_Load(&byte_update_counter_)) | |
|
gab
2017/05/02 21:17:12
Can't this check see the same value twice while th
Sigurður Ásgeirsson
2017/05/03 14:10:10
You're absolutely right and that was my intent. I
| |
| 309 return value; | |
| 310 } | |
| 311 #endif | |
| 312 } | |
| 313 | |
| 253 void DeathData::SaturatingMemberAdd(const uint32_t addend, | 314 void DeathData::SaturatingMemberAdd(const uint32_t addend, |
| 254 base::subtle::Atomic32* sum) { | 315 base::subtle::Atomic32* sum) { |
| 255 // Bail quick if no work or already saturated. | 316 // Bail quick if no work or already saturated. |
| 256 if (addend == 0U || *sum == INT_MAX) | 317 if (addend == 0U || *sum == INT_MAX) |
| 257 return; | 318 return; |
| 258 | 319 |
| 259 base::CheckedNumeric<int32_t> new_sum = *sum; | 320 base::CheckedNumeric<int32_t> new_sum = *sum; |
| 260 new_sum += addend; | 321 new_sum += addend; |
| 261 base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX)); | 322 base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX)); |
| 262 } | 323 } |
| 263 | 324 |
| 325 void DeathData::SaturatingByteCountMemberAdd(const uint32_t addend, | |
| 326 CumulativeByteCount* sum) { | |
| 327 // Bail quick if no work or already saturated. | |
| 328 if (addend == 0U || CumulativeByteCountRead(sum) == LONG_MAX) | |
|
gab
2017/05/02 21:17:12
Use std::numeric_limits<int64_t>::max() instead of
Sigurður Ásgeirsson
2017/05/03 14:10:10
Ah, new hotness, thanks!
| |
| 329 return; | |
| 330 | |
| 331 base::CheckedNumeric<int64_t> new_sum = CumulativeByteCountRead(sum); | |
| 332 new_sum += addend; | |
| 333 int64_t new_value = new_sum.ValueOrDefault(LONG_MAX); | |
| 334 // Update our value. | |
| 335 #if defined(ARCH_CPU_64_BITS) | |
| 336 base::subtle::NoBarrier_Store(sum, new_value); | |
| 337 #else | |
| 338 base::subtle::NoBarrier_Store(&sum->hi_word, | |
| 339 static_cast<int32_t>(new_value >> 32)); | |
| 340 base::subtle::NoBarrier_Store(&sum->lo_word, | |
| 341 static_cast<int32_t>(new_value & 0xFFFFFFFF)); | |
| 342 #endif | |
| 343 } | |
| 344 | |
| 264 //------------------------------------------------------------------------------ | 345 //------------------------------------------------------------------------------ |
| 265 DeathDataSnapshot::DeathDataSnapshot() | 346 DeathDataSnapshot::DeathDataSnapshot() |
| 266 : count(-1), | 347 : count(-1), |
| 267 run_duration_sum(-1), | 348 run_duration_sum(-1), |
| 268 run_duration_max(-1), | 349 run_duration_max(-1), |
| 269 run_duration_sample(-1), | 350 run_duration_sample(-1), |
| 270 queue_duration_sum(-1), | 351 queue_duration_sum(-1), |
| 271 queue_duration_max(-1), | 352 queue_duration_max(-1), |
| 272 queue_duration_sample(-1), | 353 queue_duration_sample(-1), |
| 273 alloc_ops(-1), | 354 alloc_ops(-1), |
| 274 free_ops(-1), | 355 free_ops(-1), |
| 275 allocated_bytes(-1), | 356 allocated_bytes(-1), |
| 276 freed_bytes(-1), | 357 freed_bytes(-1), |
| 277 alloc_overhead_bytes(-1), | 358 alloc_overhead_bytes(-1), |
| 278 max_allocated_bytes(-1) {} | 359 max_allocated_bytes(-1) {} |
| 279 | 360 |
| 280 DeathDataSnapshot::DeathDataSnapshot(int count, | 361 DeathDataSnapshot::DeathDataSnapshot(int count, |
| 281 int32_t run_duration_sum, | 362 int32_t run_duration_sum, |
| 282 int32_t run_duration_max, | 363 int32_t run_duration_max, |
| 283 int32_t run_duration_sample, | 364 int32_t run_duration_sample, |
| 284 int32_t queue_duration_sum, | 365 int32_t queue_duration_sum, |
| 285 int32_t queue_duration_max, | 366 int32_t queue_duration_max, |
| 286 int32_t queue_duration_sample, | 367 int32_t queue_duration_sample, |
| 287 int32_t alloc_ops, | 368 int32_t alloc_ops, |
| 288 int32_t free_ops, | 369 int32_t free_ops, |
| 289 int32_t allocated_bytes, | 370 int64_t allocated_bytes, |
| 290 int32_t freed_bytes, | 371 int64_t freed_bytes, |
| 291 int32_t alloc_overhead_bytes, | 372 int64_t alloc_overhead_bytes, |
| 292 int32_t max_allocated_bytes) | 373 int32_t max_allocated_bytes) |
| 293 : count(count), | 374 : count(count), |
| 294 run_duration_sum(run_duration_sum), | 375 run_duration_sum(run_duration_sum), |
| 295 run_duration_max(run_duration_max), | 376 run_duration_max(run_duration_max), |
| 296 run_duration_sample(run_duration_sample), | 377 run_duration_sample(run_duration_sample), |
| 297 queue_duration_sum(queue_duration_sum), | 378 queue_duration_sum(queue_duration_sum), |
| 298 queue_duration_max(queue_duration_max), | 379 queue_duration_max(queue_duration_max), |
| 299 queue_duration_sample(queue_duration_sample), | 380 queue_duration_sample(queue_duration_sample), |
| 300 alloc_ops(alloc_ops), | 381 alloc_ops(alloc_ops), |
| 301 free_ops(free_ops), | 382 free_ops(free_ops), |
| (...skipping 774 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1076 #endif | 1157 #endif |
| 1077 } | 1158 } |
| 1078 | 1159 |
| 1079 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = | 1160 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = |
| 1080 default; | 1161 default; |
| 1081 | 1162 |
| 1082 ProcessDataSnapshot::~ProcessDataSnapshot() { | 1163 ProcessDataSnapshot::~ProcessDataSnapshot() { |
| 1083 } | 1164 } |
| 1084 | 1165 |
| 1085 } // namespace tracked_objects | 1166 } // namespace tracked_objects |
| OLD | NEW |