Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/tracked_objects.h" | 5 #include "base/tracked_objects.h" |
| 6 | 6 |
| 7 #include <ctype.h> | 7 #include <ctype.h> |
| 8 #include <limits.h> | 8 #include <limits.h> |
| 9 #include <stdlib.h> | 9 #include <stdlib.h> |
| 10 | 10 |
| (...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 103 | 103 |
| 104 DeathData::DeathData() | 104 DeathData::DeathData() |
| 105 : count_(0), | 105 : count_(0), |
| 106 sample_probability_count_(0), | 106 sample_probability_count_(0), |
| 107 run_duration_sum_(0), | 107 run_duration_sum_(0), |
| 108 queue_duration_sum_(0), | 108 queue_duration_sum_(0), |
| 109 run_duration_max_(0), | 109 run_duration_max_(0), |
| 110 queue_duration_max_(0), | 110 queue_duration_max_(0), |
| 111 alloc_ops_(0), | 111 alloc_ops_(0), |
| 112 free_ops_(0), | 112 free_ops_(0), |
| 113 allocated_bytes_(0), | 113 #if !defined(ARCH_CPU_64_BITS) |
| 114 freed_bytes_(0), | 114 byte_update_counter_(0), |
| 115 alloc_overhead_bytes_(0), | 115 #endif |
| 116 allocated_bytes_(), | |
| 117 freed_bytes_(), | |
| 118 alloc_overhead_bytes_(), | |
| 116 max_allocated_bytes_(0), | 119 max_allocated_bytes_(0), |
| 117 run_duration_sample_(0), | 120 run_duration_sample_(0), |
| 118 queue_duration_sample_(0), | 121 queue_duration_sample_(0), |
| 119 last_phase_snapshot_(nullptr) {} | 122 last_phase_snapshot_(nullptr) { |
| 123 } | |
| 120 | 124 |
| 121 DeathData::DeathData(const DeathData& other) | 125 DeathData::DeathData(const DeathData& other) |
| 122 : count_(other.count_), | 126 : count_(other.count_), |
| 123 sample_probability_count_(other.sample_probability_count_), | 127 sample_probability_count_(other.sample_probability_count_), |
| 124 run_duration_sum_(other.run_duration_sum_), | 128 run_duration_sum_(other.run_duration_sum_), |
| 125 queue_duration_sum_(other.queue_duration_sum_), | 129 queue_duration_sum_(other.queue_duration_sum_), |
| 126 run_duration_max_(other.run_duration_max_), | 130 run_duration_max_(other.run_duration_max_), |
| 127 queue_duration_max_(other.queue_duration_max_), | 131 queue_duration_max_(other.queue_duration_max_), |
| 128 alloc_ops_(other.alloc_ops_), | 132 alloc_ops_(other.alloc_ops_), |
| 129 free_ops_(other.free_ops_), | 133 free_ops_(other.free_ops_), |
| 134 #if !defined(ARCH_CPU_64_BITS) | |
| 135 byte_update_counter_(0), | |
| 136 #endif | |
| 130 allocated_bytes_(other.allocated_bytes_), | 137 allocated_bytes_(other.allocated_bytes_), |
| 131 freed_bytes_(other.freed_bytes_), | 138 freed_bytes_(other.freed_bytes_), |
| 132 alloc_overhead_bytes_(other.alloc_overhead_bytes_), | 139 alloc_overhead_bytes_(other.alloc_overhead_bytes_), |
| 133 max_allocated_bytes_(other.max_allocated_bytes_), | 140 max_allocated_bytes_(other.max_allocated_bytes_), |
| 134 run_duration_sample_(other.run_duration_sample_), | 141 run_duration_sample_(other.run_duration_sample_), |
| 135 queue_duration_sample_(other.queue_duration_sample_), | 142 queue_duration_sample_(other.queue_duration_sample_), |
| 136 last_phase_snapshot_(nullptr) { | 143 last_phase_snapshot_(nullptr) { |
| 137 // This constructor will be used by std::map when adding new DeathData values | 144 // This constructor will be used by std::map when adding new DeathData values |
| 138 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't | 145 // to the map. At that point, last_phase_snapshot_ is still NULL, so we don't |
| 139 // need to worry about ownership transfer. | 146 // need to worry about ownership transfer. |
| (...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 196 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration); | 203 base::subtle::NoBarrier_Store(&run_duration_sample_, run_duration); |
| 197 } | 204 } |
| 198 } | 205 } |
| 199 | 206 |
| 200 void DeathData::RecordAllocations(const uint32_t alloc_ops, | 207 void DeathData::RecordAllocations(const uint32_t alloc_ops, |
| 201 const uint32_t free_ops, | 208 const uint32_t free_ops, |
| 202 const uint32_t allocated_bytes, | 209 const uint32_t allocated_bytes, |
| 203 const uint32_t freed_bytes, | 210 const uint32_t freed_bytes, |
| 204 const uint32_t alloc_overhead_bytes, | 211 const uint32_t alloc_overhead_bytes, |
| 205 const uint32_t max_allocated_bytes) { | 212 const uint32_t max_allocated_bytes) { |
| 213 #if !defined(ARCH_CPU_64_BITS) | |
| 214 // On 32 bit systems, we use an even/odd locking scheme to make possible to | |
| 215 // read 64 bit sums consistently. | |
| — review comment thread —
chrisha (2017/05/02 18:48:32): Maybe a small repeat of the fact that *writing* ca… [comment truncated in page extraction]
Sigurður Ásgeirsson (2017/05/02 18:53:28): Done.
| |
| 216 int32_t counter_val = | |
| 217 base::subtle::NoBarrier_AtomicIncrement(&byte_update_counter_, 1); | |
| 218 // The counter must be odd. | |
| 219 DCHECK_EQ(1, counter_val & 1); | |
| 220 #endif | |
| 221 | |
| 206 // Use saturating arithmetic. | 222 // Use saturating arithmetic. |
| 207 SaturatingMemberAdd(alloc_ops, &alloc_ops_); | 223 SaturatingMemberAdd(alloc_ops, &alloc_ops_); |
| 208 SaturatingMemberAdd(free_ops, &free_ops_); | 224 SaturatingMemberAdd(free_ops, &free_ops_); |
| 209 SaturatingMemberAdd(allocated_bytes, &allocated_bytes_); | 225 SaturatingByteCountMemberAdd(allocated_bytes, &allocated_bytes_); |
| 210 SaturatingMemberAdd(freed_bytes, &freed_bytes_); | 226 SaturatingByteCountMemberAdd(freed_bytes, &freed_bytes_); |
| 211 SaturatingMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_); | 227 SaturatingByteCountMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_); |
| 212 | 228 |
| 213 int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes); | 229 int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes); |
| 214 if (max > max_allocated_bytes_) | 230 if (max > max_allocated_bytes_) |
| 215 base::subtle::NoBarrier_Store(&max_allocated_bytes_, max); | 231 base::subtle::NoBarrier_Store(&max_allocated_bytes_, max); |
| 232 | |
| 233 #if !defined(ARCH_CPU_64_BITS) | |
| 234 // Now release the value while rolling to even. | |
| 235 counter_val = | |
| 236 base::subtle::Barrier_AtomicIncrement(&byte_update_counter_, -1); | |
| 237 DCHECK_EQ(0, counter_val & 1); | |
| 238 #endif | |
| 216 } | 239 } |
| 217 | 240 |
| 218 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { | 241 void DeathData::OnProfilingPhaseCompleted(int profiling_phase) { |
| 219 // Snapshotting and storing current state. | 242 // Snapshotting and storing current state. |
| 220 last_phase_snapshot_ = | 243 last_phase_snapshot_ = |
| 221 new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_); | 244 new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_); |
| 222 | 245 |
| 223 // Not touching fields for which a delta can be computed by comparing with a | 246 // Not touching fields for which a delta can be computed by comparing with a |
| 224 // snapshot from the previous phase. Resetting other fields. Sample values | 247 // snapshot from the previous phase. Resetting other fields. Sample values |
| 225 // will be reset upon next death recording because sample_probability_count_ | 248 // will be reset upon next death recording because sample_probability_count_ |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 243 // The damage is limited to selecting a wrong sample, which is not something | 266 // The damage is limited to selecting a wrong sample, which is not something |
| 244 // that can cause accumulating or cascading effects. | 267 // that can cause accumulating or cascading effects. |
| 245 // If there were no inconsistencies caused by race conditions, we never send a | 268 // If there were no inconsistencies caused by race conditions, we never send a |
| 246 // sample for the previous phase in the next phase's snapshot because | 269 // sample for the previous phase in the next phase's snapshot because |
| 247 // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count. | 270 // ThreadData::SnapshotExecutedTasks doesn't send deltas with 0 count. |
| 248 base::subtle::NoBarrier_Store(&sample_probability_count_, 0); | 271 base::subtle::NoBarrier_Store(&sample_probability_count_, 0); |
| 249 base::subtle::NoBarrier_Store(&run_duration_max_, 0); | 272 base::subtle::NoBarrier_Store(&run_duration_max_, 0); |
| 250 base::subtle::NoBarrier_Store(&queue_duration_max_, 0); | 273 base::subtle::NoBarrier_Store(&queue_duration_max_, 0); |
| 251 } | 274 } |
| 252 | 275 |
| 276 int64_t DeathData::CumulativeByteCountRead(const CumulativeByteCount* count) { | |
| 277 #if defined(ARCH_CPU_64_BITS) | |
| 278 return base::subtle::NoBarrier_Load(count); | |
| 279 #else | |
| 280 return static_cast<int64_t>(count->hi_word) << 32 | | |
| 281 static_cast<uint32_t>(count->lo_word); | |
| 282 #endif | |
| 283 } | |
| 284 | |
| 285 int64_t DeathData::ConsistentCumulativeByteCountRead( | |
| 286 const CumulativeByteCount* count) const { | |
| 287 #if defined(ARCH_CPU_64_BITS) | |
| 288 return base::subtle::NoBarrier_Load(count); | |
| 289 #else | |
| 290 // We're on a 32 bit system, this is going to be complicated. | |
| 291 while (true) { | |
| 292 int32_t update_counter = 0; | |
| 293 // Acquire the starting count, spin until it's even. | |
| 294 do { | |
| 295 update_counter = base::subtle::NoBarrier_Load(&byte_update_counter_); | |
| 296 } while (update_counter & 1); | |
| 297 | |
| 298 DCHECK_EQ(update_counter & 1, 0); | |
| 299 | |
| 300 int64_t value = | |
| 301 static_cast<int64_t>(base::subtle::NoBarrier_Load(&count->hi_word)) | |
| 302 << 32 | | |
| 303 static_cast<uint32_t>(base::subtle::NoBarrier_Load(&count->lo_word)); | |
| 304 | |
| 305 // If the count has not changed, the read is consistent. | |
| 306 // Otherwise go around and try again. | |
| 307 if (update_counter == base::subtle::NoBarrier_Load(&byte_update_counter_)) | |
| 308 return value; | |
| 309 } | |
| 310 #endif | |
| 311 } | |
| 312 | |
| 253 void DeathData::SaturatingMemberAdd(const uint32_t addend, | 313 void DeathData::SaturatingMemberAdd(const uint32_t addend, |
| 254 base::subtle::Atomic32* sum) { | 314 base::subtle::Atomic32* sum) { |
| 255 // Bail quick if no work or already saturated. | 315 // Bail quick if no work or already saturated. |
| 256 if (addend == 0U || *sum == INT_MAX) | 316 if (addend == 0U || *sum == INT_MAX) |
| 257 return; | 317 return; |
| 258 | 318 |
| 259 base::CheckedNumeric<int32_t> new_sum = *sum; | 319 base::CheckedNumeric<int32_t> new_sum = *sum; |
| 260 new_sum += addend; | 320 new_sum += addend; |
| 261 base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX)); | 321 base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX)); |
| 262 } | 322 } |
| 263 | 323 |
| 324 void DeathData::SaturatingByteCountMemberAdd(const uint32_t addend, | |
| 325 CumulativeByteCount* sum) { | |
| 326 // Bail quick if no work or already saturated. | |
| 327 if (addend == 0U || CumulativeByteCountRead(sum) == LONG_MAX) | |
| 328 return; | |
| 329 | |
| 330 base::CheckedNumeric<int64_t> new_sum = CumulativeByteCountRead(sum); | |
| 331 new_sum += addend; | |
| 332 int64_t new_value = new_sum.ValueOrDefault(INT64_MAX); | |
| 333 // Update our value. | |
| 334 #if defined(ARCH_CPU_64_BITS) | |
| 335 base::subtle::NoBarrier_Store(sum, new_value); | |
| 336 #else | |
| 337 base::subtle::NoBarrier_Store(&sum->hi_word, | |
| 338 static_cast<int32_t>(new_value >> 32)); | |
| 339 base::subtle::NoBarrier_Store(&sum->lo_word, | |
| 340 static_cast<int32_t>(new_value & 0xFFFFFFFF)); | |
| 341 #endif | |
| 342 } | |
| 343 | |
| 264 //------------------------------------------------------------------------------ | 344 //------------------------------------------------------------------------------ |
| 265 DeathDataSnapshot::DeathDataSnapshot() | 345 DeathDataSnapshot::DeathDataSnapshot() |
| 266 : count(-1), | 346 : count(-1), |
| 267 run_duration_sum(-1), | 347 run_duration_sum(-1), |
| 268 run_duration_max(-1), | 348 run_duration_max(-1), |
| 269 run_duration_sample(-1), | 349 run_duration_sample(-1), |
| 270 queue_duration_sum(-1), | 350 queue_duration_sum(-1), |
| 271 queue_duration_max(-1), | 351 queue_duration_max(-1), |
| 272 queue_duration_sample(-1), | 352 queue_duration_sample(-1), |
| 273 alloc_ops(-1), | 353 alloc_ops(-1), |
| 274 free_ops(-1), | 354 free_ops(-1), |
| 275 allocated_bytes(-1), | 355 allocated_bytes(-1), |
| 276 freed_bytes(-1), | 356 freed_bytes(-1), |
| 277 alloc_overhead_bytes(-1), | 357 alloc_overhead_bytes(-1), |
| 278 max_allocated_bytes(-1) {} | 358 max_allocated_bytes(-1) {} |
| 279 | 359 |
| 280 DeathDataSnapshot::DeathDataSnapshot(int count, | 360 DeathDataSnapshot::DeathDataSnapshot(int count, |
| 281 int32_t run_duration_sum, | 361 int32_t run_duration_sum, |
| 282 int32_t run_duration_max, | 362 int32_t run_duration_max, |
| 283 int32_t run_duration_sample, | 363 int32_t run_duration_sample, |
| 284 int32_t queue_duration_sum, | 364 int32_t queue_duration_sum, |
| 285 int32_t queue_duration_max, | 365 int32_t queue_duration_max, |
| 286 int32_t queue_duration_sample, | 366 int32_t queue_duration_sample, |
| 287 int32_t alloc_ops, | 367 int32_t alloc_ops, |
| 288 int32_t free_ops, | 368 int32_t free_ops, |
| 289 int32_t allocated_bytes, | 369 int64_t allocated_bytes, |
| 290 int32_t freed_bytes, | 370 int64_t freed_bytes, |
| 291 int32_t alloc_overhead_bytes, | 371 int64_t alloc_overhead_bytes, |
| 292 int32_t max_allocated_bytes) | 372 int32_t max_allocated_bytes) |
| 293 : count(count), | 373 : count(count), |
| 294 run_duration_sum(run_duration_sum), | 374 run_duration_sum(run_duration_sum), |
| 295 run_duration_max(run_duration_max), | 375 run_duration_max(run_duration_max), |
| 296 run_duration_sample(run_duration_sample), | 376 run_duration_sample(run_duration_sample), |
| 297 queue_duration_sum(queue_duration_sum), | 377 queue_duration_sum(queue_duration_sum), |
| 298 queue_duration_max(queue_duration_max), | 378 queue_duration_max(queue_duration_max), |
| 299 queue_duration_sample(queue_duration_sample), | 379 queue_duration_sample(queue_duration_sample), |
| 300 alloc_ops(alloc_ops), | 380 alloc_ops(alloc_ops), |
| 301 free_ops(free_ops), | 381 free_ops(free_ops), |
| (...skipping 774 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1076 #endif | 1156 #endif |
| 1077 } | 1157 } |
| 1078 | 1158 |
| 1079 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = | 1159 ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) = |
| 1080 default; | 1160 default; |
| 1081 | 1161 |
| 1082 ProcessDataSnapshot::~ProcessDataSnapshot() { | 1162 ProcessDataSnapshot::~ProcessDataSnapshot() { |
| 1083 } | 1163 } |
| 1084 | 1164 |
| 1085 } // namespace tracked_objects | 1165 } // namespace tracked_objects |
| OLD | NEW |