Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef BASE_TRACKED_OBJECTS_H_ | 5 #ifndef BASE_TRACKED_OBJECTS_H_ |
| 6 #define BASE_TRACKED_OBJECTS_H_ | 6 #define BASE_TRACKED_OBJECTS_H_ |
| 7 | 7 |
| 8 #include <stdint.h> | 8 #include <stdint.h> |
| 9 | 9 |
| 10 #include <map> | 10 #include <map> |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 21 #include "base/containers/hash_tables.h" | 21 #include "base/containers/hash_tables.h" |
| 22 #include "base/debug/debugging_flags.h" | 22 #include "base/debug/debugging_flags.h" |
| 23 #include "base/debug/thread_heap_usage_tracker.h" | 23 #include "base/debug/thread_heap_usage_tracker.h" |
| 24 #include "base/gtest_prod_util.h" | 24 #include "base/gtest_prod_util.h" |
| 25 #include "base/lazy_instance.h" | 25 #include "base/lazy_instance.h" |
| 26 #include "base/location.h" | 26 #include "base/location.h" |
| 27 #include "base/macros.h" | 27 #include "base/macros.h" |
| 28 #include "base/process/process_handle.h" | 28 #include "base/process/process_handle.h" |
| 29 #include "base/profiler/tracked_time.h" | 29 #include "base/profiler/tracked_time.h" |
| 30 #include "base/synchronization/lock.h" | 30 #include "base/synchronization/lock.h" |
| 31 #include "base/threading/thread_checker.h" | |
| 32 #include "base/threading/thread_local_storage.h" | 31 #include "base/threading/thread_local_storage.h" |
| 33 | 32 |
| 34 namespace base { | 33 namespace base { |
| 35 struct TrackingInfo; | 34 struct TrackingInfo; |
| 36 } | 35 } |
| 37 | 36 |
| 38 // TrackedObjects provides a database of stats about objects (generally Tasks) | 37 // TrackedObjects provides a database of stats about objects (generally Tasks) |
| 39 // that are tracked. Tracking means their birth, death, duration, birth thread, | 38 // that are tracked. Tracking means their birth, death, duration, birth thread, |
| 40 // death thread, and birth place are recorded. This data is carefully spread | 39 // death thread, and birth place are recorded. This data is carefully spread |
| 41 // across a series of objects so that the counts and times can be rapidly | 40 // across a series of objects so that the counts and times can be rapidly |
| (...skipping 231 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 273 // snapshotting DeathData would be less efficient. | 272 // snapshotting DeathData would be less efficient. |
| 274 DeathDataSnapshot(int count, | 273 DeathDataSnapshot(int count, |
| 275 int32_t run_duration_sum, | 274 int32_t run_duration_sum, |
| 276 int32_t run_duration_max, | 275 int32_t run_duration_max, |
| 277 int32_t run_duration_sample, | 276 int32_t run_duration_sample, |
| 278 int32_t queue_duration_sum, | 277 int32_t queue_duration_sum, |
| 279 int32_t queue_duration_max, | 278 int32_t queue_duration_max, |
| 280 int32_t queue_duration_sample, | 279 int32_t queue_duration_sample, |
| 281 int32_t alloc_ops, | 280 int32_t alloc_ops, |
| 282 int32_t free_ops, | 281 int32_t free_ops, |
| 283 int32_t allocated_bytes, | 282 int64_t allocated_bytes, |
| 284 int32_t freed_bytes, | 283 int64_t freed_bytes, |
| 285 int32_t alloc_overhead_bytes, | 284 int64_t alloc_overhead_bytes, |
| 286 int32_t max_allocated_bytes); | 285 int32_t max_allocated_bytes); |
| 287 DeathDataSnapshot(const DeathData& death_data); | 286 DeathDataSnapshot(const DeathData& death_data); |
| 288 DeathDataSnapshot(const DeathDataSnapshot& other); | 287 DeathDataSnapshot(const DeathDataSnapshot& other); |
| 289 ~DeathDataSnapshot(); | 288 ~DeathDataSnapshot(); |
| 290 | 289 |
| 291 // Calculates and returns the delta between this snapshot and an earlier | 290 // Calculates and returns the delta between this snapshot and an earlier |
| 292 // snapshot of the same task |older|. | 291 // snapshot of the same task |older|. |
| 293 DeathDataSnapshot Delta(const DeathDataSnapshot& older) const; | 292 DeathDataSnapshot Delta(const DeathDataSnapshot& older) const; |
| 294 | 293 |
| 295 int count; | 294 int count; |
| 296 int32_t run_duration_sum; | 295 int32_t run_duration_sum; |
| 297 int32_t run_duration_max; | 296 int32_t run_duration_max; |
| 298 int32_t run_duration_sample; | 297 int32_t run_duration_sample; |
| 299 int32_t queue_duration_sum; | 298 int32_t queue_duration_sum; |
| 300 int32_t queue_duration_max; | 299 int32_t queue_duration_max; |
| 301 int32_t queue_duration_sample; | 300 int32_t queue_duration_sample; |
| 302 | 301 |
| 303 int32_t alloc_ops; | 302 int32_t alloc_ops; |
| 304 int32_t free_ops; | 303 int32_t free_ops; |
| 305 int32_t allocated_bytes; | 304 int64_t allocated_bytes; |
| 306 int32_t freed_bytes; | 305 int64_t freed_bytes; |
| 307 int32_t alloc_overhead_bytes; | 306 int64_t alloc_overhead_bytes; |
| 308 int32_t max_allocated_bytes; | 307 int32_t max_allocated_bytes; |
| 309 }; | 308 }; |
| 310 | 309 |
| 311 //------------------------------------------------------------------------------ | 310 //------------------------------------------------------------------------------ |
| 312 // A "snapshotted" representation of the DeathData for a particular profiling | 311 // A "snapshotted" representation of the DeathData for a particular profiling |
| 313 // phase. Used as an element of the list of phase snapshots owned by DeathData. | 312 // phase. Used as an element of the list of phase snapshots owned by DeathData. |
| 314 | 313 |
| 315 struct DeathDataPhaseSnapshot { | 314 struct DeathDataPhaseSnapshot { |
| 316 DeathDataPhaseSnapshot(int profiling_phase, | 315 DeathDataPhaseSnapshot(int profiling_phase, |
| 317 const DeathData& death_data, | 316 const DeathData& death_data, |
| (...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 386 int32_t queue_duration_max() const { | 385 int32_t queue_duration_max() const { |
| 387 return base::subtle::NoBarrier_Load(&queue_duration_max_); | 386 return base::subtle::NoBarrier_Load(&queue_duration_max_); |
| 388 } | 387 } |
| 389 int32_t queue_duration_sample() const { | 388 int32_t queue_duration_sample() const { |
| 390 return base::subtle::NoBarrier_Load(&queue_duration_sample_); | 389 return base::subtle::NoBarrier_Load(&queue_duration_sample_); |
| 391 } | 390 } |
| 392 int32_t alloc_ops() const { | 391 int32_t alloc_ops() const { |
| 393 return base::subtle::NoBarrier_Load(&alloc_ops_); | 392 return base::subtle::NoBarrier_Load(&alloc_ops_); |
| 394 } | 393 } |
| 395 int32_t free_ops() const { return base::subtle::NoBarrier_Load(&free_ops_); } | 394 int32_t free_ops() const { return base::subtle::NoBarrier_Load(&free_ops_); } |
| 396 int32_t allocated_bytes() const { | 395 int64_t allocated_bytes() const { |
| 397 return base::subtle::NoBarrier_Load(&allocated_bytes_); | 396 return ConsistentCumulativeByteCountRead(&allocated_bytes_); |
| 398 } | 397 } |
| 399 int32_t freed_bytes() const { | 398 int64_t freed_bytes() const { |
| 400 return base::subtle::NoBarrier_Load(&freed_bytes_); | 399 return ConsistentCumulativeByteCountRead(&freed_bytes_); |
| 401 } | 400 } |
| 402 int32_t alloc_overhead_bytes() const { | 401 int64_t alloc_overhead_bytes() const { |
| 403 return base::subtle::NoBarrier_Load(&alloc_overhead_bytes_); | 402 return ConsistentCumulativeByteCountRead(&alloc_overhead_bytes_); |
| 404 } | 403 } |
| 405 int32_t max_allocated_bytes() const { | 404 int64_t max_allocated_bytes() const { |
| 406 return base::subtle::NoBarrier_Load(&max_allocated_bytes_); | 405 return base::subtle::NoBarrier_Load(&max_allocated_bytes_); |
| 407 } | 406 } |
| 408 const DeathDataPhaseSnapshot* last_phase_snapshot() const { | 407 const DeathDataPhaseSnapshot* last_phase_snapshot() const { |
| 409 return last_phase_snapshot_; | 408 return last_phase_snapshot_; |
| 410 } | 409 } |
| 411 | 410 |
| 412 // Called when the current profiling phase, identified by |profiling_phase|, | 411 // Called when the current profiling phase, identified by |profiling_phase|, |
| 413 // ends. | 412 // ends. |
| 414 // Must be called only on the snapshot thread. | 413 // Must be called only on the snapshot thread. |
| 415 void OnProfilingPhaseCompleted(int profiling_phase); | 414 void OnProfilingPhaseCompleted(int profiling_phase); |
| 416 | 415 |
| 417 private: | 416 private: |
| 417 #if defined(ARCH_CPU_64_BITS) | |
| 418 using CumulativeByteCount = base::subtle::Atomic64; | |
| 419 #else | |
| 420 struct CumulativeByteCount { | |
| 421 base::subtle::Atomic32 hi_word; | |
| 422 base::subtle::Atomic32 lo_word; | |
| 423 }; | |
| 424 #endif | |
| 425 | |
| 426 // Reads a cumulative byte counter consistently. | |
| 427 int64_t ConsistentCumulativeByteCountRead( | |
| 428 const CumulativeByteCount* count) const; | |
| 429 | |
| 430 // Reads the value of a cumulative byte count, only returns consistent | |
| 431 // results on the owning thread. | |
| 432 static int64_t CumulativeByteCountRead(const CumulativeByteCount* count); | |
|
gab
2017/05/03 19:40:33
"UnsafeCumulativeByteCountRead" to explicitly document that this read is unsafe
Sigurður Ásgeirsson
2017/05/03 20:10:10
Done.
| |
| 433 | |
| 418 // A saturating addition operation for member variables. This elides the | 434 // A saturating addition operation for member variables. This elides the |
| 419 // use of atomic-primitive reads for members that are only written on the | 435 // use of atomic-primitive reads for members that are only written on the |
| 420 // owning thread. | 436 // owning thread. |
| 421 static void SaturatingMemberAdd(const uint32_t addend, | 437 static void SaturatingMemberAdd(const uint32_t addend, |
| 422 base::subtle::Atomic32* sum); | 438 base::subtle::Atomic32* sum); |
| 423 | 439 |
| 440 void SaturatingByteCountMemberAdd(const uint32_t addend, | |
|
gab
2017/05/03 19:40:33
Document this method
Sigurður Ásgeirsson
2017/05/03 20:10:10
Done.
| |
| 441 CumulativeByteCount* sum); | |
| 442 | |
| 424 // Members are ordered from most regularly read and updated, to least | 443 // Members are ordered from most regularly read and updated, to least |
| 425 // frequently used. This might help a bit with cache lines. | 444 // frequently used. This might help a bit with cache lines. |
| 426 // Number of runs seen (divisor for calculating averages). | 445 // Number of runs seen (divisor for calculating averages). |
| 427 // Can be incremented only on the death thread. | 446 // Can be incremented only on the death thread. |
| 428 base::subtle::Atomic32 count_; | 447 base::subtle::Atomic32 count_; |
| 429 | 448 |
| 430 // Count used in determining probability of selecting exec/queue times from a | 449 // Count used in determining probability of selecting exec/queue times from a |
| 431 // recorded death as samples. | 450 // recorded death as samples. |
| 432 // Gets incremented only on the death thread, but can be set to 0 by | 451 // Gets incremented only on the death thread, but can be set to 0 by |
| 433 // OnProfilingPhaseCompleted() on the snapshot thread. | 452 // OnProfilingPhaseCompleted() on the snapshot thread. |
| 434 base::subtle::Atomic32 sample_probability_count_; | 453 base::subtle::Atomic32 sample_probability_count_; |
| 435 | 454 |
| 436 // Basic tallies, used to compute averages. Can be incremented only on the | 455 // Basic tallies, used to compute averages. Can be incremented only on the |
| 437 // death thread. | 456 // death thread. |
| 438 base::subtle::Atomic32 run_duration_sum_; | 457 base::subtle::Atomic32 run_duration_sum_; |
| 439 base::subtle::Atomic32 queue_duration_sum_; | 458 base::subtle::Atomic32 queue_duration_sum_; |
| 440 // Max values, used by local visualization routines. These are often read, | 459 // Max values, used by local visualization routines. These are often read, |
| 441 // but rarely updated. The max values get assigned only on the death thread, | 460 // but rarely updated. The max values get assigned only on the death thread, |
| 442 // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the | 461 // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the |
| 443 // snapshot thread. | 462 // snapshot thread. |
| 444 base::subtle::Atomic32 run_duration_max_; | 463 base::subtle::Atomic32 run_duration_max_; |
| 445 base::subtle::Atomic32 queue_duration_max_; | 464 base::subtle::Atomic32 queue_duration_max_; |
| 446 | 465 |
| 447 // The cumulative number of allocation and free operations. | 466 // The cumulative number of allocation and free operations. |
| 448 base::subtle::Atomic32 alloc_ops_; | 467 base::subtle::Atomic32 alloc_ops_; |
| 449 base::subtle::Atomic32 free_ops_; | 468 base::subtle::Atomic32 free_ops_; |
| 450 | 469 |
| 470 #if !defined(ARCH_CPU_64_BITS) | |
| 471 // On 32 bit systems this is used to achieve consistent reads for cumulative | |
| 472 // byte counts. This is odd while updates are in progress, and even while | |
| 473 // quiescent. If this has the same value before and after reading the | |
| 474 // cumulative counts, the read is consistent. | |
| 475 base::subtle::Atomic32 byte_update_counter_; | |
| 476 #endif | |
| 477 | |
| 451 // The number of bytes allocated by the task. | 478 // The number of bytes allocated by the task. |
| 452 base::subtle::Atomic32 allocated_bytes_; | 479 CumulativeByteCount allocated_bytes_; |
| 453 | 480 |
| 454 // The number of bytes freed by the task. | 481 // The number of bytes freed by the task. |
| 455 base::subtle::Atomic32 freed_bytes_; | 482 CumulativeByteCount freed_bytes_; |
| 456 | 483 |
| 457 // The cumulative number of overhead bytes. Where available this yields an | 484 // The cumulative number of overhead bytes. Where available this yields an |
| 458 // estimate of the heap overhead for allocations. | 485 // estimate of the heap overhead for allocations. |
| 459 base::subtle::Atomic32 alloc_overhead_bytes_; | 486 CumulativeByteCount alloc_overhead_bytes_; |
| 460 | 487 |
| 461 // The high-watermark for the number of outstanding heap allocated bytes. | 488 // The high-watermark for the number of outstanding heap allocated bytes. |
| 462 base::subtle::Atomic32 max_allocated_bytes_; | 489 base::subtle::Atomic32 max_allocated_bytes_; |
| 463 | 490 |
| 464 // Samples, used by crowd sourcing gatherers. These are almost never read, | 491 // Samples, used by crowd sourcing gatherers. These are almost never read, |
| 465 // and rarely updated. They can be modified only on the death thread. | 492 // and rarely updated. They can be modified only on the death thread. |
| 466 base::subtle::Atomic32 run_duration_sample_; | 493 base::subtle::Atomic32 run_duration_sample_; |
| 467 base::subtle::Atomic32 queue_duration_sample_; | 494 base::subtle::Atomic32 queue_duration_sample_; |
| 468 | 495 |
| 469 // Snapshot of this death data made at the last profiling phase completion, if | 496 // Snapshot of this death data made at the last profiling phase completion, if |
| (...skipping 420 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 890 ProcessDataSnapshot(const ProcessDataSnapshot& other); | 917 ProcessDataSnapshot(const ProcessDataSnapshot& other); |
| 891 ~ProcessDataSnapshot(); | 918 ~ProcessDataSnapshot(); |
| 892 | 919 |
| 893 PhasedProcessDataSnapshotMap phased_snapshots; | 920 PhasedProcessDataSnapshotMap phased_snapshots; |
| 894 base::ProcessId process_id; | 921 base::ProcessId process_id; |
| 895 }; | 922 }; |
| 896 | 923 |
| 897 } // namespace tracked_objects | 924 } // namespace tracked_objects |
| 898 | 925 |
| 899 #endif // BASE_TRACKED_OBJECTS_H_ | 926 #endif // BASE_TRACKED_OBJECTS_H_ |
| OLD | NEW |