| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef BASE_TRACKED_OBJECTS_H_ | 5 #ifndef BASE_TRACKED_OBJECTS_H_ |
| 6 #define BASE_TRACKED_OBJECTS_H_ | 6 #define BASE_TRACKED_OBJECTS_H_ |
| 7 | 7 |
| 8 #include <stdint.h> | 8 #include <stdint.h> |
| 9 | 9 |
| 10 #include <map> | 10 #include <map> |
| 11 #include <set> | 11 #include <set> |
| 12 #include <stack> | 12 #include <stack> |
| 13 #include <string> | 13 #include <string> |
| 14 #include <utility> | 14 #include <utility> |
| 15 #include <vector> | 15 #include <vector> |
| 16 | 16 |
| 17 #include "base/allocator/features.h" |
| 17 #include "base/atomicops.h" | 18 #include "base/atomicops.h" |
| 18 #include "base/base_export.h" | 19 #include "base/base_export.h" |
| 19 #include "base/containers/hash_tables.h" | 20 #include "base/containers/hash_tables.h" |
| 21 #include "base/debug/debugging_flags.h" |
| 22 #include "base/debug/thread_heap_usage_tracker.h" |
| 20 #include "base/gtest_prod_util.h" | 23 #include "base/gtest_prod_util.h" |
| 21 #include "base/lazy_instance.h" | 24 #include "base/lazy_instance.h" |
| 22 #include "base/location.h" | 25 #include "base/location.h" |
| 23 #include "base/macros.h" | 26 #include "base/macros.h" |
| 24 #include "base/process/process_handle.h" | 27 #include "base/process/process_handle.h" |
| 25 #include "base/profiler/tracked_time.h" | 28 #include "base/profiler/tracked_time.h" |
| 26 #include "base/synchronization/lock.h" | 29 #include "base/synchronization/lock.h" |
| 27 #include "base/threading/thread_checker.h" | 30 #include "base/threading/thread_checker.h" |
| 28 #include "base/threading/thread_local_storage.h" | 31 #include "base/threading/thread_local_storage.h" |
| 29 | 32 |
| (...skipping 211 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 241 // When we have a birth we update the count for this birthplace. | 244 // When we have a birth we update the count for this birthplace. |
| 242 void RecordBirth(); | 245 void RecordBirth(); |
| 243 | 246 |
| 244 private: | 247 private: |
| 245 // The number of births on this thread for our location_. | 248 // The number of births on this thread for our location_. |
| 246 int birth_count_; | 249 int birth_count_; |
| 247 | 250 |
| 248 DISALLOW_COPY_AND_ASSIGN(Births); | 251 DISALLOW_COPY_AND_ASSIGN(Births); |
| 249 }; | 252 }; |
| 250 | 253 |
| 254 class DeathData; |
| 255 |
| 251 //------------------------------------------------------------------------------ | 256 //------------------------------------------------------------------------------ |
| 252 // A "snapshotted" representation of the DeathData class. | 257 // A "snapshotted" representation of the DeathData class. |
| 253 | 258 |
| 254 struct BASE_EXPORT DeathDataSnapshot { | 259 struct BASE_EXPORT DeathDataSnapshot { |
| 255 DeathDataSnapshot(); | 260 DeathDataSnapshot(); |
| 256 | 261 |
| 257 // Constructs the snapshot from individual values. | 262 // Constructs the snapshot from individual values. |
| 258 // The alternative would be taking a DeathData parameter, but this would | 263 // The alternative would be taking a DeathData parameter, but this would |
| 259 // create a loop since DeathData indirectly refers to DeathDataSnapshot. Passing | 264 // create a loop since DeathData indirectly refers to DeathDataSnapshot. Passing |
| 260 // a wrapper structure as a param or using an empty constructor for | 265 // a wrapper structure as a param or using an empty constructor for |
| 261 // snapshotting DeathData would be less efficient. | 266 // snapshotting DeathData would be less efficient. |
| 262 DeathDataSnapshot(int count, | 267 DeathDataSnapshot(int count, |
| 263 int32_t run_duration_sum, | 268 int32_t run_duration_sum, |
| 264 int32_t run_duration_max, | 269 int32_t run_duration_max, |
| 265 int32_t run_duration_sample, | 270 int32_t run_duration_sample, |
| 266 int32_t queue_duration_sum, | 271 int32_t queue_duration_sum, |
| 267 int32_t queue_duration_max, | 272 int32_t queue_duration_max, |
| 268 int32_t queue_duration_sample); | 273 int32_t queue_duration_sample, |
| 274 int32_t alloc_ops, |
| 275 int32_t free_ops, |
| 276 int32_t allocated_bytes, |
| 277 int32_t freed_bytes, |
| 278 int32_t alloc_overhead_bytes, |
| 279 int32_t max_allocated_bytes); |
| 280 DeathDataSnapshot(const DeathData& death_data); |
| 281 DeathDataSnapshot(const DeathDataSnapshot& other); |
| 269 ~DeathDataSnapshot(); | 282 ~DeathDataSnapshot(); |
| 270 | 283 |
| 271 // Calculates and returns the delta between this snapshot and an earlier | 284 // Calculates and returns the delta between this snapshot and an earlier |
| 272 // snapshot of the same task |older|. | 285 // snapshot of the same task |older|. |
| 273 DeathDataSnapshot Delta(const DeathDataSnapshot& older) const; | 286 DeathDataSnapshot Delta(const DeathDataSnapshot& older) const; |
| 274 | 287 |
| 275 int count; | 288 int count; |
| 276 int32_t run_duration_sum; | 289 int32_t run_duration_sum; |
| 277 int32_t run_duration_max; | 290 int32_t run_duration_max; |
| 278 int32_t run_duration_sample; | 291 int32_t run_duration_sample; |
| 279 int32_t queue_duration_sum; | 292 int32_t queue_duration_sum; |
| 280 int32_t queue_duration_max; | 293 int32_t queue_duration_max; |
| 281 int32_t queue_duration_sample; | 294 int32_t queue_duration_sample; |
| 295 |
| 296 int32_t alloc_ops; |
| 297 int32_t free_ops; |
| 298 int32_t allocated_bytes; |
| 299 int32_t freed_bytes; |
| 300 int32_t alloc_overhead_bytes; |
| 301 int32_t max_allocated_bytes; |
| 282 }; | 302 }; |
| 283 | 303 |
| 284 //------------------------------------------------------------------------------ | 304 //------------------------------------------------------------------------------ |
| 285 // A "snapshotted" representation of the DeathData for a particular profiling | 305 // A "snapshotted" representation of the DeathData for a particular profiling |
| 286 // phase. Used as an element of the list of phase snapshots owned by DeathData. | 306 // phase. Used as an element of the list of phase snapshots owned by DeathData. |
| 287 | 307 |
| 288 struct DeathDataPhaseSnapshot { | 308 struct DeathDataPhaseSnapshot { |
| 289 DeathDataPhaseSnapshot(int profiling_phase, | 309 DeathDataPhaseSnapshot(int profiling_phase, |
| 290 int count, | 310 const DeathData& death_data, |
| 291 int32_t run_duration_sum, | |
| 292 int32_t run_duration_max, | |
| 293 int32_t run_duration_sample, | |
| 294 int32_t queue_duration_sum, | |
| 295 int32_t queue_duration_max, | |
| 296 int32_t queue_duration_sample, | |
| 297 const DeathDataPhaseSnapshot* prev); | 311 const DeathDataPhaseSnapshot* prev); |
| 298 | 312 |
| 299 // Profiling phase at which completion this snapshot was taken. | 313 // Profiling phase at which completion this snapshot was taken. |
| 300 int profiling_phase; | 314 int profiling_phase; |
| 301 | 315 |
| 302 // Death data snapshot. | 316 // Death data snapshot. |
| 303 DeathDataSnapshot death_data; | 317 DeathDataSnapshot death_data; |
| 304 | 318 |
| 305 // Pointer to a snapshot from the previous phase. | 319 // Pointer to a snapshot from the previous phase. |
| 306 const DeathDataPhaseSnapshot* prev; | 320 const DeathDataPhaseSnapshot* prev; |
| (...skipping 12 matching lines...) Expand all Loading... |
| 319 // snapshotted. | 333 // snapshotted. |
| 320 | 334 |
| 321 class BASE_EXPORT DeathData { | 335 class BASE_EXPORT DeathData { |
| 322 public: | 336 public: |
| 323 DeathData(); | 337 DeathData(); |
| 324 DeathData(const DeathData& other); | 338 DeathData(const DeathData& other); |
| 325 ~DeathData(); | 339 ~DeathData(); |
| 326 | 340 |
| 327 // Update stats for a task destruction (death) that had a Run() time of | 341 // Update stats for a task destruction (death) that had a Run() time of |
| 328 // |run_duration|, and has had a queueing delay of |queue_duration|. | 342 // |run_duration|, and has had a queueing delay of |queue_duration|. |
| 329 void RecordDeath(const int32_t queue_duration, | 343 void RecordDurations(const int32_t queue_duration, |
| 330 const int32_t run_duration, | 344 const int32_t run_duration, |
| 331 const uint32_t random_number); | 345 const uint32_t random_number); |
| 346 |
| 347 // Update stats for a task destruction that performed |alloc_ops| |
| 348 // allocations, |free_ops| frees, allocated |allocated_bytes| bytes, freed |
| 349 // |freed_bytes|, where an estimated |alloc_overhead_bytes| went to heap |
| 350 // overhead, and where at most |max_allocated_bytes| were outstanding at any |
| 351 // one time. |
| 352 // Note that |alloc_overhead_bytes|/|alloc_ops| yields the average estimated |
| 353 // heap overhead of allocations in the task, and |allocated_bytes|/|alloc_ops| |
| 354 // yields the average size of allocation. |
| 355 // Note also that |allocated_bytes|-|freed_bytes| yields the net heap memory |
| 356 // usage of the task, which can be negative. |
| 357 void RecordAllocations(const uint32_t alloc_ops, |
| 358 const uint32_t free_ops, |
| 359 const uint32_t allocated_bytes, |
| 360 const uint32_t freed_bytes, |
| 361 const uint32_t alloc_overhead_bytes, |
| 362 const uint32_t max_allocated_bytes); |
| 332 | 363 |
| 333 // Metrics and past snapshots accessors, used only for serialization and in | 364 // Metrics and past snapshots accessors, used only for serialization and in |
| 334 // tests. | 365 // tests. |
| 335 int count() const { return base::subtle::NoBarrier_Load(&count_); } | 366 int count() const { return base::subtle::NoBarrier_Load(&count_); } |
| 336 int32_t run_duration_sum() const { | 367 int32_t run_duration_sum() const { |
| 337 return base::subtle::NoBarrier_Load(&run_duration_sum_); | 368 return base::subtle::NoBarrier_Load(&run_duration_sum_); |
| 338 } | 369 } |
| 339 int32_t run_duration_max() const { | 370 int32_t run_duration_max() const { |
| 340 return base::subtle::NoBarrier_Load(&run_duration_max_); | 371 return base::subtle::NoBarrier_Load(&run_duration_max_); |
| 341 } | 372 } |
| 342 int32_t run_duration_sample() const { | 373 int32_t run_duration_sample() const { |
| 343 return base::subtle::NoBarrier_Load(&run_duration_sample_); | 374 return base::subtle::NoBarrier_Load(&run_duration_sample_); |
| 344 } | 375 } |
| 345 int32_t queue_duration_sum() const { | 376 int32_t queue_duration_sum() const { |
| 346 return base::subtle::NoBarrier_Load(&queue_duration_sum_); | 377 return base::subtle::NoBarrier_Load(&queue_duration_sum_); |
| 347 } | 378 } |
| 348 int32_t queue_duration_max() const { | 379 int32_t queue_duration_max() const { |
| 349 return base::subtle::NoBarrier_Load(&queue_duration_max_); | 380 return base::subtle::NoBarrier_Load(&queue_duration_max_); |
| 350 } | 381 } |
| 351 int32_t queue_duration_sample() const { | 382 int32_t queue_duration_sample() const { |
| 352 return base::subtle::NoBarrier_Load(&queue_duration_sample_); | 383 return base::subtle::NoBarrier_Load(&queue_duration_sample_); |
| 353 } | 384 } |
| 385 int32_t alloc_ops() const { |
| 386 return base::subtle::NoBarrier_Load(&alloc_ops_); |
| 387 } |
| 388 int32_t free_ops() const { return base::subtle::NoBarrier_Load(&free_ops_); } |
| 389 int32_t allocated_bytes() const { |
| 390 return base::subtle::NoBarrier_Load(&allocated_bytes_); |
| 391 } |
| 392 int32_t freed_bytes() const { |
| 393 return base::subtle::NoBarrier_Load(&freed_bytes_); |
| 394 } |
| 395 int32_t alloc_overhead_bytes() const { |
| 396 return base::subtle::NoBarrier_Load(&alloc_overhead_bytes_); |
| 397 } |
| 398 int32_t max_allocated_bytes() const { |
| 399 return base::subtle::NoBarrier_Load(&max_allocated_bytes_); |
| 400 } |
| 354 const DeathDataPhaseSnapshot* last_phase_snapshot() const { | 401 const DeathDataPhaseSnapshot* last_phase_snapshot() const { |
| 355 return last_phase_snapshot_; | 402 return last_phase_snapshot_; |
| 356 } | 403 } |
| 357 | 404 |
| 358 // Called when the current profiling phase, identified by |profiling_phase|, | 405 // Called when the current profiling phase, identified by |profiling_phase|, |
| 359 // ends. | 406 // ends. |
| 360 // Must be called only on the snapshot thread. | 407 // Must be called only on the snapshot thread. |
| 361 void OnProfilingPhaseCompleted(int profiling_phase); | 408 void OnProfilingPhaseCompleted(int profiling_phase); |
| 362 | 409 |
| 363 private: | 410 private: |
| 411 // A saturating addition operation for member variables. This elides the |
| 412 // use of atomic-primitive reads for members that are only written on the |
| 413 // owning thread. |
| 414 static void SaturatingMemberAdd(const uint32_t addend, |
| 415 base::subtle::Atomic32* sum); |
| 416 |
| 364 // Members are ordered from most regularly read and updated, to least | 417 // Members are ordered from most regularly read and updated, to least |
| 365 // frequently used. This might help a bit with cache lines. | 418 // frequently used. This might help a bit with cache lines. |
| 366 // Number of runs seen (divisor for calculating averages). | 419 // Number of runs seen (divisor for calculating averages). |
| 367 // Can be incremented only on the death thread. | 420 // Can be incremented only on the death thread. |
| 368 base::subtle::Atomic32 count_; | 421 base::subtle::Atomic32 count_; |
| 369 | 422 |
| 370 // Count used in determining probability of selecting exec/queue times from a | 423 // Count used in determining probability of selecting exec/queue times from a |
| 371 // recorded death as samples. | 424 // recorded death as samples. |
| 372 // Gets incremented only on the death thread, but can be set to 0 by | 425 // Gets incremented only on the death thread, but can be set to 0 by |
| 373 // OnProfilingPhaseCompleted() on the snapshot thread. | 426 // OnProfilingPhaseCompleted() on the snapshot thread. |
| 374 base::subtle::Atomic32 sample_probability_count_; | 427 base::subtle::Atomic32 sample_probability_count_; |
| 375 | 428 |
| 376 // Basic tallies, used to compute averages. Can be incremented only on the | 429 // Basic tallies, used to compute averages. Can be incremented only on the |
| 377 // death thread. | 430 // death thread. |
| 378 base::subtle::Atomic32 run_duration_sum_; | 431 base::subtle::Atomic32 run_duration_sum_; |
| 379 base::subtle::Atomic32 queue_duration_sum_; | 432 base::subtle::Atomic32 queue_duration_sum_; |
| 380 // Max values, used by local visualization routines. These are often read, | 433 // Max values, used by local visualization routines. These are often read, |
| 381 // but rarely updated. The max values get assigned only on the death thread, | 434 // but rarely updated. The max values get assigned only on the death thread, |
| 382 // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the | 435 // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the |
| 383 // snapshot thread. | 436 // snapshot thread. |
| 384 base::subtle::Atomic32 run_duration_max_; | 437 base::subtle::Atomic32 run_duration_max_; |
| 385 base::subtle::Atomic32 queue_duration_max_; | 438 base::subtle::Atomic32 queue_duration_max_; |
| 439 |
| 440 // The cumulative number of allocation and free operations. |
| 441 base::subtle::Atomic32 alloc_ops_; |
| 442 base::subtle::Atomic32 free_ops_; |
| 443 |
| 444 // The number of bytes allocated by the task. |
| 445 base::subtle::Atomic32 allocated_bytes_; |
| 446 |
| 447 // The number of bytes freed by the task. |
| 448 base::subtle::Atomic32 freed_bytes_; |
| 449 |
| 450 // The cumulative number of overhead bytes. Where available this yields an |
| 451 // estimate of the heap overhead for allocations. |
| 452 base::subtle::Atomic32 alloc_overhead_bytes_; |
| 453 |
| 454 // The high-watermark for the number of outstanding heap allocated bytes. |
| 455 base::subtle::Atomic32 max_allocated_bytes_; |
| 456 |
| 386 // Samples, used by crowd sourcing gatherers. These are almost never read, | 457 // Samples, used by crowd sourcing gatherers. These are almost never read, |
| 387 // and rarely updated. They can be modified only on the death thread. | 458 // and rarely updated. They can be modified only on the death thread. |
| 388 base::subtle::Atomic32 run_duration_sample_; | 459 base::subtle::Atomic32 run_duration_sample_; |
| 389 base::subtle::Atomic32 queue_duration_sample_; | 460 base::subtle::Atomic32 queue_duration_sample_; |
| 390 | 461 |
| 391 // Snapshot of this death data made at the last profiling phase completion, if | 462 // Snapshot of this death data made at the last profiling phase completion, if |
| 392 // any. DeathData owns the whole list starting with this pointer. | 463 // any. DeathData owns the whole list starting with this pointer. |
| 393 // Can be accessed only on the snapshot thread. | 464 // Can be accessed only on the snapshot thread. |
| 394 const DeathDataPhaseSnapshot* last_phase_snapshot_; | 465 const DeathDataPhaseSnapshot* last_phase_snapshot_; |
| 395 | 466 |
| (...skipping 352 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 748 | 819 |
| 749 // Returns the start time. | 820 // Returns the start time. |
| 750 TrackedTime StartTime() const; | 821 TrackedTime StartTime() const; |
| 751 | 822 |
| 752 // Task's duration is calculated as the wallclock duration between starting | 823 // Task's duration is calculated as the wallclock duration between starting |
| 753 // and stopping this stopwatch, minus the wallclock durations of any other | 824 // and stopping this stopwatch, minus the wallclock durations of any other |
| 754 // instances that are immediately nested in this one, started and stopped on | 825 // instances that are immediately nested in this one, started and stopped on |
| 755 // this thread during that period. | 826 // this thread during that period. |
| 756 int32_t RunDurationMs() const; | 827 int32_t RunDurationMs() const; |
| 757 | 828 |
| 829 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER) |
| 830 const base::debug::ThreadHeapUsageTracker& heap_usage() const { |
| 831 return heap_usage_; |
| 832 } |
| 833 bool heap_tracking_enabled() const { return heap_tracking_enabled_; } |
| 834 #endif |
| 835 |
| 758 // Returns tracking info for the current thread. | 836 // Returns tracking info for the current thread. |
| 759 ThreadData* GetThreadData() const; | 837 ThreadData* GetThreadData() const; |
| 760 | 838 |
| 761 private: | 839 private: |
| 762 // Time when the stopwatch was started. | 840 // Time when the stopwatch was started. |
| 763 TrackedTime start_time_; | 841 TrackedTime start_time_; |
| 764 | 842 |
| 843 #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER) |
| 844 base::debug::ThreadHeapUsageTracker heap_usage_; |
| 845 bool heap_tracking_enabled_; |
| 846 #endif |
| 847 |
| 765 // Wallclock duration of the task. | 848 // Wallclock duration of the task. |
| 766 int32_t wallclock_duration_ms_; | 849 int32_t wallclock_duration_ms_; |
| 767 | 850 |
| 768 // Tracking info for the current thread. | 851 // Tracking info for the current thread. |
| 769 ThreadData* current_thread_data_; | 852 ThreadData* current_thread_data_; |
| 770 | 853 |
| 771 // Sum of wallclock durations of all stopwatches that were directly nested in | 854 // Sum of wallclock durations of all stopwatches that were directly nested in |
| 772 // this one. | 855 // this one. |
| 773 int32_t excluded_duration_ms_; | 856 int32_t excluded_duration_ms_; |
| 774 | 857 |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 811 ProcessDataSnapshot(const ProcessDataSnapshot& other); | 894 ProcessDataSnapshot(const ProcessDataSnapshot& other); |
| 812 ~ProcessDataSnapshot(); | 895 ~ProcessDataSnapshot(); |
| 813 | 896 |
| 814 PhasedProcessDataSnapshotMap phased_snapshots; | 897 PhasedProcessDataSnapshotMap phased_snapshots; |
| 815 base::ProcessId process_id; | 898 base::ProcessId process_id; |
| 816 }; | 899 }; |
| 817 | 900 |
| 818 } // namespace tracked_objects | 901 } // namespace tracked_objects |
| 819 | 902 |
| 820 #endif // BASE_TRACKED_OBJECTS_H_ | 903 #endif // BASE_TRACKED_OBJECTS_H_ |
| OLD | NEW |