Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(204)

Side by Side Diff: base/tracked_objects.h

Issue 2859493002: Tracked objects: Bump cumulative byte count storage to 64 bits to avoid saturation (Closed)
Patch Set: Fix 64 bit compile, doofus!. Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | base/tracked_objects.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef BASE_TRACKED_OBJECTS_H_ 5 #ifndef BASE_TRACKED_OBJECTS_H_
6 #define BASE_TRACKED_OBJECTS_H_ 6 #define BASE_TRACKED_OBJECTS_H_
7 7
8 #include <stdint.h> 8 #include <stdint.h>
9 9
10 #include <map> 10 #include <map>
(...skipping 10 matching lines...) Expand all
21 #include "base/containers/hash_tables.h" 21 #include "base/containers/hash_tables.h"
22 #include "base/debug/debugging_flags.h" 22 #include "base/debug/debugging_flags.h"
23 #include "base/debug/thread_heap_usage_tracker.h" 23 #include "base/debug/thread_heap_usage_tracker.h"
24 #include "base/gtest_prod_util.h" 24 #include "base/gtest_prod_util.h"
25 #include "base/lazy_instance.h" 25 #include "base/lazy_instance.h"
26 #include "base/location.h" 26 #include "base/location.h"
27 #include "base/macros.h" 27 #include "base/macros.h"
28 #include "base/process/process_handle.h" 28 #include "base/process/process_handle.h"
29 #include "base/profiler/tracked_time.h" 29 #include "base/profiler/tracked_time.h"
30 #include "base/synchronization/lock.h" 30 #include "base/synchronization/lock.h"
31 #include "base/threading/thread_checker.h"
32 #include "base/threading/thread_local_storage.h" 31 #include "base/threading/thread_local_storage.h"
33 32
34 namespace base { 33 namespace base {
35 struct TrackingInfo; 34 struct TrackingInfo;
36 } 35 }
37 36
38 // TrackedObjects provides a database of stats about objects (generally Tasks) 37 // TrackedObjects provides a database of stats about objects (generally Tasks)
39 // that are tracked. Tracking means their birth, death, duration, birth thread, 38 // that are tracked. Tracking means their birth, death, duration, birth thread,
40 // death thread, and birth place are recorded. This data is carefully spread 39 // death thread, and birth place are recorded. This data is carefully spread
41 // across a series of objects so that the counts and times can be rapidly 40 // across a series of objects so that the counts and times can be rapidly
(...skipping 231 matching lines...) Expand 10 before | Expand all | Expand 10 after
273 // snapshotting DeathData would be less efficient. 272 // snapshotting DeathData would be less efficient.
274 DeathDataSnapshot(int count, 273 DeathDataSnapshot(int count,
275 int32_t run_duration_sum, 274 int32_t run_duration_sum,
276 int32_t run_duration_max, 275 int32_t run_duration_max,
277 int32_t run_duration_sample, 276 int32_t run_duration_sample,
278 int32_t queue_duration_sum, 277 int32_t queue_duration_sum,
279 int32_t queue_duration_max, 278 int32_t queue_duration_max,
280 int32_t queue_duration_sample, 279 int32_t queue_duration_sample,
281 int32_t alloc_ops, 280 int32_t alloc_ops,
282 int32_t free_ops, 281 int32_t free_ops,
283 int32_t allocated_bytes, 282 int64_t allocated_bytes,
284 int32_t freed_bytes, 283 int64_t freed_bytes,
285 int32_t alloc_overhead_bytes, 284 int64_t alloc_overhead_bytes,
286 int32_t max_allocated_bytes); 285 int32_t max_allocated_bytes);
287 DeathDataSnapshot(const DeathData& death_data); 286 DeathDataSnapshot(const DeathData& death_data);
288 DeathDataSnapshot(const DeathDataSnapshot& other); 287 DeathDataSnapshot(const DeathDataSnapshot& other);
289 ~DeathDataSnapshot(); 288 ~DeathDataSnapshot();
290 289
291 // Calculates and returns the delta between this snapshot and an earlier 290 // Calculates and returns the delta between this snapshot and an earlier
292 // snapshot of the same task |older|. 291 // snapshot of the same task |older|.
293 DeathDataSnapshot Delta(const DeathDataSnapshot& older) const; 292 DeathDataSnapshot Delta(const DeathDataSnapshot& older) const;
294 293
295 int count; 294 int count;
296 int32_t run_duration_sum; 295 int32_t run_duration_sum;
297 int32_t run_duration_max; 296 int32_t run_duration_max;
298 int32_t run_duration_sample; 297 int32_t run_duration_sample;
299 int32_t queue_duration_sum; 298 int32_t queue_duration_sum;
300 int32_t queue_duration_max; 299 int32_t queue_duration_max;
301 int32_t queue_duration_sample; 300 int32_t queue_duration_sample;
302 301
303 int32_t alloc_ops; 302 int32_t alloc_ops;
304 int32_t free_ops; 303 int32_t free_ops;
305 int32_t allocated_bytes; 304 int64_t allocated_bytes;
306 int32_t freed_bytes; 305 int64_t freed_bytes;
307 int32_t alloc_overhead_bytes; 306 int64_t alloc_overhead_bytes;
308 int32_t max_allocated_bytes; 307 int32_t max_allocated_bytes;
309 }; 308 };
310 309
311 //------------------------------------------------------------------------------ 310 //------------------------------------------------------------------------------
312 // A "snapshotted" representation of the DeathData for a particular profiling 311 // A "snapshotted" representation of the DeathData for a particular profiling
313 // phase. Used as an element of the list of phase snapshots owned by DeathData. 312 // phase. Used as an element of the list of phase snapshots owned by DeathData.
314 313
315 struct DeathDataPhaseSnapshot { 314 struct DeathDataPhaseSnapshot {
316 DeathDataPhaseSnapshot(int profiling_phase, 315 DeathDataPhaseSnapshot(int profiling_phase,
317 const DeathData& death_data, 316 const DeathData& death_data,
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after
386 int32_t queue_duration_max() const { 385 int32_t queue_duration_max() const {
387 return base::subtle::NoBarrier_Load(&queue_duration_max_); 386 return base::subtle::NoBarrier_Load(&queue_duration_max_);
388 } 387 }
389 int32_t queue_duration_sample() const { 388 int32_t queue_duration_sample() const {
390 return base::subtle::NoBarrier_Load(&queue_duration_sample_); 389 return base::subtle::NoBarrier_Load(&queue_duration_sample_);
391 } 390 }
392 int32_t alloc_ops() const { 391 int32_t alloc_ops() const {
393 return base::subtle::NoBarrier_Load(&alloc_ops_); 392 return base::subtle::NoBarrier_Load(&alloc_ops_);
394 } 393 }
395 int32_t free_ops() const { return base::subtle::NoBarrier_Load(&free_ops_); } 394 int32_t free_ops() const { return base::subtle::NoBarrier_Load(&free_ops_); }
396 int32_t allocated_bytes() const { 395 int64_t allocated_bytes() const {
397 return base::subtle::NoBarrier_Load(&allocated_bytes_); 396 return ConsistentCumulativeByteCountRead(&allocated_bytes_);
398 } 397 }
399 int32_t freed_bytes() const { 398 int64_t freed_bytes() const {
400 return base::subtle::NoBarrier_Load(&freed_bytes_); 399 return ConsistentCumulativeByteCountRead(&freed_bytes_);
401 } 400 }
402 int32_t alloc_overhead_bytes() const { 401 int64_t alloc_overhead_bytes() const {
403 return base::subtle::NoBarrier_Load(&alloc_overhead_bytes_); 402 return ConsistentCumulativeByteCountRead(&alloc_overhead_bytes_);
404 } 403 }
405 int32_t max_allocated_bytes() const { 404 int64_t max_allocated_bytes() const {
406 return base::subtle::NoBarrier_Load(&max_allocated_bytes_); 405 return base::subtle::NoBarrier_Load(&max_allocated_bytes_);
407 } 406 }
408 const DeathDataPhaseSnapshot* last_phase_snapshot() const { 407 const DeathDataPhaseSnapshot* last_phase_snapshot() const {
409 return last_phase_snapshot_; 408 return last_phase_snapshot_;
410 } 409 }
411 410
412 // Called when the current profiling phase, identified by |profiling_phase|, 411 // Called when the current profiling phase, identified by |profiling_phase|,
413 // ends. 412 // ends.
414 // Must be called only on the snapshot thread. 413 // Must be called only on the snapshot thread.
415 void OnProfilingPhaseCompleted(int profiling_phase); 414 void OnProfilingPhaseCompleted(int profiling_phase);
416 415
417 private: 416 private:
417 #if defined(ARCH_CPU_64_BITS)
418 using CumulativeByteCount = base::subtle::Atomic64;
419 #else
420 struct CumulativeByteCount {
421 base::subtle::Atomic32 hi_word;
422 base::subtle::Atomic32 lo_word;
423 };
424 #endif
425
426 // Reads a cumulative byte counter consistently.
427 int64_t ConsistentCumulativeByteCountRead(
428 const CumulativeByteCount* count) const;
429
 430 // Reads the value of a cumulative byte count; only returns consistent
 431 // results on the owning thread.
432 static int64_t UnsafeCumulativeByteCountRead(
433 const CumulativeByteCount* count);
434
418 // A saturating addition operation for member variables. This elides the 435 // A saturating addition operation for member variables. This elides the
419 // use of atomic-primitive reads for members that are only written on the 436 // use of atomic-primitive reads for members that are only written on the
420 // owning thread. 437 // owning thread.
421 static void SaturatingMemberAdd(const uint32_t addend, 438 static void SaturatingMemberAdd(const uint32_t addend,
422 base::subtle::Atomic32* sum); 439 base::subtle::Atomic32* sum);
423 440
441 // A saturating addition operation for byte count variables.
442 // On 32 bit machines, this may only be called while |byte_update_counter_|
 443 // is odd - i.e. locked.
444 void SaturatingByteCountMemberAdd(const uint32_t addend,
445 CumulativeByteCount* sum);
446
424 // Members are ordered from most regularly read and updated, to least 447 // Members are ordered from most regularly read and updated, to least
425 // frequently used. This might help a bit with cache lines. 448 // frequently used. This might help a bit with cache lines.
426 // Number of runs seen (divisor for calculating averages). 449 // Number of runs seen (divisor for calculating averages).
427 // Can be incremented only on the death thread. 450 // Can be incremented only on the death thread.
428 base::subtle::Atomic32 count_; 451 base::subtle::Atomic32 count_;
429 452
430 // Count used in determining probability of selecting exec/queue times from a 453 // Count used in determining probability of selecting exec/queue times from a
431 // recorded death as samples. 454 // recorded death as samples.
432 // Gets incremented only on the death thread, but can be set to 0 by 455 // Gets incremented only on the death thread, but can be set to 0 by
433 // OnProfilingPhaseCompleted() on the snapshot thread. 456 // OnProfilingPhaseCompleted() on the snapshot thread.
434 base::subtle::Atomic32 sample_probability_count_; 457 base::subtle::Atomic32 sample_probability_count_;
435 458
436 // Basic tallies, used to compute averages. Can be incremented only on the 459 // Basic tallies, used to compute averages. Can be incremented only on the
437 // death thread. 460 // death thread.
438 base::subtle::Atomic32 run_duration_sum_; 461 base::subtle::Atomic32 run_duration_sum_;
439 base::subtle::Atomic32 queue_duration_sum_; 462 base::subtle::Atomic32 queue_duration_sum_;
440 // Max values, used by local visualization routines. These are often read, 463 // Max values, used by local visualization routines. These are often read,
441 // but rarely updated. The max values get assigned only on the death thread, 464 // but rarely updated. The max values get assigned only on the death thread,
442 // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the 465 // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the
443 // snapshot thread. 466 // snapshot thread.
444 base::subtle::Atomic32 run_duration_max_; 467 base::subtle::Atomic32 run_duration_max_;
445 base::subtle::Atomic32 queue_duration_max_; 468 base::subtle::Atomic32 queue_duration_max_;
446 469
447 // The cumulative number of allocation and free operations. 470 // The cumulative number of allocation and free operations.
448 base::subtle::Atomic32 alloc_ops_; 471 base::subtle::Atomic32 alloc_ops_;
449 base::subtle::Atomic32 free_ops_; 472 base::subtle::Atomic32 free_ops_;
450 473
474 #if !defined(ARCH_CPU_64_BITS)
475 // On 32 bit systems this is used to achieve consistent reads for cumulative
476 // byte counts. This is odd while updates are in progress, and even while
477 // quiescent. If this has the same value before and after reading the
478 // cumulative counts, the read is consistent.
479 base::subtle::Atomic32 byte_update_counter_;
480 #endif
481
451 // The number of bytes allocated by the task. 482 // The number of bytes allocated by the task.
452 base::subtle::Atomic32 allocated_bytes_; 483 CumulativeByteCount allocated_bytes_;
453 484
454 // The number of bytes freed by the task. 485 // The number of bytes freed by the task.
455 base::subtle::Atomic32 freed_bytes_; 486 CumulativeByteCount freed_bytes_;
456 487
457 // The cumulative number of overhead bytes. Where available this yields an 488 // The cumulative number of overhead bytes. Where available this yields an
458 // estimate of the heap overhead for allocations. 489 // estimate of the heap overhead for allocations.
459 base::subtle::Atomic32 alloc_overhead_bytes_; 490 CumulativeByteCount alloc_overhead_bytes_;
460 491
461 // The high-watermark for the number of outstanding heap allocated bytes. 492 // The high-watermark for the number of outstanding heap allocated bytes.
462 base::subtle::Atomic32 max_allocated_bytes_; 493 base::subtle::Atomic32 max_allocated_bytes_;
463 494
464 // Samples, used by crowd sourcing gatherers. These are almost never read, 495 // Samples, used by crowd sourcing gatherers. These are almost never read,
465 // and rarely updated. They can be modified only on the death thread. 496 // and rarely updated. They can be modified only on the death thread.
466 base::subtle::Atomic32 run_duration_sample_; 497 base::subtle::Atomic32 run_duration_sample_;
467 base::subtle::Atomic32 queue_duration_sample_; 498 base::subtle::Atomic32 queue_duration_sample_;
468 499
469 // Snapshot of this death data made at the last profiling phase completion, if 500 // Snapshot of this death data made at the last profiling phase completion, if
(...skipping 420 matching lines...) Expand 10 before | Expand all | Expand 10 after
890 ProcessDataSnapshot(const ProcessDataSnapshot& other); 921 ProcessDataSnapshot(const ProcessDataSnapshot& other);
891 ~ProcessDataSnapshot(); 922 ~ProcessDataSnapshot();
892 923
893 PhasedProcessDataSnapshotMap phased_snapshots; 924 PhasedProcessDataSnapshotMap phased_snapshots;
894 base::ProcessId process_id; 925 base::ProcessId process_id;
895 }; 926 };
896 927
897 } // namespace tracked_objects 928 } // namespace tracked_objects
898 929
899 #endif // BASE_TRACKED_OBJECTS_H_ 930 #endif // BASE_TRACKED_OBJECTS_H_
OLDNEW
« no previous file with comments | « no previous file | base/tracked_objects.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698