Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(121)

Side by Side Diff: base/tracked_objects.h

Issue 2859493002: Tracked objects: Bump cumulative byte count storage to 64 bits to avoid saturation (Closed)
Patch Set: Address Chris' comment. Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | base/tracked_objects.cc » ('j') | base/tracked_objects.cc » ('J')
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef BASE_TRACKED_OBJECTS_H_ 5 #ifndef BASE_TRACKED_OBJECTS_H_
6 #define BASE_TRACKED_OBJECTS_H_ 6 #define BASE_TRACKED_OBJECTS_H_
7 7
8 #include <stdint.h> 8 #include <stdint.h>
9 9
10 #include <map> 10 #include <map>
(...skipping 262 matching lines...) Expand 10 before | Expand all | Expand 10 after
273 // snapshotting DeathData would be less efficient. 273 // snapshotting DeathData would be less efficient.
274 DeathDataSnapshot(int count, 274 DeathDataSnapshot(int count,
275 int32_t run_duration_sum, 275 int32_t run_duration_sum,
276 int32_t run_duration_max, 276 int32_t run_duration_max,
277 int32_t run_duration_sample, 277 int32_t run_duration_sample,
278 int32_t queue_duration_sum, 278 int32_t queue_duration_sum,
279 int32_t queue_duration_max, 279 int32_t queue_duration_max,
280 int32_t queue_duration_sample, 280 int32_t queue_duration_sample,
281 int32_t alloc_ops, 281 int32_t alloc_ops,
282 int32_t free_ops, 282 int32_t free_ops,
283 int32_t allocated_bytes, 283 int64_t allocated_bytes,
284 int32_t freed_bytes, 284 int64_t freed_bytes,
285 int32_t alloc_overhead_bytes, 285 int64_t alloc_overhead_bytes,
286 int32_t max_allocated_bytes); 286 int32_t max_allocated_bytes);
287 DeathDataSnapshot(const DeathData& death_data); 287 DeathDataSnapshot(const DeathData& death_data);
288 DeathDataSnapshot(const DeathDataSnapshot& other); 288 DeathDataSnapshot(const DeathDataSnapshot& other);
289 ~DeathDataSnapshot(); 289 ~DeathDataSnapshot();
290 290
291 // Calculates and returns the delta between this snapshot and an earlier 291 // Calculates and returns the delta between this snapshot and an earlier
292 // snapshot of the same task |older|. 292 // snapshot of the same task |older|.
293 DeathDataSnapshot Delta(const DeathDataSnapshot& older) const; 293 DeathDataSnapshot Delta(const DeathDataSnapshot& older) const;
294 294
295 int count; 295 int count;
296 int32_t run_duration_sum; 296 int32_t run_duration_sum;
297 int32_t run_duration_max; 297 int32_t run_duration_max;
298 int32_t run_duration_sample; 298 int32_t run_duration_sample;
299 int32_t queue_duration_sum; 299 int32_t queue_duration_sum;
300 int32_t queue_duration_max; 300 int32_t queue_duration_max;
301 int32_t queue_duration_sample; 301 int32_t queue_duration_sample;
302 302
303 int32_t alloc_ops; 303 int32_t alloc_ops;
304 int32_t free_ops; 304 int32_t free_ops;
305 int32_t allocated_bytes; 305 int64_t allocated_bytes;
306 int32_t freed_bytes; 306 int64_t freed_bytes;
307 int32_t alloc_overhead_bytes; 307 int64_t alloc_overhead_bytes;
308 int32_t max_allocated_bytes; 308 int32_t max_allocated_bytes;
309 }; 309 };
310 310
311 //------------------------------------------------------------------------------ 311 //------------------------------------------------------------------------------
312 // A "snapshotted" representation of the DeathData for a particular profiling 312 // A "snapshotted" representation of the DeathData for a particular profiling
313 // phase. Used as an element of the list of phase snapshots owned by DeathData. 313 // phase. Used as an element of the list of phase snapshots owned by DeathData.
314 314
315 struct DeathDataPhaseSnapshot { 315 struct DeathDataPhaseSnapshot {
316 DeathDataPhaseSnapshot(int profiling_phase, 316 DeathDataPhaseSnapshot(int profiling_phase,
317 const DeathData& death_data, 317 const DeathData& death_data,
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after
386 int32_t queue_duration_max() const { 386 int32_t queue_duration_max() const {
387 return base::subtle::NoBarrier_Load(&queue_duration_max_); 387 return base::subtle::NoBarrier_Load(&queue_duration_max_);
388 } 388 }
389 int32_t queue_duration_sample() const { 389 int32_t queue_duration_sample() const {
390 return base::subtle::NoBarrier_Load(&queue_duration_sample_); 390 return base::subtle::NoBarrier_Load(&queue_duration_sample_);
391 } 391 }
392 int32_t alloc_ops() const { 392 int32_t alloc_ops() const {
393 return base::subtle::NoBarrier_Load(&alloc_ops_); 393 return base::subtle::NoBarrier_Load(&alloc_ops_);
394 } 394 }
395 int32_t free_ops() const { return base::subtle::NoBarrier_Load(&free_ops_); } 395 int32_t free_ops() const { return base::subtle::NoBarrier_Load(&free_ops_); }
396 int32_t allocated_bytes() const { 396 int64_t allocated_bytes() const {
397 return base::subtle::NoBarrier_Load(&allocated_bytes_); 397 return ConsistentCumulativeByteCountRead(&allocated_bytes_);
398 } 398 }
399 int32_t freed_bytes() const { 399 int64_t freed_bytes() const {
400 return base::subtle::NoBarrier_Load(&freed_bytes_); 400 return ConsistentCumulativeByteCountRead(&freed_bytes_);
401 } 401 }
402 int32_t alloc_overhead_bytes() const { 402 int64_t alloc_overhead_bytes() const {
403 return base::subtle::NoBarrier_Load(&alloc_overhead_bytes_); 403 return ConsistentCumulativeByteCountRead(&alloc_overhead_bytes_);
404 } 404 }
405 int32_t max_allocated_bytes() const { 405 int64_t max_allocated_bytes() const {
406 return base::subtle::NoBarrier_Load(&max_allocated_bytes_); 406 return base::subtle::NoBarrier_Load(&max_allocated_bytes_);
407 } 407 }
408 const DeathDataPhaseSnapshot* last_phase_snapshot() const { 408 const DeathDataPhaseSnapshot* last_phase_snapshot() const {
409 return last_phase_snapshot_; 409 return last_phase_snapshot_;
410 } 410 }
411 411
412 // Called when the current profiling phase, identified by |profiling_phase|, 412 // Called when the current profiling phase, identified by |profiling_phase|,
413 // ends. 413 // ends.
414 // Must be called only on the snapshot thread. 414 // Must be called only on the snapshot thread.
415 void OnProfilingPhaseCompleted(int profiling_phase); 415 void OnProfilingPhaseCompleted(int profiling_phase);
416 416
417 private: 417 private:
418 #if defined(ARCH_CPU_64_BITS)
419 using CumulativeByteCount = base::subtle::Atomic64;
420 #else
421 struct CumulativeByteCount {
422 base::subtle::Atomic32 hi_word;
423 base::subtle::Atomic32 lo_word;
424 };
425 #endif
426
427 // Reads a cumulative byte counter consistently.
428 int64_t ConsistentCumulativeByteCountRead(
429 const CumulativeByteCount* count) const;
430
431 // Reads the value of a cumulative byte count; only returns consistent
432 // results on the owning thread.
433 static int64_t CumulativeByteCountRead(const CumulativeByteCount* count);
434
418 // A saturating addition operation for member variables. This elides the 435 // A saturating addition operation for member variables. This elides the
419 // use of atomic-primitive reads for members that are only written on the 436 // use of atomic-primitive reads for members that are only written on the
420 // owning thread. 437 // owning thread.
421 static void SaturatingMemberAdd(const uint32_t addend, 438 static void SaturatingMemberAdd(const uint32_t addend,
422 base::subtle::Atomic32* sum); 439 base::subtle::Atomic32* sum);
423 440
441 void SaturatingByteCountMemberAdd(const uint32_t addend,
442 CumulativeByteCount* sum);
443
424 // Members are ordered from most regularly read and updated, to least 444 // Members are ordered from most regularly read and updated, to least
425 // frequently used. This might help a bit with cache lines. 445 // frequently used. This might help a bit with cache lines.
426 // Number of runs seen (divisor for calculating averages). 446 // Number of runs seen (divisor for calculating averages).
427 // Can be incremented only on the death thread. 447 // Can be incremented only on the death thread.
428 base::subtle::Atomic32 count_; 448 base::subtle::Atomic32 count_;
429 449
430 // Count used in determining probability of selecting exec/queue times from a 450 // Count used in determining probability of selecting exec/queue times from a
431 // recorded death as samples. 451 // recorded death as samples.
432 // Gets incremented only on the death thread, but can be set to 0 by 452 // Gets incremented only on the death thread, but can be set to 0 by
433 // OnProfilingPhaseCompleted() on the snapshot thread. 453 // OnProfilingPhaseCompleted() on the snapshot thread.
434 base::subtle::Atomic32 sample_probability_count_; 454 base::subtle::Atomic32 sample_probability_count_;
435 455
436 // Basic tallies, used to compute averages. Can be incremented only on the 456 // Basic tallies, used to compute averages. Can be incremented only on the
437 // death thread. 457 // death thread.
438 base::subtle::Atomic32 run_duration_sum_; 458 base::subtle::Atomic32 run_duration_sum_;
439 base::subtle::Atomic32 queue_duration_sum_; 459 base::subtle::Atomic32 queue_duration_sum_;
440 // Max values, used by local visualization routines. These are often read, 460 // Max values, used by local visualization routines. These are often read,
441 // but rarely updated. The max values get assigned only on the death thread, 461 // but rarely updated. The max values get assigned only on the death thread,
442 // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the 462 // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the
443 // snapshot thread. 463 // snapshot thread.
444 base::subtle::Atomic32 run_duration_max_; 464 base::subtle::Atomic32 run_duration_max_;
445 base::subtle::Atomic32 queue_duration_max_; 465 base::subtle::Atomic32 queue_duration_max_;
446 466
447 // The cumulative number of allocation and free operations. 467 // The cumulative number of allocation and free operations.
448 base::subtle::Atomic32 alloc_ops_; 468 base::subtle::Atomic32 alloc_ops_;
449 base::subtle::Atomic32 free_ops_; 469 base::subtle::Atomic32 free_ops_;
450 470
471 #if !defined(ARCH_CPU_64_BITS)
472 // On 32-bit systems this counter is used to achieve consistent (seqlock-style)
473 // reads of the cumulative byte counts. Its value is odd while an update is in
474 // progress, and even while quiescent. If it holds the same even value both
475 // before and after reading the cumulative counts, the read is consistent.
476 base::subtle::Atomic32 byte_update_counter_;
477 #endif
478
451 // The number of bytes allocated by the task. 479 // The number of bytes allocated by the task.
452 base::subtle::Atomic32 allocated_bytes_; 480 CumulativeByteCount allocated_bytes_;
453 481
454 // The number of bytes freed by the task. 482 // The number of bytes freed by the task.
455 base::subtle::Atomic32 freed_bytes_; 483 CumulativeByteCount freed_bytes_;
456 484
457 // The cumulative number of overhead bytes. Where available this yields an 485 // The cumulative number of overhead bytes. Where available this yields an
458 // estimate of the heap overhead for allocations. 486 // estimate of the heap overhead for allocations.
459 base::subtle::Atomic32 alloc_overhead_bytes_; 487 CumulativeByteCount alloc_overhead_bytes_;
460 488
461 // The high-watermark for the number of outstanding heap allocated bytes. 489 // The high-watermark for the number of outstanding heap allocated bytes.
462 base::subtle::Atomic32 max_allocated_bytes_; 490 base::subtle::Atomic32 max_allocated_bytes_;
463 491
464 // Samples, used by crowd sourcing gatherers. These are almost never read, 492 // Samples, used by crowd sourcing gatherers. These are almost never read,
465 // and rarely updated. They can be modified only on the death thread. 493 // and rarely updated. They can be modified only on the death thread.
466 base::subtle::Atomic32 run_duration_sample_; 494 base::subtle::Atomic32 run_duration_sample_;
467 base::subtle::Atomic32 queue_duration_sample_; 495 base::subtle::Atomic32 queue_duration_sample_;
468 496
469 // Snapshot of this death data made at the last profiling phase completion, if 497 // Snapshot of this death data made at the last profiling phase completion, if
(...skipping 420 matching lines...) Expand 10 before | Expand all | Expand 10 after
890 ProcessDataSnapshot(const ProcessDataSnapshot& other); 918 ProcessDataSnapshot(const ProcessDataSnapshot& other);
891 ~ProcessDataSnapshot(); 919 ~ProcessDataSnapshot();
892 920
893 PhasedProcessDataSnapshotMap phased_snapshots; 921 PhasedProcessDataSnapshotMap phased_snapshots;
894 base::ProcessId process_id; 922 base::ProcessId process_id;
895 }; 923 };
896 924
897 } // namespace tracked_objects 925 } // namespace tracked_objects
898 926
899 #endif // BASE_TRACKED_OBJECTS_H_ 927 #endif // BASE_TRACKED_OBJECTS_H_
OLDNEW
« no previous file with comments | « no previous file | base/tracked_objects.cc » ('j') | base/tracked_objects.cc » ('J')

Powered by Google App Engine
This is Rietveld 408576698