Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(124)

Side by Side Diff: base/tracked_objects.h

Issue 2386123003: Add heap allocator usage to task profiler. (Closed)
Patch Set: Figure out where the @#$%! corruption is coming from. Move heap tracking to TaskStopwatch. Created 4 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef BASE_TRACKED_OBJECTS_H_ 5 #ifndef BASE_TRACKED_OBJECTS_H_
6 #define BASE_TRACKED_OBJECTS_H_ 6 #define BASE_TRACKED_OBJECTS_H_
7 7
8 #include <stdint.h> 8 #include <stdint.h>
9 9
10 #include <map> 10 #include <map>
11 #include <set> 11 #include <set>
12 #include <stack> 12 #include <stack>
13 #include <string> 13 #include <string>
14 #include <utility> 14 #include <utility>
15 #include <vector> 15 #include <vector>
16 16
17 #include "base/allocator/features.h"
17 #include "base/atomicops.h" 18 #include "base/atomicops.h"
18 #include "base/base_export.h" 19 #include "base/base_export.h"
20 #include "base/debug/scoped_thread_heap_usage.h"
19 #include "base/containers/hash_tables.h" 21 #include "base/containers/hash_tables.h"
20 #include "base/gtest_prod_util.h" 22 #include "base/gtest_prod_util.h"
21 #include "base/lazy_instance.h" 23 #include "base/lazy_instance.h"
22 #include "base/location.h" 24 #include "base/location.h"
23 #include "base/macros.h" 25 #include "base/macros.h"
24 #include "base/process/process_handle.h" 26 #include "base/process/process_handle.h"
25 #include "base/profiler/tracked_time.h" 27 #include "base/profiler/tracked_time.h"
26 #include "base/synchronization/lock.h" 28 #include "base/synchronization/lock.h"
27 #include "base/threading/thread_checker.h" 29 #include "base/threading/thread_checker.h"
28 #include "base/threading/thread_local_storage.h" 30 #include "base/threading/thread_local_storage.h"
(...skipping 212 matching lines...) Expand 10 before | Expand all | Expand 10 after
241 // When we have a birth we update the count for this birthplace. 243 // When we have a birth we update the count for this birthplace.
242 void RecordBirth(); 244 void RecordBirth();
243 245
244 private: 246 private:
245 // The number of births on this thread for our location_. 247 // The number of births on this thread for our location_.
246 int birth_count_; 248 int birth_count_;
247 249
248 DISALLOW_COPY_AND_ASSIGN(Births); 250 DISALLOW_COPY_AND_ASSIGN(Births);
249 }; 251 };
250 252
253 // forward
254 class DeathData;
251 //------------------------------------------------------------------------------ 255 //------------------------------------------------------------------------------
252 // A "snapshotted" representation of the DeathData class. 256 // A "snapshotted" representation of the DeathData class.
253 257
254 struct BASE_EXPORT DeathDataSnapshot { 258 struct BASE_EXPORT DeathDataSnapshot {
255 DeathDataSnapshot(); 259 DeathDataSnapshot();
256 260
257 // Constructs the snapshot from individual values. 261 // Constructs the snapshot from individual values.
258 // The alternative would be taking a DeathData parameter, but this would 262 // The alternative would be taking a DeathData parameter, but this would
259 // create a loop since DeathData indirectly refers DeathDataSnapshot. Passing 263 // create a loop since DeathData indirectly refers DeathDataSnapshot. Passing
260 // a wrapper structure as a param or using an empty constructor for 264 // a wrapper structure as a param or using an empty constructor for
261 // snapshotting DeathData would be less efficient. 265 // snapshotting DeathData would be less efficient.
262 DeathDataSnapshot(int count, 266 DeathDataSnapshot(int count,
263 int32_t run_duration_sum, 267 int32_t run_duration_sum,
264 int32_t run_duration_max, 268 int32_t run_duration_max,
265 int32_t run_duration_sample, 269 int32_t run_duration_sample,
266 int32_t queue_duration_sum, 270 int32_t queue_duration_sum,
267 int32_t queue_duration_max, 271 int32_t queue_duration_max,
268 int32_t queue_duration_sample); 272 int32_t queue_duration_sample,
273 int32_t alloc_ops,
274 int32_t free_ops,
275 int32_t allocated_bytes,
276 int32_t freed_bytes,
277 int32_t alloc_overhead_bytes,
278 int32_t max_allocated_bytes);
279 DeathDataSnapshot(const DeathData& death_data);
269 ~DeathDataSnapshot(); 280 ~DeathDataSnapshot();
270 281
271 // Calculates and returns the delta between this snapshot and an earlier 282 // Calculates and returns the delta between this snapshot and an earlier
272 // snapshot of the same task |older|. 283 // snapshot of the same task |older|.
273 DeathDataSnapshot Delta(const DeathDataSnapshot& older) const; 284 DeathDataSnapshot Delta(const DeathDataSnapshot& older) const;
274 285
275 int count; 286 int count;
276 int32_t run_duration_sum; 287 int32_t run_duration_sum;
277 int32_t run_duration_max; 288 int32_t run_duration_max;
278 int32_t run_duration_sample; 289 int32_t run_duration_sample;
279 int32_t queue_duration_sum; 290 int32_t queue_duration_sum;
280 int32_t queue_duration_max; 291 int32_t queue_duration_max;
281 int32_t queue_duration_sample; 292 int32_t queue_duration_sample;
293
294 int32_t alloc_ops;
295 int32_t free_ops;
296 int32_t allocated_bytes;
297 int32_t freed_bytes;
298 int32_t alloc_overhead_bytes;
299 int32_t max_allocated_bytes;
282 }; 300 };
283 301
284 //------------------------------------------------------------------------------ 302 //------------------------------------------------------------------------------
285 // A "snapshotted" representation of the DeathData for a particular profiling 303 // A "snapshotted" representation of the DeathData for a particular profiling
286 // phase. Used as an element of the list of phase snapshots owned by DeathData. 304 // phase. Used as an element of the list of phase snapshots owned by DeathData.
287 305
288 struct DeathDataPhaseSnapshot { 306 struct DeathDataPhaseSnapshot {
289 DeathDataPhaseSnapshot(int profiling_phase, 307 DeathDataPhaseSnapshot(int profiling_phase,
290 int count, 308 const DeathData& death_data,
291 int32_t run_duration_sum,
292 int32_t run_duration_max,
293 int32_t run_duration_sample,
294 int32_t queue_duration_sum,
295 int32_t queue_duration_max,
296 int32_t queue_duration_sample,
297 const DeathDataPhaseSnapshot* prev); 309 const DeathDataPhaseSnapshot* prev);
298 310
299 // Profiling phase at which completion this snapshot was taken. 311 // Profiling phase at which completion this snapshot was taken.
300 int profiling_phase; 312 int profiling_phase;
301 313
302 // Death data snapshot. 314 // Death data snapshot.
303 DeathDataSnapshot death_data; 315 DeathDataSnapshot death_data;
304 316
305 // Pointer to a snapshot from the previous phase. 317 // Pointer to a snapshot from the previous phase.
306 const DeathDataPhaseSnapshot* prev; 318 const DeathDataPhaseSnapshot* prev;
(...skipping 12 matching lines...) Expand all
319 // snapshotted. 331 // snapshotted.
320 332
321 class BASE_EXPORT DeathData { 333 class BASE_EXPORT DeathData {
322 public: 334 public:
323 DeathData(); 335 DeathData();
324 DeathData(const DeathData& other); 336 DeathData(const DeathData& other);
325 ~DeathData(); 337 ~DeathData();
326 338
327 // Update stats for a task destruction (death) that had a Run() time of 339 // Update stats for a task destruction (death) that had a Run() time of
328 // |duration|, and has had a queueing delay of |queue_duration|. 340 // |duration|, and has had a queueing delay of |queue_duration|.
329 void RecordDeath(const int32_t queue_duration, 341 void RecordDurations(const int32_t queue_duration,
330 const int32_t run_duration, 342 const int32_t run_duration,
331 const uint32_t random_number); 343 const uint32_t random_number);
344 // Update stats for a task destruction that performed |alloc_ops|
345 // allocations, |free_ops| frees, allocated |allocated_bytes| bytes, freed
346 // |freed_bytes|, where an estimated |alloc_overhead_bytes| went to heap
347 // overhead, and where at most |max_allocated_bytes| were outstanding at any
348 // one time.
349 // Note that |alloc_overhead_bytes|/|alloc_ops| yields the average estimated
350 // heap overhead of allocations in the task, and |allocated_bytes|/|alloc_ops|
351 // yields the average size of allocation.
352 // Note also that |allocated_bytes|-|freed_bytes| yields the net heap memory
353 // usage of the task, which can be negative.
354 void RecordAllocations(const uint32_t alloc_ops,
355 const uint32_t free_ops,
356 const uint32_t allocated_bytes,
357 const uint32_t freed_bytes,
358 const uint32_t alloc_overhead_bytes,
359 const uint32_t max_allocated_bytes);
332 360
333 // Metrics and past snapshots accessors, used only for serialization and in 361 // Metrics and past snapshots accessors, used only for serialization and in
334 // tests. 362 // tests.
335 int count() const { return base::subtle::NoBarrier_Load(&count_); } 363 int count() const { return base::subtle::NoBarrier_Load(&count_); }
336 int32_t run_duration_sum() const { 364 int32_t run_duration_sum() const {
337 return base::subtle::NoBarrier_Load(&run_duration_sum_); 365 return base::subtle::NoBarrier_Load(&run_duration_sum_);
338 } 366 }
339 int32_t run_duration_max() const { 367 int32_t run_duration_max() const {
340 return base::subtle::NoBarrier_Load(&run_duration_max_); 368 return base::subtle::NoBarrier_Load(&run_duration_max_);
341 } 369 }
342 int32_t run_duration_sample() const { 370 int32_t run_duration_sample() const {
343 return base::subtle::NoBarrier_Load(&run_duration_sample_); 371 return base::subtle::NoBarrier_Load(&run_duration_sample_);
344 } 372 }
345 int32_t queue_duration_sum() const { 373 int32_t queue_duration_sum() const {
346 return base::subtle::NoBarrier_Load(&queue_duration_sum_); 374 return base::subtle::NoBarrier_Load(&queue_duration_sum_);
347 } 375 }
348 int32_t queue_duration_max() const { 376 int32_t queue_duration_max() const {
349 return base::subtle::NoBarrier_Load(&queue_duration_max_); 377 return base::subtle::NoBarrier_Load(&queue_duration_max_);
350 } 378 }
351 int32_t queue_duration_sample() const { 379 int32_t queue_duration_sample() const {
352 return base::subtle::NoBarrier_Load(&queue_duration_sample_); 380 return base::subtle::NoBarrier_Load(&queue_duration_sample_);
353 } 381 }
382 int32_t alloc_ops() const {
383 return base::subtle::NoBarrier_Load(&alloc_ops_);
384 }
385 int32_t free_ops() const { return base::subtle::NoBarrier_Load(&free_ops_); }
386 int32_t allocated_bytes() const {
387 return base::subtle::NoBarrier_Load(&allocated_bytes_);
388 }
389 int32_t freed_bytes() const {
390 return base::subtle::NoBarrier_Load(&freed_bytes_);
391 }
392 int32_t alloc_overhead_bytes() const {
393 return base::subtle::NoBarrier_Load(&alloc_overhead_bytes_);
394 }
395 int32_t max_allocated_bytes() const {
396 return base::subtle::NoBarrier_Load(&max_allocated_bytes_);
397 }
354 const DeathDataPhaseSnapshot* last_phase_snapshot() const { 398 const DeathDataPhaseSnapshot* last_phase_snapshot() const {
355 return last_phase_snapshot_; 399 return last_phase_snapshot_;
356 } 400 }
357 401
358 // Called when the current profiling phase, identified by |profiling_phase|, 402 // Called when the current profiling phase, identified by |profiling_phase|,
359 // ends. 403 // ends.
360 // Must be called only on the snapshot thread. 404 // Must be called only on the snapshot thread.
361 void OnProfilingPhaseCompleted(int profiling_phase); 405 void OnProfilingPhaseCompleted(int profiling_phase);
362 406
363 private: 407 private:
(...skipping 17 matching lines...) Expand all
381 // but rarely updated. The max values get assigned only on the death thread, 425 // but rarely updated. The max values get assigned only on the death thread,
382 // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the 426 // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the
383 // snapshot thread. 427 // snapshot thread.
384 base::subtle::Atomic32 run_duration_max_; 428 base::subtle::Atomic32 run_duration_max_;
385 base::subtle::Atomic32 queue_duration_max_; 429 base::subtle::Atomic32 queue_duration_max_;
386 // Samples, used by crowd sourcing gatherers. These are almost never read, 430 // Samples, used by crowd sourcing gatherers. These are almost never read,
387 // and rarely updated. They can be modified only on the death thread. 431 // and rarely updated. They can be modified only on the death thread.
388 base::subtle::Atomic32 run_duration_sample_; 432 base::subtle::Atomic32 run_duration_sample_;
389 base::subtle::Atomic32 queue_duration_sample_; 433 base::subtle::Atomic32 queue_duration_sample_;
390 434
435 // The cumulative number of allocation and free operations.
436 base::subtle::Atomic32 alloc_ops_;
437 base::subtle::Atomic32 free_ops_;
438
439 // The number of bytes allocated by the task.
440 base::subtle::Atomic32 allocated_bytes_;
441
442 // The number of bytes freed by the task.
443 base::subtle::Atomic32 freed_bytes_;
444
445 // The cumulative number of overhead bytes. Where available this yields an
446 // estimate of the heap overhead for allocations.
447 base::subtle::Atomic32 alloc_overhead_bytes_;
448
449 // The high-watermark for the number of outstanding heap allocated bytes.
450 base::subtle::Atomic32 max_allocated_bytes_;
451
391 // Snapshot of this death data made at the last profiling phase completion, if 452 // Snapshot of this death data made at the last profiling phase completion, if
392 // any. DeathData owns the whole list starting with this pointer. 453 // any. DeathData owns the whole list starting with this pointer.
393 // Can be accessed only on the snapshot thread. 454 // Can be accessed only on the snapshot thread.
394 const DeathDataPhaseSnapshot* last_phase_snapshot_; 455 const DeathDataPhaseSnapshot* last_phase_snapshot_;
395 456
396 DISALLOW_ASSIGN(DeathData); 457 DISALLOW_ASSIGN(DeathData);
397 }; 458 };
398 459
399 //------------------------------------------------------------------------------ 460 //------------------------------------------------------------------------------
400 // A temporary collection of data that can be sorted and summarized. It is 461 // A temporary collection of data that can be sorted and summarized. It is
(...skipping 347 matching lines...) Expand 10 before | Expand all | Expand 10 after
748 809
749 // Returns the start time. 810 // Returns the start time.
750 TrackedTime StartTime() const; 811 TrackedTime StartTime() const;
751 812
752 // Task's duration is calculated as the wallclock duration between starting 813 // Task's duration is calculated as the wallclock duration between starting
753 // and stopping this stopwatch, minus the wallclock durations of any other 814 // and stopping this stopwatch, minus the wallclock durations of any other
754 // instances that are immediately nested in this one, started and stopped on 815 // instances that are immediately nested in this one, started and stopped on
755 // this thread during that period. 816 // this thread during that period.
756 int32_t RunDurationMs() const; 817 int32_t RunDurationMs() const;
757 818
819 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
820 const base::debug::HeapUsageTracker& heap_usage() const;
821 #endif
822
758 // Returns tracking info for the current thread. 823 // Returns tracking info for the current thread.
759 ThreadData* GetThreadData() const; 824 ThreadData* GetThreadData() const;
760 825
761 private: 826 private:
762 // Time when the stopwatch was started. 827 // Time when the stopwatch was started.
763 TrackedTime start_time_; 828 TrackedTime start_time_;
764 829
830 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
831 base::debug::HeapUsageTracker heap_usage_;
832 #endif
833
765 // Wallclock duration of the task. 834 // Wallclock duration of the task.
766 int32_t wallclock_duration_ms_; 835 int32_t wallclock_duration_ms_;
767 836
768 // Tracking info for the current thread. 837 // Tracking info for the current thread.
769 ThreadData* current_thread_data_; 838 ThreadData* current_thread_data_;
770 839
771 // Sum of wallclock durations of all stopwatches that were directly nested in 840 // Sum of wallclock durations of all stopwatches that were directly nested in
772 // this one. 841 // this one.
773 int32_t excluded_duration_ms_; 842 int32_t excluded_duration_ms_;
774 843
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
811 ProcessDataSnapshot(const ProcessDataSnapshot& other); 880 ProcessDataSnapshot(const ProcessDataSnapshot& other);
812 ~ProcessDataSnapshot(); 881 ~ProcessDataSnapshot();
813 882
814 PhasedProcessDataSnapshotMap phased_snapshots; 883 PhasedProcessDataSnapshotMap phased_snapshots;
815 base::ProcessId process_id; 884 base::ProcessId process_id;
816 }; 885 };
817 886
818 } // namespace tracked_objects 887 } // namespace tracked_objects
819 888
820 #endif // BASE_TRACKED_OBJECTS_H_ 889 #endif // BASE_TRACKED_OBJECTS_H_
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698