Chromium Code Reviews

Unified Diff: base/tracked_objects.h

Issue 2386123003: Add heap allocator usage to task profiler. (Closed)
Patch Set: Fix remaining clang compile errors. Created 4 years, 1 month ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef BASE_TRACKED_OBJECTS_H_
 #define BASE_TRACKED_OBJECTS_H_

 #include <stdint.h>

 #include <map>
 #include <set>
 #include <stack>
 #include <string>
 #include <utility>
 #include <vector>

+#include "base/allocator/features.h"
 #include "base/atomicops.h"
 #include "base/base_export.h"
 #include "base/containers/hash_tables.h"
+#include "base/debug/debugging_flags.h"
+#include "base/debug/thread_heap_usage_tracker.h"
 #include "base/gtest_prod_util.h"
 #include "base/lazy_instance.h"
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/process/process_handle.h"
 #include "base/profiler/tracked_time.h"
 #include "base/synchronization/lock.h"
 #include "base/threading/thread_checker.h"
 #include "base/threading/thread_local_storage.h"

(...skipping 211 matching lines...)
   // When we have a birth we update the count for this birthplace.
   void RecordBirth();

  private:
   // The number of births on this thread for our location_.
   int birth_count_;

   DISALLOW_COPY_AND_ASSIGN(Births);
 };

+// forward

Primiano Tucci (use gerrit) 2016/11/16 16:39:10: no need for the // forward annotation, it's genera
Sigurður Ásgeirsson 2016/11/16 21:30:28: Done.

+class DeathData;
 //------------------------------------------------------------------------------
 // A "snapshotted" representation of the DeathData class.

 struct BASE_EXPORT DeathDataSnapshot {
   DeathDataSnapshot();

   // Constructs the snapshot from individual values.
   // The alternative would be taking a DeathData parameter, but this would
   // create a loop since DeathData indirectly refers DeathDataSnapshot. Passing
   // a wrapper structure as a param or using an empty constructor for
   // snapshotting DeathData would be less efficient.
   DeathDataSnapshot(int count,
                     int32_t run_duration_sum,
                     int32_t run_duration_max,
                     int32_t run_duration_sample,
                     int32_t queue_duration_sum,
                     int32_t queue_duration_max,
-                    int32_t queue_duration_sample);
+                    int32_t queue_duration_sample,
+                    int32_t alloc_ops,
+                    int32_t free_ops,
+                    int32_t allocated_bytes,
+                    int32_t freed_bytes,
+                    int32_t alloc_overhead_bytes,
+                    int32_t max_allocated_bytes);
+  DeathDataSnapshot(const DeathData& death_data);
+  DeathDataSnapshot(const DeathDataSnapshot& other);
   ~DeathDataSnapshot();

   // Calculates and returns the delta between this snapshot and an earlier
   // snapshot of the same task |older|.
   DeathDataSnapshot Delta(const DeathDataSnapshot& older) const;

   int count;
   int32_t run_duration_sum;
   int32_t run_duration_max;
   int32_t run_duration_sample;
   int32_t queue_duration_sum;
   int32_t queue_duration_max;
   int32_t queue_duration_sample;
+
+  int32_t alloc_ops;
+  int32_t free_ops;
+  int32_t allocated_bytes;
+  int32_t freed_bytes;
+  int32_t alloc_overhead_bytes;
+  int32_t max_allocated_bytes;
 };
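
The added fields carry per-task heap aggregates that mirror what DeathData::RecordAllocations() (further down in this header) documents. Below is a minimal sketch of how a consumer might derive metrics from them; the stand-in struct, helper names, and values are illustrative only and not part of this patch:

    // Illustrative only: derives per-task heap metrics from the new snapshot
    // fields, following the formulas documented on RecordAllocations().
    #include <cstdint>

    struct SnapshotFields {  // stand-in for the new DeathDataSnapshot members
      int32_t alloc_ops;
      int32_t free_ops;
      int32_t allocated_bytes;
      int32_t freed_bytes;
      int32_t alloc_overhead_bytes;
      int32_t max_allocated_bytes;
    };

    // Net heap usage of the task; negative if it freed more than it allocated.
    int32_t NetHeapBytes(const SnapshotFields& s) {
      return s.allocated_bytes - s.freed_bytes;
    }

    // Average allocation size and estimated per-allocation heap overhead.
    double AverageAllocBytes(const SnapshotFields& s) {
      return s.alloc_ops ? static_cast<double>(s.allocated_bytes) / s.alloc_ops
                         : 0.0;
    }
    double AverageAllocOverheadBytes(const SnapshotFields& s) {
      return s.alloc_ops
                 ? static_cast<double>(s.alloc_overhead_bytes) / s.alloc_ops
                 : 0.0;
    }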

 //------------------------------------------------------------------------------
 // A "snapshotted" representation of the DeathData for a particular profiling
 // phase. Used as an element of the list of phase snapshots owned by DeathData.

 struct DeathDataPhaseSnapshot {
   DeathDataPhaseSnapshot(int profiling_phase,
-                         int count,
-                         int32_t run_duration_sum,
-                         int32_t run_duration_max,
-                         int32_t run_duration_sample,
-                         int32_t queue_duration_sum,
-                         int32_t queue_duration_max,
-                         int32_t queue_duration_sample,
+                         const DeathData& death_data,
                          const DeathDataPhaseSnapshot* prev);

   // Profiling phase at which completion this snapshot was taken.
   int profiling_phase;

   // Death data snapshot.
   DeathDataSnapshot death_data;

   // Pointer to a snapshot from the previous phase.
   const DeathDataPhaseSnapshot* prev;
(...skipping 12 matching lines...)
 // snapshotted.

 class BASE_EXPORT DeathData {
  public:
   DeathData();
   DeathData(const DeathData& other);
   ~DeathData();

   // Update stats for a task destruction (death) that had a Run() time of
   // |duration|, and has had a queueing delay of |queue_duration|.
-  void RecordDeath(const int32_t queue_duration,
-                   const int32_t run_duration,
-                   const uint32_t random_number);
+  void RecordDurations(const int32_t queue_duration,
+                       const int32_t run_duration,
+                       const uint32_t random_number);

Primiano Tucci (use gerrit) 2016/11/16 16:39:09: nit: add newline here
Sigurður Ásgeirsson 2016/11/16 21:30:28: Done.

+  // Update stats for a task destruction that performed |alloc_ops|
+  // allocations, |free_ops| frees, allocated |allocated_bytes| bytes, freed
+  // |freed_bytes|, where an estimated |alloc_overhead_bytes| went to heap
+  // overhead, and where at most |max_allocated_bytes| were outstanding at any
+  // one time.
+  // Note that |alloc_overhead_bytes|/|alloc_ops| yields the average estimated
+  // heap overhead of allocations in the task, and |allocated_bytes|/|alloc_ops|
+  // yields the average size of allocation.
+  // Note also that |allocated_bytes|-|freed_bytes| yields the net heap memory
+  // usage of the task, which can be negative.
+  void RecordAllocations(const uint32_t alloc_ops,
+                         const uint32_t free_ops,
+                         const uint32_t allocated_bytes,
+                         const uint32_t freed_bytes,
+                         const uint32_t alloc_overhead_bytes,
+                         const uint32_t max_allocated_bytes);
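
A rough usage sketch of the new method, assuming only the declarations in this header. The free function and the numeric values below are made up; the real caller would be the task-tracking code in the accompanying .cc file, which is outside this diff:

    // Illustrative only: records one task's timing and heap activity on a
    // DeathData instance and reads back a derived figure via the accessors
    // declared below. Values are arbitrary.
    #include <stdint.h>
    #include "base/tracked_objects.h"

    void RecordSampleTaskDeath() {
      tracked_objects::DeathData death_data;
      death_data.RecordDurations(/*queue_duration=*/2, /*run_duration=*/7,
                                 /*random_number=*/42);
      death_data.RecordAllocations(/*alloc_ops=*/10, /*free_ops=*/8,
                                   /*allocated_bytes=*/4096,
                                   /*freed_bytes=*/1024,
                                   /*alloc_overhead_bytes=*/160,
                                   /*max_allocated_bytes=*/3072);
      // Net heap usage recorded so far: 4096 - 1024 = 3072 bytes.
      int32_t net_bytes =
          death_data.allocated_bytes() - death_data.freed_bytes();
      (void)net_bytes;
    }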

   // Metrics and past snapshots accessors, used only for serialization and in
   // tests.
   int count() const { return base::subtle::NoBarrier_Load(&count_); }
   int32_t run_duration_sum() const {
     return base::subtle::NoBarrier_Load(&run_duration_sum_);
   }
   int32_t run_duration_max() const {
     return base::subtle::NoBarrier_Load(&run_duration_max_);
   }
   int32_t run_duration_sample() const {
     return base::subtle::NoBarrier_Load(&run_duration_sample_);
   }
   int32_t queue_duration_sum() const {
     return base::subtle::NoBarrier_Load(&queue_duration_sum_);
   }
   int32_t queue_duration_max() const {
     return base::subtle::NoBarrier_Load(&queue_duration_max_);
   }
   int32_t queue_duration_sample() const {
     return base::subtle::NoBarrier_Load(&queue_duration_sample_);
   }
+  int32_t alloc_ops() const {
+    return base::subtle::NoBarrier_Load(&alloc_ops_);
+  }
+  int32_t free_ops() const { return base::subtle::NoBarrier_Load(&free_ops_); }
+  int32_t allocated_bytes() const {
+    return base::subtle::NoBarrier_Load(&allocated_bytes_);
+  }
+  int32_t freed_bytes() const {
+    return base::subtle::NoBarrier_Load(&freed_bytes_);
+  }
+  int32_t alloc_overhead_bytes() const {
+    return base::subtle::NoBarrier_Load(&alloc_overhead_bytes_);
+  }
+  int32_t max_allocated_bytes() const {
+    return base::subtle::NoBarrier_Load(&max_allocated_bytes_);
+  }
   const DeathDataPhaseSnapshot* last_phase_snapshot() const {
     return last_phase_snapshot_;
   }

   // Called when the current profiling phase, identified by |profiling_phase|,
   // ends.
   // Must be called only on the snapshot thread.
   void OnProfilingPhaseCompleted(int profiling_phase);

  private:
(...skipping 17 matching lines...)
   // but rarely updated. The max values get assigned only on the death thread,
   // but these fields can be set to 0 by OnProfilingPhaseCompleted() on the
   // snapshot thread.
   base::subtle::Atomic32 run_duration_max_;
   base::subtle::Atomic32 queue_duration_max_;
   // Samples, used by crowd sourcing gatherers. These are almost never read,
   // and rarely updated. They can be modified only on the death thread.
   base::subtle::Atomic32 run_duration_sample_;
   base::subtle::Atomic32 queue_duration_sample_;

+  // The cumulative number of allocation and free operations.
+  base::subtle::Atomic32 alloc_ops_;
+  base::subtle::Atomic32 free_ops_;
+
+  // The number of bytes allocated by the task.
+  base::subtle::Atomic32 allocated_bytes_;
+
+  // The number of bytes freed by the task.
+  base::subtle::Atomic32 freed_bytes_;
+
+  // The cumulative number of overhead bytes. Where available this yields an
+  // estimate of the heap overhead for allocations.
+  base::subtle::Atomic32 alloc_overhead_bytes_;
+
+  // The high-watermark for the number of outstanding heap allocated bytes.
+  base::subtle::Atomic32 max_allocated_bytes_;
+
   // Snapshot of this death data made at the last profiling phase completion, if
   // any. DeathData owns the whole list starting with this pointer.
   // Can be accessed only on the snapshot thread.
   const DeathDataPhaseSnapshot* last_phase_snapshot_;

   DISALLOW_ASSIGN(DeathData);
 };
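
The new counters follow the same single-writer convention described in the comments above: they are updated only on the death thread and read with NoBarrier loads elsewhere. Below is a sketch of that pattern in isolation against base/atomicops.h; the HeapByteCounter class is hypothetical and deliberately simpler than the patch's actual DeathData code:

    // Illustrative only: accumulate on one thread, read cheaply from another.
    #include <stdint.h>
    #include "base/atomicops.h"

    class HeapByteCounter {
     public:
      // Called only on the writer (death) thread.
      void Add(int32_t bytes) {
        base::subtle::NoBarrier_Store(
            &bytes_, base::subtle::NoBarrier_Load(&bytes_) + bytes);
      }

      // May be called from the snapshot thread; a NoBarrier load trades strict
      // ordering for cheap reads, matching the accessors above.
      int32_t bytes() const { return base::subtle::NoBarrier_Load(&bytes_); }

     private:
      base::subtle::Atomic32 bytes_ = 0;
    };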

 //------------------------------------------------------------------------------
 // A temporary collection of data that can be sorted and summarized. It is
(...skipping 347 matching lines...)

   // Returns the start time.
   TrackedTime StartTime() const;

   // Task's duration is calculated as the wallclock duration between starting
   // and stopping this stopwatch, minus the wallclock durations of any other
   // instances that are immediately nested in this one, started and stopped on
   // this thread during that period.
   int32_t RunDurationMs() const;

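In other words, with hypothetical numbers: if this stopwatch spans 50 ms of wallclock time and one immediately nested stopwatch on the same thread accounts for 20 ms of it, then

    RunDurationMs() = 50 ms - 20 ms = 30 ms

where the 20 ms is what accumulates in the excluded_duration_ms_ member declared further down.
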
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+  const base::debug::ThreadHeapUsageTracker& heap_usage() const;

Primiano Tucci (use gerrit) 2016/11/16 16:39:09: why not inlining also this accessor?
Sigurður Ásgeirsson 2016/11/16 21:30:28: Thanks, done.

+  bool heap_tracking_enabled() const { return heap_tracking_enabled_; }
+#endif
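
A hypothetical consumer of these accessors, assuming the enclosing class is the TaskStopwatch declared in the elided portion of this header; the function name and its body are made up for illustration:

    // Illustrative only: heap tracking is conditional on build flags and
    // runtime setup, so a consumer checks the flag before using the tracker.
    #include "base/debug/thread_heap_usage_tracker.h"
    #include "base/tracked_objects.h"

    #if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
    void MaybeReportHeapUsage(const tracked_objects::TaskStopwatch& stopwatch) {
      if (!stopwatch.heap_tracking_enabled())
        return;
      const base::debug::ThreadHeapUsageTracker& usage = stopwatch.heap_usage();
      // The tracker's own accessors are outside this diff; a real consumer
      // would fold the per-thread allocation counters from |usage| into the
      // task's DeathData here.
      (void)usage;
    }
    #endif  // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)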
+
   // Returns tracking info for the current thread.
   ThreadData* GetThreadData() const;

  private:
   // Time when the stopwatch was started.
   TrackedTime start_time_;

+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+  base::debug::ThreadHeapUsageTracker heap_usage_;
+  bool heap_tracking_enabled_;
+#endif
+
   // Wallclock duration of the task.
   int32_t wallclock_duration_ms_;

   // Tracking info for the current thread.
   ThreadData* current_thread_data_;

   // Sum of wallclock durations of all stopwatches that were directly nested in
   // this one.
   int32_t excluded_duration_ms_;

(...skipping 36 matching lines...)
   ProcessDataSnapshot(const ProcessDataSnapshot& other);
   ~ProcessDataSnapshot();

   PhasedProcessDataSnapshotMap phased_snapshots;
   base::ProcessId process_id;
 };

 }  // namespace tracked_objects

 #endif  // BASE_TRACKED_OBJECTS_H_
