OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_ | 5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_ |
6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_ | 6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_ |
7 | 7 |
8 #include <vector> | 8 #include <vector> |
9 | 9 |
10 #include "base/atomicops.h" | 10 #include "base/atomicops.h" |
11 #include "base/base_export.h" | 11 #include "base/base_export.h" |
12 #include "base/logging.h" | 12 #include "base/logging.h" |
13 #include "base/macros.h" | 13 #include "base/macros.h" |
14 #include "base/trace_event/heap_profiler_allocation_context.h" | 14 #include "base/trace_event/heap_profiler_allocation_context.h" |
15 | 15 |
16 namespace base { | 16 namespace base { |
17 namespace trace_event { | 17 namespace trace_event { |
18 | 18 |
19 // The allocation context tracker keeps track of thread-local context for heap | 19 // The allocation context tracker keeps track of thread-local context for heap |
20 // profiling. It includes a pseudo stack of trace events. On every allocation | 20 // profiling. It includes a pseudo stack of trace events. On every allocation |
21 // the tracker provides a snapshot of its context in the form of an | 21 // the tracker provides a snapshot of its context in the form of an |
22 // |AllocationContext| that is to be stored together with the allocation | 22 // |AllocationContext| that is to be stored together with the allocation |
23 // details. | 23 // details. |
24 class BASE_EXPORT AllocationContextTracker { | 24 class BASE_EXPORT AllocationContextTracker { |
25 public: | 25 public: |
26 enum InitializationState { kNotInitialized = 0, kInitializing, kInitialized }; | |
27 | |
26 // Globally enables capturing allocation context. | 28 // Globally enables capturing allocation context. |
27 // TODO(ruuda): Should this be replaced by |EnableCapturing| in the future? | 29 // TODO(ruuda): Should this be replaced by |EnableCapturing| in the future? |
28 // Or at least have something that guards against enable -> disable -> enable? | 30 // Or at least have something that guards against enable -> disable -> enable? |
29 static void SetCaptureEnabled(bool enabled); | 31 static void SetCaptureEnabled(bool enabled); |
30 | 32 |
33 // Exposes the initialization state of the tracker. This is to avoid | |
34 // re-entrancy in the malloc heap profiler, which will lazily initialize the | |
petrcermak
2016/03/11 11:09:23
nit: s/that/which/ (you can't have "that" after a
petrcermak
2016/03/11 11:09:23
nit: s/lazy/lazily/
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
Done.
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
Done.
| |
35 // thread-local context tracker on the first encountered malloc, causing a nested | |
petrcermak
2016/03/11 11:09:23
s/trackes/trackers/
petrcermak
2016/03/11 11:09:23
supernit: wouldn't "first encountered malloc" read
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
Done.
Primiano Tucci (use gerrit)
2016/03/11 13:57:57
actually was "tracker" , just typoed r/s
| |
36 // allocation. | |
37 static InitializationState GetStateForCurrentThread(); | |
38 | |
39 // Enforces the initialization of the tracker. Subsequent calls to the other | |
40 // methods of this class are supposed to not cause any further malloc/new. | |
41 static AllocationContextTracker* InitializeForCurrentThread(); | |
42 | |
31 // Returns whether capturing allocation context is enabled globally. | 43 // Returns whether capturing allocation context is enabled globally. |
32 inline static bool capture_enabled() { | 44 inline static bool capture_enabled() { |
33 // A little lag after heap profiling is enabled or disabled is fine, it is | 45 // A little lag after heap profiling is enabled or disabled is fine, it is |
34 // more important that the check is as cheap as possible when capturing is | 46 // more important that the check is as cheap as possible when capturing is |
35 // not enabled, so do not issue a memory barrier in the fast path. | 47 // not enabled, so do not issue a memory barrier in the fast path. |
36 if (subtle::NoBarrier_Load(&capture_enabled_) == 0) | 48 if (subtle::NoBarrier_Load(&capture_enabled_) == 0) |
37 return false; | 49 return false; |
38 | 50 |
39 // In the slow path, an acquire load is required to pair with the release | 51 // In the slow path, an acquire load is required to pair with the release |
40 // store in |SetCaptureEnabled|. This is to ensure that the TLS slot for | 52 // store in |SetCaptureEnabled|. This is to ensure that the TLS slot for |
(...skipping 23 matching lines...) Expand all Loading... | |
64 // The pseudo stack where frames are |TRACE_EVENT| names. | 76 // The pseudo stack where frames are |TRACE_EVENT| names. |
65 std::vector<StackFrame> pseudo_stack_; | 77 std::vector<StackFrame> pseudo_stack_; |
66 | 78 |
67 DISALLOW_COPY_AND_ASSIGN(AllocationContextTracker); | 79 DISALLOW_COPY_AND_ASSIGN(AllocationContextTracker); |
68 }; | 80 }; |
69 | 81 |
70 } // namespace trace_event | 82 } // namespace trace_event |
71 } // namespace base | 83 } // namespace base |
72 | 84 |
73 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_ | 85 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_ |
OLD | NEW |