OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/debug/trace_memory.h" | |
6 | |
7 #include "base/debug/leak_annotations.h" | |
8 #include "base/debug/trace_event.h" | |
9 #include "base/lazy_instance.h" | |
10 #include "base/logging.h" | |
11 #include "base/memory/scoped_ptr.h" | |
12 #include "base/message_loop.h" | |
13 #include "base/threading/thread_local.h" | |
14 | |
15 // TODO(jamescook): Windows support for memory tracing. | |
16 #if !defined(NO_TCMALLOC) && !defined(OS_NACL) && (defined(OS_LINUX) || defined(OS_ANDROID)) | |
dsinclair
2013/06/18 15:30:15
Could this be done through gyp? Have a trace_memor…
James Cook
2013/06/29 00:02:42
I think we follow this pattern of using ifdefs oth…
| |
17 #include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h" | |
18 #endif | |
19 | |
20 namespace base { | |
21 namespace debug { | |
22 | |
23 namespace { | |
24 | |
25 // Maximum number of nested TRACE_MEMORY scopes to record. Must be greater than | |
26 // or equal to HeapProfileTable::kMaxStackDepth. | |
27 const int kMaxStackSize = 32; | |
28 | |
29 ///////////////////////////////////////////////////////////////////////////// | |
30 // Holds a memory dump until the tracing system needs to serialize it. | |
31 class MemoryDumpHolder : public base::debug::ConvertableToTraceFormat { | |
32 public: | |
33 // Takes ownership of dump, which must be a JSON string, allocated with | |
34 // malloc() and NULL terminated. | |
35 explicit MemoryDumpHolder(char* dump) : dump_(dump) {} | |
36 virtual ~MemoryDumpHolder() { free(dump_); } | |
37 | |
38 // base::debug::ConvertableToTraceFormat overrides: | |
39 virtual void AppendAsTraceFormat(std::string* out) const OVERRIDE { | |
40 out->append(dump_); | |
41 } | |
42 | |
43 private: | |
44 char* dump_; | |
45 | |
46 DISALLOW_COPY_AND_ASSIGN(MemoryDumpHolder); | |
47 }; | |
48 | |
49 ///////////////////////////////////////////////////////////////////////////// | |
50 // Records a stack of TRACE_MEMORY events. One per thread is required. | |
51 struct TraceMemoryStack { | |
52 TraceMemoryStack() : index_(0) { | |
53 memset(category_stack_, 0, kMaxStackSize * sizeof(const char*)); | |
54 } | |
55 | |
56 // Points to the next free entry. | |
57 int index_; | |
58 const char* category_stack_[kMaxStackSize]; | |
59 }; | |
60 | |
61 // One stack of TRACE_MEMORY event data per thread. | |
62 LazyInstance<ThreadLocalPointer<TraceMemoryStack> >::Leaky trace_memory_stack = | |
dsinclair
2013/06/18 15:30:15
Not sure if it's in the styleguide, but in trace_e…
James Cook
2013/06/29 00:02:42
Style guide says it is optional to use g_, but I l…
| |
63 LAZY_INSTANCE_INITIALIZER; | |
64 | |
65 // Initializes the thread-local trace memory stack and returns it. | |
66 TraceMemoryStack* InitTraceMemoryStack() { | |
67 DCHECK(!trace_memory_stack.Get().Get()); | |
68 // Intentionally leak one stack per thread. | |
69 TraceMemoryStack* leaked_stack = new TraceMemoryStack; | |
70 ANNOTATE_LEAKING_OBJECT_PTR(leaked_stack); | |
71 trace_memory_stack.Get().Set(leaked_stack); | |
72 return trace_memory_stack.Get().Get(); | |
73 } | |
74 | |
75 // Returns a "pseudo-stack" of pointers to trace events. | |
76 // TODO(jamescook): Record both category and name, perhaps in a pair for speed. | |
77 int GetPseudoStack(void** stack_out) { | |
dsinclair
2013/06/18 15:30:15
category_stack_ has const char*, why use void* ins…
| |
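No reply from James is visible here. One plausible reason, which is my assumption rather than anything stated in this CL, is that the signature is dictated by the profiler hook: since ::SetPseudoStackGenerator(&GetPseudoStack) below takes the address of this function, the callback type must be compatible with a function taking void** and returning int, roughly:

  // Assumed shape of the profiler's callback type (the typedef name is
  // hypothetical); the const char* categories are handed over to the heap
  // profiler as opaque frame addresses.
  typedef int (*PseudoStackGenerator)(void** stack_out);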
78 TraceMemoryStack* stack = trace_memory_stack.Get().Get(); | |
79 // If the tracing system isn't fully initialized, just skip this allocation. | |
80 // Attempting to initialize will allocate memory, causing this function to | |
81 // be called recursively from inside the allocator. | |
82 if (!stack) | |
83 return 0; | |
84 // Copy out a maximum of kMaxStackSize stack entries. | |
85 const int count = | |
86 stack->index_ < kMaxStackSize ? stack->index_ : kMaxStackSize; | |
87 // Note that memcpy() works for zero bytes. | |
88 memcpy(stack_out, stack->category_stack_, count * sizeof(void*)); | |
89 return count; | |
90 } | |
91 | |
92 // Caller owns the returned char* and must release it with free(). | |
93 char* TraceMemoryDumpAsString() { | |
94 #if !defined(NO_TCMALLOC) && !defined(OS_NACL) && (defined(OS_LINUX) || defined(OS_ANDROID)) | |
95 DVLOG(1) << "TraceMemoryDumpAsString"; | |
96 return ::GetHeapProfile(); | |
97 #else | |
98 NOTREACHED(); | |
99 return NULL; | |
100 #endif | |
101 } | |
102 | |
103 // If memory tracing is enabled, dumps a memory profile to the tracing system. | |
104 void DumpMemoryProfile() { | |
105 DVLOG(1) << "DumpMemoryProfile"; | |
106 // Don't trace allocations here in the memory tracing system. | |
107 TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"), TRACE_MEMORY_IGNORE); | |
108 // Check to see if tracing is enabled for the memory category. | |
109 bool enabled; | |
110 TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("memory"), | |
111 &enabled); | |
112 if (enabled) { | |
113 // MemoryDumpHolder takes ownership of this string. | |
114 char* dump = TraceMemoryDumpAsString(); | |
115 scoped_ptr<MemoryDumpHolder> dump_holder(new MemoryDumpHolder(dump)); | |
116 const int kSnapshotId = 1; | |
117 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( | |
118 "memory", | |
dsinclair
2013/06/18 15:30:15
TRACE_DISABLED_BY_DEFAULT("memory") ?
James Cook
2013/06/29 00:02:42
Yes, good catch.
| |
119 "memory::Heap", | |
120 kSnapshotId, | |
121 dump_holder.PassAs<base::debug::ConvertableToTraceFormat>()); | |
122 } | |
123 } | |
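For illustration only, here is what the snapshot call would look like with the category group fixed as dsinclair points out above (James agrees with the catch); this is a sketch of the follow-up, not the code as uploaded in this patch set:

  TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("memory"),
      "memory::Heap",
      kSnapshotId,
      dump_holder.PassAs<base::debug::ConvertableToTraceFormat>());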
124 | |
125 void TraceMemoryStart() { | |
126 #if !defined(NO_TCMALLOC) && !defined(OS_NACL) && (defined(OS_LINUX) || defined(OS_ANDROID)) | |
127 DVLOG(1) << "Starting trace memory"; | |
128 // Ensure thread-local-storage is initialized by creating a dummy event. | |
129 ScopedTraceMemory initialize(TRACE_MEMORY_IGNORE); | |
dsinclair
2013/06/18 15:30:15
Wouldn't it be easier to just get the stack an…
James Cook
2013/06/29 00:02:42
Changed InitTraceMemoryStack() to GetTraceMemorySt…
| |
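A sketch of the follow-up change James describes above; the name GetTraceMemoryStack() and its exact body are inferred from the truncated reply, so treat this as an assumption. The idea is a getter that initializes the thread-local stack on first use, so callers do not need a dummy ScopedTraceMemory:

  TraceMemoryStack* GetTraceMemoryStack() {
    TraceMemoryStack* stack = trace_memory_stack.Get().Get();
    if (stack)
      return stack;
    // Intentionally leak one stack per thread, as InitTraceMemoryStack() does.
    stack = new TraceMemoryStack;
    ANNOTATE_LEAKING_OBJECT_PTR(stack);
    trace_memory_stack.Get().Set(stack);
    return stack;
  }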
130 ::SetPseudoStackGenerator(&GetPseudoStack); | |
131 ::HeapProfilerStart(NULL); | |
132 #else | |
133 NOTREACHED(); | |
134 #endif | |
135 } | |
136 | |
137 void TraceMemoryStop() { | |
138 #if !defined(NO_TCMALLOC) && !defined(OS_NACL) && (defined(OS_LINUX) || defined(OS_ANDROID)) | |
139 DVLOG(1) << "Stopping trace memory"; | |
140 ::HeapProfilerStop(); | |
141 #else | |
142 NOTREACHED(); | |
143 #endif | |
144 } | |
145 | |
146 } // namespace | |
147 | |
148 ////////////////////////////////////////////////////////////////////////////// | |
149 | |
150 TraceMemoryTraceLogObserver::TraceMemoryTraceLogObserver( | |
151 scoped_refptr<MessageLoopProxy> message_loop_proxy) | |
152 : message_loop_proxy_(message_loop_proxy), | |
153 weak_factory_(this) { | |
154 TraceLog::GetInstance()->AddEnabledStateObserver(this); | |
155 } | |
156 | |
157 TraceMemoryTraceLogObserver::~TraceMemoryTraceLogObserver() { | |
158 if (dump_timer_.IsRunning()) | |
159 StopProfiling(); | |
160 TraceLog::GetInstance()->RemoveEnabledStateObserver(this); | |
161 } | |
162 | |
163 // base::debug::TraceLog::EnabledStateChangedObserver overrides: | |
164 void TraceMemoryTraceLogObserver::OnTraceLogEnabled() { | |
165 DVLOG(1) << "OnTraceLogEnabled"; | |
166 DCHECK(message_loop_proxy_->PostTask( | |
167 FROM_HERE, | |
168 base::Bind(&TraceMemoryTraceLogObserver::StartProfiling, | |
169 weak_factory_.GetWeakPtr()))); | |
170 } | |
171 | |
172 void TraceMemoryTraceLogObserver::OnTraceLogDisabled() { | |
173 DVLOG(1) << "OnTraceLogDisabled"; | |
174 DCHECK(message_loop_proxy_->PostTask( | |
175 FROM_HERE, | |
176 base::Bind(&TraceMemoryTraceLogObserver::StopProfiling, | |
177 weak_factory_.GetWeakPtr()))); | |
178 } | |
179 | |
180 void TraceMemoryTraceLogObserver::StartProfiling() { | |
181 TraceMemoryStart(); | |
dsinclair
2013/06/18 15:30:15
Should this do a:
if (dump_timer_.IsRunning())
James Cook
2013/06/29 00:02:42
Good idea. Done.
| |
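A sketch of the guard James agreed to add; dsinclair's comment is cut off, so the rationale comment below is my assumption about the intent. StartProfiling() would bail out early if the dump timer is already running:

  void TraceMemoryTraceLogObserver::StartProfiling() {
    // Already profiling, e.g. tracing was enabled again without a disable.
    if (dump_timer_.IsRunning())
      return;
    TraceMemoryStart();
    const int kDumpIntervalSeconds = 5;
    dump_timer_.Start(FROM_HERE,
                      TimeDelta::FromSeconds(kDumpIntervalSeconds),
                      base::Bind(&DumpMemoryProfile));
  }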
182 const int kDumpIntervalSeconds = 5; | |
183 dump_timer_.Start(FROM_HERE, | |
184 TimeDelta::FromSeconds(kDumpIntervalSeconds), | |
185 base::Bind(&DumpMemoryProfile)); | |
186 } | |
187 | |
188 void TraceMemoryTraceLogObserver::StopProfiling() { | |
189 dump_timer_.Stop(); | |
190 TraceMemoryStop(); | |
191 } | |
192 | |
193 bool TraceMemoryTraceLogObserver::IsTimerRunningForTest() const { | |
194 return dump_timer_.IsRunning(); | |
195 } | |
196 | |
197 ///////////////////////////////////////////////////////////////////////////// | |
198 | |
199 ScopedTraceMemory::ScopedTraceMemory(const char* category) { | |
200 // Get our thread's copy of the stack. | |
201 TraceMemoryStack* stack = trace_memory_stack.Get().Get(); | |
202 if (!stack) | |
203 stack = InitTraceMemoryStack(); | |
dsinclair
2013/06/18 15:30:15
If InitTraceMemoryStack() always returns this can…
James Cook
2013/06/29 00:02:42
Done.
| |
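With an always-initializing getter (see the GetTraceMemoryStack() sketch earlier), the null check dsinclair flags here can be dropped. A sketch of the simplified constructor, assuming that change lands:

  ScopedTraceMemory::ScopedTraceMemory(const char* category) {
    TraceMemoryStack* stack = GetTraceMemoryStack();
    const int index = stack->index_;
    // Allow deep nesting, but only record |kMaxStackSize| entries.
    if (index < kMaxStackSize)
      stack->category_stack_[index] = category;
    stack->index_++;
  }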
204 const int index = stack->index_; | |
205 // Allow deep nesting of stacks (needed for tests), but only record | |
206 // |kMaxStackSize| entries. | |
207 if (index < kMaxStackSize) | |
208 stack->category_stack_[index] = category; | |
209 stack->index_++; | |
210 } | |
211 | |
212 ScopedTraceMemory::~ScopedTraceMemory() { | |
213 // Get our thread's copy of the stack. | |
214 TraceMemoryStack* stack = trace_memory_stack.Get().Get(); | |
215 stack->index_--; | |
216 DCHECK_GE(stack->index_, 0) << "stack underflow"; | |
217 } | |
218 | |
219 // static | |
220 int ScopedTraceMemory::GetStackIndexForTest() { | |
221 TraceMemoryStack* stack = trace_memory_stack.Get().Get(); | |
222 if (!stack) | |
223 stack = InitTraceMemoryStack(); | |
224 return stack->index_; | |
225 } | |
226 | |
227 // static | |
228 const char* ScopedTraceMemory::GetItemForTest(int index) { | |
229 TraceMemoryStack* stack = trace_memory_stack.Get().Get(); | |
230 if (!stack) | |
231 stack = InitTraceMemoryStack(); | |
232 return stack->category_stack_[index]; | |
233 } | |
234 | |
235 } // namespace debug | |
236 } // namespace base | |