// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Trace events are for tracking application performance.
//
// Events are issued against categories. Whereas LOG's
// categories are statically defined, TRACE categories are created
// implicitly with a string. For example:
//   GPU_TRACE_EVENT_INSTANT0("MY_SUBSYSTEM", "SomeImportantEvent")
//
// Events can be INSTANT, or can be pairs of BEGIN and END:
//   GPU_TRACE_EVENT_BEGIN0("MY_SUBSYSTEM", "SomethingCostly")
//   doSomethingCostly()
//   GPU_TRACE_EVENT_END0("MY_SUBSYSTEM", "SomethingCostly")
//
// A common use case is to trace entire function scopes. This
// issues a trace BEGIN and END automatically:
//   void doSomethingCostly() {
//     GPU_TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
//     ...
//   }
//
// Additional parameters can be associated with an event:
//   void doSomethingCostly2(int howMuch) {
//     GPU_TRACE_EVENT1("MY_SUBSYSTEM", "doSomethingCostly",
//         "howMuch", StringPrintf("%i", howMuch).c_str());
//     ...
//   }
//
// The trace system will automatically add to this information the
// current process id, thread id, a timestamp down to the
// microsecond, as well as the file and line number of the calling location.
//
// By default, trace collection is compiled in, but turned off at runtime.
// Collecting trace data is the responsibility of the embedding
// application. In Chrome's case, navigating to about:gpu will turn on
// tracing and display data collected across all active processes.
//

#ifndef GPU_TRACE_EVENT_H_
#define GPU_TRACE_EVENT_H_
#pragma once

#if defined(__native_client__)

// Native Client needs to avoid pulling in base/ headers,
// so stub out the tracing code at compile time. The stub names must match
// the real macros defined below so the same call sites compile either way.
#define GPU_TRACE_EVENT0(x0, x1) { }
#define GPU_TRACE_EVENT1(x0, x1, x2, x3) { }
#define GPU_TRACE_EVENT2(x0, x1, x2, x3, x4, x5) { }
#define GPU_TRACE_EVENT_INSTANT0(x0, x1) { }
#define GPU_TRACE_EVENT_INSTANT1(x0, x1, x2, x3) { }
#define GPU_TRACE_EVENT_INSTANT2(x0, x1, x2, x3, x4, x5) { }
#define GPU_TRACE_EVENT_BEGIN0(x0, x1) { }
#define GPU_TRACE_EVENT_BEGIN1(x0, x1, x2, x3) { }
#define GPU_TRACE_EVENT_BEGIN2(x0, x1, x2, x3, x4, x5) { }
#define GPU_TRACE_EVENT_END0(x0, x1) { }
#define GPU_TRACE_EVENT_END1(x0, x1, x2, x3) { }
#define GPU_TRACE_EVENT_END2(x0, x1, x2, x3, x4, x5) { }

#else

64 #include "build/build_config.h" | |
65 | |
66 #include <string> | |
67 | |
68 #include "base/memory/scoped_ptr.h" | |
69 #include "base/atomicops.h" | |
70 #include "base/memory/scoped_vector.h" | |
71 #include "base/memory/singleton.h" | |
72 #include "base/time.h" | |
73 #include "base/timer.h" | |
74 #include "base/callback.h" | |
75 #include "base/string_util.h" | |
76 #include <vector> | |
77 | |
78 | |
// Implementation detail: trace event macros create temporary variables
// to keep instrumentation overhead low. These macros give each temporary
// variable a unique name based on the line number to prevent name collisions.
#define GPU_TRACE_EVENT_UNIQUE_IDENTIFIER3(a,b) a##b
#define GPU_TRACE_EVENT_UNIQUE_IDENTIFIER2(a,b) \
    GPU_TRACE_EVENT_UNIQUE_IDENTIFIER3(a,b)
#define GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(name_prefix) \
    GPU_TRACE_EVENT_UNIQUE_IDENTIFIER2(name_prefix, __LINE__)
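//
// Illustrative expansion (added note, not part of the original macros): a use
// of GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic) appearing on, say, line 123
// of a source file goes through one extra level of indirection so that
// __LINE__ is expanded before token pasting:
//
//   GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic)              // on line 123
//     -> GPU_TRACE_EVENT_UNIQUE_IDENTIFIER2(catstatic, __LINE__)
//     -> GPU_TRACE_EVENT_UNIQUE_IDENTIFIER3(catstatic, 123)
//     -> catstatic123
//
// so trace macros on different lines of the same scope get distinct
// static variables.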

// Records a pair of begin and end events called "name" for the current
// scope, with 0, 1 or 2 associated arguments. If the category is not
// enabled, then this does nothing.
#define GPU_TRACE_EVENT0(category, name) \
    GPU_TRACE_EVENT1(category, name, NULL, 0)
#define GPU_TRACE_EVENT1(category, name, arg1name, arg1val) \
    GPU_TRACE_EVENT2(category, name, arg1name, arg1val, NULL, 0)
#define GPU_TRACE_EVENT2(category, name, arg1name, arg1val, \
                         arg2name, arg2val) \
  static gpu::TraceCategory* \
      GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic) = \
          gpu::TraceLog::GetInstance()->GetCategory(category); \
  if (base::subtle::Acquire_Load( \
      &(GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic))->enabled_)) { \
    gpu::TraceLog::GetInstance()->AddTraceEvent( \
        gpu::GPU_TRACE_EVENT_PHASE_BEGIN, \
        __FILE__, __LINE__, \
        GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic), \
        name, \
        arg1name, arg1val, \
        arg2name, arg2val); \
  } \
  gpu::internal::TraceEndOnScopeClose \
      GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(profileScope)( \
          __FILE__, __LINE__, \
          GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic), name);
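//
// A rough sketch of what this means at a call site (added illustration;
// "MY_SUBSYSTEM" and the function name are made up): the first execution
// caches the category pointer in a block-scope static, every execution checks
// the category's enabled_ flag with an atomic load, and the
// TraceEndOnScopeClose object records the matching END event when the
// enclosing scope exits:
//
//   void doSomethingCostly() {
//     GPU_TRACE_EVENT0("MY_SUBSYSTEM", "doSomethingCostly");
//     // ... work ...
//   }  // <- END event recorded here, if the category is enabled.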

// Records a single event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
#define GPU_TRACE_EVENT_INSTANT0(category, name) \
    GPU_TRACE_EVENT_INSTANT1(category, name, NULL, 0)
#define GPU_TRACE_EVENT_INSTANT1(category, name, arg1name, arg1val) \
    GPU_TRACE_EVENT_INSTANT2(category, name, arg1name, arg1val, NULL, 0)
#define GPU_TRACE_EVENT_INSTANT2(category, name, arg1name, arg1val, \
                                 arg2name, arg2val) \
  static gpu::TraceCategory* \
      GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic) = \
          gpu::TraceLog::GetInstance()->GetCategory(category); \
  if (base::subtle::Acquire_Load( \
      &(GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic))->enabled_)) { \
    gpu::TraceLog::GetInstance()->AddTraceEvent( \
        gpu::GPU_TRACE_EVENT_PHASE_INSTANT, \
        __FILE__, __LINE__, \
        GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic), \
        name, \
        arg1name, arg1val, \
        arg2name, arg2val); \
  }

// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
#define GPU_TRACE_EVENT_BEGIN0(category, name) \
    GPU_TRACE_EVENT_BEGIN1(category, name, NULL, 0)
#define GPU_TRACE_EVENT_BEGIN1(category, name, arg1name, arg1val) \
    GPU_TRACE_EVENT_BEGIN2(category, name, arg1name, arg1val, NULL, 0)
#define GPU_TRACE_EVENT_BEGIN2(category, name, arg1name, arg1val, \
                               arg2name, arg2val) \
  static gpu::TraceCategory* \
      GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic) = \
          gpu::TraceLog::GetInstance()->GetCategory(category); \
  if (base::subtle::Acquire_Load( \
      &(GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic))->enabled_)) { \
    gpu::TraceLog::GetInstance()->AddTraceEvent( \
        gpu::GPU_TRACE_EVENT_PHASE_BEGIN, \
        __FILE__, __LINE__, \
        GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic), \
        name, \
        arg1name, arg1val, \
        arg2name, arg2val); \
  }

// Records a single END event for "name" immediately. If the category
// is not enabled, then this does nothing.
#define GPU_TRACE_EVENT_END0(category, name) \
  static gpu::TraceCategory* \
      GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic) = \
          gpu::TraceLog::GetInstance()->GetCategory(category); \
  if (base::subtle::Acquire_Load( \
      &(GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic))->enabled_)) { \
    gpu::TraceLog::GetInstance()->AddTraceEvent( \
        gpu::GPU_TRACE_EVENT_PHASE_END, \
        __FILE__, __LINE__, \
        GPU_TRACE_EVENT_UNIQUE_IDENTIFIER(catstatic), \
        name, \
        NULL, 0, \
        NULL, 0); \
  }

namespace gpu {

// Categories allow enabling/disabling of streams of trace events.
// Don't manipulate the category object directly, as this may lead
// to threading issues. Use the TraceLog methods instead.
class TraceCategory {
 public:
  TraceCategory();
  ~TraceCategory();

  void set(const char* name, bool enabled) {
    name_ = name;
    base::subtle::NoBarrier_Store(&enabled_,
        static_cast<base::subtle::Atomic32>(enabled));
  }

  const char* name() const { return name_; }

  // NEVER read this directly; let the macros do it for you.
  volatile base::subtle::Atomic32 enabled_;
 protected:
  const char* name_;
};

#define TRACE_MAX_NUM_ARGS 2

enum TraceEventPhase {
  GPU_TRACE_EVENT_PHASE_BEGIN,
  GPU_TRACE_EVENT_PHASE_END,
  GPU_TRACE_EVENT_PHASE_INSTANT
};

// Simple union of values. This is much lighter weight than base::Value, which
// requires dynamic allocation and a vtable. To keep the trace runtime overhead
// low, we want constant size storage here.
class TraceValue {
 public:
  enum Type {
    TRACE_TYPE_UNDEFINED,
    TRACE_TYPE_BOOL,
    TRACE_TYPE_UINT,
    TRACE_TYPE_INT,
    TRACE_TYPE_DOUBLE,
    TRACE_TYPE_POINTER,
    TRACE_TYPE_STRING
  };

  TraceValue() : type_(TRACE_TYPE_UNDEFINED) {
    value_.as_uint = 0ull;
  }
  TraceValue(bool rhs) : type_(TRACE_TYPE_BOOL) {
    value_.as_uint = 0ull;  // zero all bits
    value_.as_bool = rhs;
  }
  TraceValue(uint64 rhs) : type_(TRACE_TYPE_UINT) {
    value_.as_uint = rhs;
  }
  TraceValue(uint32 rhs) : type_(TRACE_TYPE_UINT) {
    value_.as_uint = rhs;
  }
  TraceValue(uint16 rhs) : type_(TRACE_TYPE_UINT) {
    value_.as_uint = rhs;
  }
  TraceValue(uint8 rhs) : type_(TRACE_TYPE_UINT) {
    value_.as_uint = rhs;
  }
  TraceValue(int64 rhs) : type_(TRACE_TYPE_INT) {
    value_.as_int = rhs;
  }
  TraceValue(int32 rhs) : type_(TRACE_TYPE_INT) {
    value_.as_int = rhs;
  }
  TraceValue(int16 rhs) : type_(TRACE_TYPE_INT) {
    value_.as_int = rhs;
  }
  TraceValue(int8 rhs) : type_(TRACE_TYPE_INT) {
    value_.as_int = rhs;
  }
  TraceValue(double rhs) : type_(TRACE_TYPE_DOUBLE) {
    value_.as_double = rhs;
  }
  TraceValue(const void* rhs) : type_(TRACE_TYPE_POINTER) {
    value_.as_uint = 0ull;  // zero all bits
    value_.as_pointer = rhs;
  }
  explicit TraceValue(const char* rhs) : type_(TRACE_TYPE_STRING) {
    value_.as_uint = 0ull;  // zero all bits
    value_.as_string = base::strdup(rhs);
  }
  TraceValue(const TraceValue& rhs) : type_(TRACE_TYPE_UNDEFINED) {
    operator=(rhs);
  }
  ~TraceValue() {
    Destroy();
  }

  TraceValue& operator=(const TraceValue& rhs);
  bool operator==(const TraceValue& rhs) const;
  bool operator!=(const TraceValue& rhs) const {
    return !operator==(rhs);
  }

  void Destroy();

  void AppendAsJSON(std::string* out) const;

  Type type() const {
    return type_;
  }
  uint64 as_uint() const {
    return value_.as_uint;
  }
  bool as_bool() const {
    return value_.as_bool;
  }
  int64 as_int() const {
    return value_.as_int;
  }
  double as_double() const {
    return value_.as_double;
  }
  const void* as_pointer() const {
    return value_.as_pointer;
  }
  const char* as_string() const {
    return value_.as_string;
  }

 private:
  union Value {
    bool as_bool;
    uint64 as_uint;
    int64 as_int;
    double as_double;
    const void* as_pointer;
    char* as_string;
  };

  Type type_;
  Value value_;
};
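
// Illustrative note (added, not part of the original header): the implicit
// constructors above are what let the GPU_TRACE_EVENT* macros accept plain
// literal argument values. For example, a hypothetical call
//
//   GPU_TRACE_EVENT1("MY_SUBSYSTEM", "Draw", "width", 1024);
//
// passes 1024 through TraceValue(int32), producing a value with
// type() == TRACE_TYPE_INT and as_int() == 1024.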

// Output records are "Events" and can be obtained via the
// OutputCallback whenever the logging system decides to flush. This
// can happen at any time, on any thread, or you can programmatically
// force it to happen.
struct TraceEvent {
  static void AppendAsJSON(std::string* out,
                           const std::vector<TraceEvent>& events,
                           size_t start,
                           size_t count);
  TraceEvent();
  ~TraceEvent();
  void AppendAsJSON(std::string* out) const;

  unsigned long processId;
  unsigned long threadId;
  base::TimeTicks timestamp;
  TraceEventPhase phase;
  TraceCategory* category;
  const char* name;
  const char* argNames[TRACE_MAX_NUM_ARGS];
  TraceValue argValues[TRACE_MAX_NUM_ARGS];
};


class TraceLog {
 public:
  static TraceLog* GetInstance();

  // Global enable of tracing. Currently enables all categories or not.
  // TODO(nduca): Replace with an Enable/DisableCategory() that
  // implicitly controls the global logging state.
  void SetEnabled(bool enabled);

  float GetBufferPercentFull() const;

  // When enough events are collected, they are handed (in bulk) to
  // the output callback. If no callback is set, the output will be
  // silently dropped.
  typedef Callback1<const std::string& /* json_events */>::Type OutputCallback;
  void SetOutputCallback(OutputCallback* cb);

  // The trace buffer does not flush dynamically, so when it fills up,
  // subsequent trace events will be dropped. This callback is generated when
  // the trace buffer is full.
  typedef Callback0::Type BufferFullCallback;
  void SetBufferFullCallback(BufferFullCallback* cb);

  // Forwards data collected by a child process to the registered
  // output callback.
  void AddRemotelyCollectedData(const std::string& json_events);

  // Flushes all logged data to the callback.
  void Flush();

  // Called by GPU_TRACE_EVENT* macros, don't call this directly.
  TraceCategory* GetCategory(const char* name);

  // Called by GPU_TRACE_EVENT* macros, don't call this directly.
  void AddTraceEvent(TraceEventPhase phase,
                     const char* file, int line,
                     TraceCategory* category,
                     const char* name,
                     const char* arg1name, TraceValue arg1val,
                     const char* arg2name, TraceValue arg2val);

 private:
  // This allows constructor and destructor to be private and usable only
  // by the Singleton class.
  friend struct StaticMemorySingletonTraits<TraceLog>;

  TraceLog();
  ~TraceLog();
  void FlushWithLockAlreadyHeld();

  // TODO(nduca): switch to per-thread trace buffers to reduce thread
  // synchronization.
  base::Lock lock_;
  bool enabled_;
  scoped_ptr<OutputCallback> output_callback_;
  scoped_ptr<BufferFullCallback> buffer_full_callback_;
  std::vector<TraceEvent> logged_events_;

  DISALLOW_COPY_AND_ASSIGN(TraceLog);
};
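
// Illustrative sketch of how an embedder might drive TraceLog (added note,
// not part of the original header; MyTraceSink and OnTraceData are made-up
// names, and it assumes the old-style NewCallback() factory that pairs with
// Callback1<T>::Type is available):
//
//   class MyTraceSink {
//    public:
//     void OnTraceData(const std::string& json_events) {
//       // Forward or persist the chunk of JSON-encoded events.
//     }
//   };
//
//   MyTraceSink sink;
//   gpu::TraceLog* log = gpu::TraceLog::GetInstance();
//   log->SetOutputCallback(NewCallback(&sink, &MyTraceSink::OnTraceData));
//   log->SetEnabled(true);
//   ...  // run the workload being traced
//   log->SetEnabled(false);
//   log->Flush();  // hands any remaining events to OnTraceData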

namespace internal {

// Used by GPU_TRACE_EVENTx macro. Do not use directly.
class TraceEndOnScopeClose {
 public:
  TraceEndOnScopeClose(const char* file, int line,
                       TraceCategory* category,
                       const char* name)
      : file_(file),
        line_(line),
        category_(category),
        name_(name) { }

  ~TraceEndOnScopeClose() {
    if (base::subtle::Acquire_Load(&category_->enabled_))
      gpu::TraceLog::GetInstance()->AddTraceEvent(
          gpu::GPU_TRACE_EVENT_PHASE_END,
          file_, line_,
          category_,
          name_,
          NULL, 0, NULL, 0);
  }

 private:
  const char* file_;
  int line_;
  TraceCategory* category_;
  const char* name_;
};

}  // namespace internal

}  // namespace gpu
#endif  // defined(__native_client__)
#endif  // GPU_TRACE_EVENT_H_