OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2014 Google Inc. | 2 * Copyright 2014 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkAtomics.h" | 8 #include "SkAtomics.h" |
9 #include "SkEventTracer.h" | 9 #include "SkEventTracer.h" |
10 #include "SkLazyPtr.h" | 10 #include "SkLazyPtr.h" |
11 | 11 |
12 class SkDefaultEventTracer : public SkEventTracer { | 12 class SkDefaultEventTracer : public SkEventTracer { |
13 virtual SkEventTracer::Handle | 13 SkEventTracer::Handle |
14 addTraceEvent(char phase, | 14 addTraceEvent(char phase, |
15 const uint8_t* categoryEnabledFlag, | 15 const uint8_t* categoryEnabledFlag, |
16 const char* name, | 16 const char* name, |
17 uint64_t id, | 17 uint64_t id, |
18 int numArgs, | 18 int numArgs, |
19 const char** argNames, | 19 const char** argNames, |
20 const uint8_t* argTypes, | 20 const uint8_t* argTypes, |
21 const uint64_t* argValues, | 21 const uint64_t* argValues, |
22 uint8_t flags) override { return 0; } | 22 uint8_t flags) override { return 0; } |
23 | 23 |
24 virtual void | 24 void |
25 updateTraceEventDuration(const uint8_t* categoryEnabledFlag, | 25 updateTraceEventDuration(const uint8_t* categoryEnabledFlag, |
26 const char* name, | 26 const char* name, |
27 SkEventTracer::Handle handle) override {}; | 27 SkEventTracer::Handle handle) override {} |
28 | 28 |
29 const uint8_t* getCategoryGroupEnabled(const char* name) override { | 29 const uint8_t* getCategoryGroupEnabled(const char* name) override { |
30 static uint8_t no = 0; | 30 static uint8_t no = 0; |
31 return &no; | 31 return &no; |
32 }; | 32 } |
33 virtual const char* getCategoryGroupName( | 33 const char* getCategoryGroupName( |
34 const uint8_t* categoryEnabledFlag) override { | 34 const uint8_t* categoryEnabledFlag) override { |
35 static const char* dummy = "dummy"; | 35 static const char* dummy = "dummy"; |
36 return dummy; | 36 return dummy; |
37 }; | 37 } |
38 }; | 38 }; |
39 | 39 |
40 // We prefer gUserTracer if it's been set, otherwise we fall back on gDefaultTracer. | 40 // We prefer gUserTracer if it's been set, otherwise we fall back on gDefaultTracer. |
41 static SkEventTracer* gUserTracer = nullptr; | 41 static SkEventTracer* gUserTracer = nullptr; |
42 SK_DECLARE_STATIC_LAZY_PTR(SkDefaultEventTracer, gDefaultTracer); | 42 SK_DECLARE_STATIC_LAZY_PTR(SkDefaultEventTracer, gDefaultTracer); |
43 | 43 |
44 void SkEventTracer::SetInstance(SkEventTracer* tracer) { | 44 void SkEventTracer::SetInstance(SkEventTracer* tracer) { |
45 SkASSERT(nullptr == sk_atomic_load(&gUserTracer, sk_memory_order_acquire)); | 45 SkASSERT(nullptr == sk_atomic_load(&gUserTracer, sk_memory_order_acquire)); |
46 sk_atomic_store(&gUserTracer, tracer, sk_memory_order_release); | 46 sk_atomic_store(&gUserTracer, tracer, sk_memory_order_release); |
47   // An atomic load during process shutdown is probably overkill, but safe overkill. | 47   // An atomic load during process shutdown is probably overkill, but safe overkill. |
48   atexit([](){ SkDELETE(sk_atomic_load(&gUserTracer, sk_memory_order_acquire)); }); | 48   atexit([](){ SkDELETE(sk_atomic_load(&gUserTracer, sk_memory_order_acquire)); }); |
49 } | 49 } |
50 | 50 |
51 SkEventTracer* SkEventTracer::GetInstance() { | 51 SkEventTracer* SkEventTracer::GetInstance() { |
52   if (SkEventTracer* tracer = sk_atomic_load(&gUserTracer, sk_memory_order_acquire)) { | 52   if (SkEventTracer* tracer = sk_atomic_load(&gUserTracer, sk_memory_order_acquire)) { |
53 return tracer; | 53 return tracer; |
54 } | 54 } |
55 return gDefaultTracer.get(); | 55 return gDefaultTracer.get(); |
56 } | 56 } |
OLD | NEW |