| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2014 Google Inc. | 2 * Copyright 2014 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkAtomics.h" | 8 #include "SkAtomics.h" |
| 9 #include "SkEventTracer.h" | 9 #include "SkEventTracer.h" |
| 10 #include "SkLazyPtr.h" | 10 #include "SkLazyPtr.h" |
| (...skipping 29 matching lines...) |
| 40 }; | 40 }; |
| 41 | 41 |
| 42 // We prefer gUserTracer if it's been set, otherwise we fall back on gDefaultTracer. | 42 // We prefer gUserTracer if it's been set, otherwise we fall back on gDefaultTracer. |
| 43 static SkEventTracer* gUserTracer = nullptr; | 43 static SkEventTracer* gUserTracer = nullptr; |
| 44 SK_DECLARE_STATIC_LAZY_PTR(SkDefaultEventTracer, gDefaultTracer); | 44 SK_DECLARE_STATIC_LAZY_PTR(SkDefaultEventTracer, gDefaultTracer); |
| 45 | 45 |
| 46 void SkEventTracer::SetInstance(SkEventTracer* tracer) { | 46 void SkEventTracer::SetInstance(SkEventTracer* tracer) { |
| 47 SkASSERT(nullptr == sk_atomic_load(&gUserTracer, sk_memory_order_acquire)); | 47 SkASSERT(nullptr == sk_atomic_load(&gUserTracer, sk_memory_order_acquire)); |
| 48 sk_atomic_store(&gUserTracer, tracer, sk_memory_order_release); | 48 sk_atomic_store(&gUserTracer, tracer, sk_memory_order_release); |
| 49 // An atomic load during process shutdown is probably overkill, but safe overkill. | 49 // An atomic load during process shutdown is probably overkill, but safe overkill. |
| 50 atexit([](){ SkDELETE(sk_atomic_load(&gUserTracer, sk_memory_order_acquire)); }); | 50 atexit([]() { delete sk_atomic_load(&gUserTracer, sk_memory_order_acquire); }); |
| 51 } | 51 } |
| 52 | 52 |
| 53 SkEventTracer* SkEventTracer::GetInstance() { | 53 SkEventTracer* SkEventTracer::GetInstance() { |
| 54 if (SkEventTracer* tracer = sk_atomic_load(&gUserTracer, sk_memory_order_acquire)) { | 54 if (SkEventTracer* tracer = sk_atomic_load(&gUserTracer, sk_memory_order_acquire)) { |
| 55 return tracer; | 55 return tracer; |
| 56 } | 56 } |
| 57 return gDefaultTracer.get(); | 57 return gDefaultTracer.get(); |
| 58 } | 58 } |
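
Note on the change at line 50: swapping SkDELETE for plain `delete` works because SetInstance takes ownership of a tracer the caller allocated with plain `new`, and the atexit() handler registered there frees it at process shutdown. Below is a minimal sketch of the intended usage; MyNoopTracer and its override signatures are assumptions drawn from SkEventTracer.h at this revision, not part of this patch.

#include "SkEventTracer.h"

// Hypothetical no-op tracer; the virtual signatures below are assumed to
// match the SkEventTracer interface as of this change.
class MyNoopTracer : public SkEventTracer {
public:
    SkEventTracer::Handle addTraceEvent(char phase,
                                        const uint8_t* categoryEnabledFlag,
                                        const char* name,
                                        uint64_t id,
                                        int numArgs,
                                        const char** argNames,
                                        const uint8_t* argTypes,
                                        const uint64_t* argValues,
                                        uint8_t flags) override { return 0; }

    void updateTraceEventDuration(char phase,
                                  const uint8_t* categoryEnabledFlag,
                                  const char* name,
                                  SkEventTracer::Handle handle) override {}

    const uint8_t* getCategoryGroupEnabled(const char* name) override {
        static uint8_t kDisabled = 0;  // report every category as disabled
        return &kDisabled;
    }

    const char* getCategoryGroupName(const uint8_t* categoryEnabledFlag) override {
        return "noop";
    }
};

void installTracer() {
    // SetInstance may be called at most once (the SkASSERT checks that
    // gUserTracer is still unset) and transfers ownership: the atexit()
    // handler deletes the tracer at shutdown, so the caller must not.
    SkEventTracer::SetInstance(new MyNoopTracer);

    // Subsequent lookups prefer the user tracer over gDefaultTracer.
    SkEventTracer* tracer = SkEventTracer::GetInstance();
    (void)tracer;
}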