OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright 2013 Google Inc. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. |
| 6 */ |
| 7 |
| 8 #ifndef SkOnce_DEFINED |
| 9 #define SkOnce_DEFINED |
| 10 |
| 11 // SkOnce.h defines two macros, DEF_SK_ONCE and SK_ONCE. |
| 12 // You can use these macros together to create a threadsafe block of code that |
| 13 // runs at most once, no matter how many times you call it. This is |
| 14 // particularly useful for lazy singleton initialization. E.g. |
| 15 // |
| 16 // DEF_SK_ONCE(set_up_my_singleton, Singleton* singleton) { |
| 17 // // Code in this block will run at most once. |
| 18 // *singleton = new Singleton(...); |
| 19 // } |
| 20 // ... |
| 21 // const Singleton& getSingleton() { |
| 22 // static Singleton* singleton = NULL; |
| 23 // // Always call SK_ONCE. It's very cheap to call after the first time. |
| 24 // SK_ONCE(set_up_my_singleton, singleton); |
| 25 // SkASSERT(NULL != singleton); |
| 26 // return *singleton; |
| 27 // } |
| 28 // |
| 29 // OnceTest.cpp should serve as another simple example. |
| 30 |
| 31 #include "SkThread.h" |
| 32 #include "SkTypes.h" |
| 33 |
| 34 |
| 35 // Pass a unique name (at least in this scope) for name, and a type and name |
| 36 // for arg (as if writing a function declaration). |
| 37 // E.g. |
| 38 // DEF_SK_ONCE(my_onetime_setup, int* foo) { |
| 39 // *foo += 5; |
| 40 // } |
| 41 #define DEF_SK_ONCE(name, arg) \ |
| 42 static bool sk_once_##name##_done = false; \ |
| 43 SK_DECLARE_STATIC_MUTEX(sk_once_##name##_mutex); \ |
| 44 static void sk_once_##name##_function(arg) |
| 45 |
| 46 // Call this anywhere you need to guarantee that the corresponding DEF_SK_ONCE |
| 47 // block of code has run. name should match the DEF_SK_ONCE, and here you pass |
| 48 // the actual value of the argument. |
| 49 // E.g |
| 50 // int foo = 0; |
| 51 // SK_ONCE(my_onetime_setup, &foo); |
| 52 // SkASSERT(5 == foo); |
| 53 #define SK_ONCE(name, arg) \ |
| 54 sk_once_impl(&sk_once_##name##_done, &sk_once_##name##_mutex, sk_once_##name##_function, arg) |
| 55 |
| 56 |
| 57 // ---------------------- Implementation details below here. ----------------------------- |
| 58 |
| 59 |
| 60 // TODO(bungeman, mtklein): move all these *barrier* functions to SkThread when refactoring lands. |
| 61 |
| 62 #ifdef SK_BUILD_FOR_WIN |
| 63 #include <intrin.h> |
| 64 inline static void compiler_barrier() { |
| 65 _ReadWriteBarrier(); |
| 66 } |
| 67 #else |
| 68 inline static void compiler_barrier() { |
| 69 asm volatile("" : : : "memory"); |
| 70 } |
| 71 #endif |
| 72 |
| 73 inline static void full_barrier_on_arm() { |
| 74 #ifdef SK_CPU_ARM |
| 75 asm volatile("dmb" : : : "memory"); |
| 76 #endif |
| 77 } |
| 78 |
| 79 // On every platform, we issue a compiler barrier to prevent it from reordering |
| 80 // code. That's enough for platforms like x86 where release and acquire |
| 81 // barriers are no-ops. On other platforms we may need to be more careful; |
| 82 // ARM, in particular, needs real code for both acquire and release. We use a |
| 83 // full barrier, which acts as both, because that's the finest precision ARM |
| 84 // provides. |
| 85 |
| 86 inline static void release_barrier() { |
| 87 compiler_barrier(); |
| 88 full_barrier_on_arm(); |
| 89 } |
| 90 |
| 91 inline static void acquire_barrier() { |
| 92 compiler_barrier(); |
| 93 full_barrier_on_arm(); |
| 94 } |
| 95 |
| 96 // We've pulled a pretty standard double-checked locking implementation apart |
| 97 // into its main fast path and a slow path that's called when we suspect the |
| 98 // one-time code hasn't run yet. |
| 99 |
| 100 // This is the guts of the code, called when we suspect the one-time code hasn't been run yet. |
| 101 // This should be rarely called, so we separate it from sk_once_impl and don't mark it as inline. |
| 102 // (We don't mind if this is an actual function call, but odds are it'll be inlined anyway.) |
| 103 template <typename Arg> |
| 104 static void sk_once_impl_slow(bool* done, SkBaseMutex* mutex, void (*once)(Arg), Arg arg) { |
| 105 const SkAutoMutexAcquire lock(*mutex); |
| 106 if (!*done) { |
| 107 once(arg); |
| 108 // Also known as a store-store/load-store barrier, this makes sure that the writes |
| 109 // done before here---in particular, those done by calling once(arg)---are observable |
| 110 // before the writes after the line, *done = true. |
| 111 // |
| 112 // In version control terms this is like saying, "check in the work up |
| 113 // to and including once(arg), then check in *done=true as a subsequent change". |
| 114 // |
| 115 // We'll use this in the fast path to make sure once(arg)'s effects are |
| 116 // observable whenever we observe *done == true. |
| 117 release_barrier(); |
| 118 *done = true; |
| 119 } |
| 120 } |
| 121 |
| 122 // We nabbed this code from the dynamic_annotations library, and in their honor |
| 123 // we check the same define. If you find yourself wanting more than just |
| 124 // ANNOTATE_BENIGN_RACE, it might make sense to pull that in as a dependency |
| 125 // rather than continue to reproduce it here. |
| 126 |
| 127 #ifdef DYNAMIC_ANNOTATIONS_ENABLED |
| 128 // TSAN provides this hook to suppress a known-safe apparent race. |
| 129 extern "C" { |
| 130 void AnnotateBenignRace(const char* file, int line, const volatile void* mem, const char* desc); |
| 131 } |
| 132 #define ANNOTATE_BENIGN_RACE(mem, desc) AnnotateBenignRace(__FILE__, __LINE__, mem, desc) |
| 133 #else |
| 134 #define ANNOTATE_BENIGN_RACE(mem, desc) |
| 135 #endif |
| 136 |
| 137 // This is our fast path, called all the time. We do really want it to be inlined. |
| 138 template <typename Arg> |
| 139 inline static void sk_once_impl(bool* done, SkBaseMutex* mutex, void (*once)(Arg), Arg arg) { |
| 140 ANNOTATE_BENIGN_RACE(done, "Don't worry TSAN, we're sure this is safe."); |
| 141 if (!*done) { |
| 142 sk_once_impl_slow(done, mutex, once, arg); |
| 143 } |
| 144 // Also known as a load-load/load-store barrier, this acquire barrier makes |
| 145 // sure that anything we read from memory---in particular, memory written by |
| 146 // calling once(arg)---is at least as current as the value we read from done. |
| 147 // |
| 148 // In version control terms, this is a lot like saying "sync up to the |
| 149 // commit where we wrote *done = true". |
| 150 // |
| 151 // The release barrier in sk_once_impl_slow guaranteed that *done = true |
| 152 // happens after once(arg), so by syncing to *done = true here we're |
| 153 // forcing ourselves to also wait until the effects of once(arg) are readable. |
| 154 acquire_barrier(); |
| 155 } |
| 156 |
| 157 #undef ANNOTATE_BENIGN_RACE |
| 158 |
| 159 |
| 160 #endif // SkOnce_DEFINED |
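For reference, and not part of the change above: a minimal sketch of the same double-checked, acquire/release pattern written with C++11 atomics, assuming a C++11 toolchain. Every identifier below (gDone, gMutex, gFoo, my_onetime_setup, call_my_onetime_setup) is hypothetical and exists only to illustrate the ordering that release_barrier() and acquire_barrier() provide in the header: the release store publishes the writes made by the once-block before the flag becomes visible, and the acquire load on the fast path makes those writes visible to any caller that sees the flag set.

    #include <atomic>
    #include <mutex>

    static std::atomic<bool> gDone(false);
    static std::mutex gMutex;
    static int gFoo = 0;

    static void my_onetime_setup() {
        gFoo += 5;
    }

    static void call_my_onetime_setup() {
        // Fast path: an acquire load.  If we observe true, the write to gFoo
        // made by my_onetime_setup() is guaranteed to be visible as well.
        if (!gDone.load(std::memory_order_acquire)) {
            // Slow path: take the lock and re-check, like sk_once_impl_slow.
            std::lock_guard<std::mutex> lock(gMutex);
            if (!gDone.load(std::memory_order_relaxed)) {
                my_onetime_setup();
                // Release store: "check in" the work done by my_onetime_setup()
                // before checking in gDone = true.
                gDone.store(true, std::memory_order_release);
            }
        }
    }

The header gets the same effect with a plain bool plus explicit compiler/ARM barriers, which is why the racy read of *done on the fast path needs the ANNOTATE_BENIGN_RACE marker to keep TSAN quiet.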