Index: src/core/SkOnce.h
diff --git a/src/core/SkOnce.h b/src/core/SkOnce.h
new file mode 100644
index 0000000000000000000000000000000000000000..262963ba40d8c8a32e0d518a36e243b2b8f847a2
--- /dev/null
+++ b/src/core/SkOnce.h
@@ -0,0 +1,78 @@
+#ifndef SkOnce_DEFINED
+#define SkOnce_DEFINED
+
+// TODO: docs
+
+#include "SkThread.h"
+#include "SkTypes.h"
+
+#define DEF_SK_ONCE(name, arg) \
+    static bool sk_once##name##ready = false; \
bungeman-skia
2013/10/08 22:34:59
nit: can we put some '__' before and after name? I
mtklein
2013/10/09 15:22:25
You betcha. Done. Took the opportunity to rename
+    SK_DECLARE_STATIC_MUTEX(sk_once##name##mutex); \
+    static void sk_once##name##function(arg)
+
+
+inline static void compiler_barrier() {
bungeman-skia
2013/10/08 22:34:59
note that on Windows this is _ReadWriteBarrier, no
mtklein
2013/10/09 15:22:25
Done.
+    asm volatile("" ::: "memory");
+}
+
+inline static void full_barrier_on_arm() {
+#ifdef SK_CPU_ARM
+    asm volatile("dmb" ::: "memory");
bungeman-skia
2013/10/08 22:34:59
note that android has android_memory_barrier and a
mtklein
2013/10/09 15:22:25
Yeah. Given that ARM doesn't quite imply Android,
+#endif
+}
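Regarding the Windows note above: MSVC doesn't accept GCC-style inline asm on x86-64, so a Windows-aware compiler_barrier() would presumably route to the _ReadWriteBarrier() intrinsic from <intrin.h> instead. A minimal sketch of that shape (illustrative only, not part of this patch):

#if defined(_MSC_VER)
    #include <intrin.h>
    inline static void compiler_barrier() {
        _ReadWriteBarrier();            // compiler-only fence; emits no instructions
    }
#else
    inline static void compiler_barrier() {
        asm volatile("" ::: "memory");  // GCC/Clang equivalent
    }
#endif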
+
+// On every platform, we issue a compiler barrier to prevent it from reordering
+// code. That's enough for platforms like x86 where release and acquire
+// barriers are no-ops. On other platforms we may need to be more careful;
+// ARM, in particular, needs real code for both acquire and release. We use a
+// full barrier, which acts as both, because that's the finest precision ARM
+// provides.
+
+inline static void release_barrier() {
+    compiler_barrier();
+    full_barrier_on_arm();
+}
+
+inline static void acquire_barrier() {
bungeman-skia
2013/10/08 22:34:59
of course, all of this goes in some future cleaned
mtklein
2013/10/09 15:22:25
Agreed. I've added a note to remind us.
+    compiler_barrier();
+    full_barrier_on_arm();
+}
+
+// This should be rarely called, so we separate it from sk_once_impl and don't mark it as inline.
+template <typename Arg>
+static void sk_once_impl_slow(bool* ready, SkBaseMutex* mutex, void (*once)(Arg), Arg arg) {
+    const SkAutoMutexAcquire lock(*mutex);
+    if (!*ready) {
+        once(arg);
+        release_barrier();
bungeman-skia
2013/10/08 22:34:59
I know that we know why these barriers are where t
mtklein
2013/10/09 15:22:25
Yeah, that was the TODO. How's this read to you n
+        *ready = true;
+    }
+}
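To spell out the ordering discussed above: the release barrier sits between once(arg) and the store to *ready so that it pairs with the acquire_barrier() in sk_once_impl further down. A sketch of the same slow path with that reasoning written out as comments (the wording is illustrative, not the author's):

template <typename Arg>
static void sk_once_impl_slow(bool* ready, SkBaseMutex* mutex, void (*once)(Arg), Arg arg) {
    const SkAutoMutexAcquire lock(*mutex);  // racers on the slow path serialize here
    if (!*ready) {
        once(arg);
        // Make every side effect of once(arg) visible before *ready can be read as true.
        // This pairs with the acquire_barrier() after the fast-path check in sk_once_impl:
        // any thread that observes *ready == true also observes once(arg)'s work.
        release_barrier();
        *ready = true;
    }
}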
+
+// Thread sanitizer provides a hook to stifle a known safe race.
+extern "C" {
+void AnnotateBenignRace(const char* file, int line, const volatile void* mem, const char* desc);
+}
+// We need to define it as a no-op if we're not using TSAN.
+#if SK_HAS_COMPILER_FEATURE(thread_sanitizer)
bungeman-skia
2013/10/08 22:34:59
SK_HAS_COMPILER_FEATURE is only available on clang
mtklein
2013/10/09 15:22:25
Done.
+# define ANNOTATE_BENIGN_RACE(file, line, mem, desc) AnnotateBenignRace(file, line, mem, desc)
+#else
+# define ANNOTATE_BENIGN_RACE(file, line, mem, desc)
+#endif
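On the clang-only concern above: one common way to keep the annotation harmless on compilers without __has_feature is to test for the builtin itself and fall back to a no-op macro. A sketch of that pattern (illustrative only, not necessarily the fix that landed):

#if defined(__has_feature)
    #if __has_feature(thread_sanitizer)
        #define ANNOTATE_BENIGN_RACE(file, line, mem, desc) \
            AnnotateBenignRace(file, line, mem, desc)
    #endif
#endif
#ifndef ANNOTATE_BENIGN_RACE
    #define ANNOTATE_BENIGN_RACE(file, line, mem, desc)
#endif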
+
+template <typename Arg>
+inline static void sk_once_impl(bool* ready, SkBaseMutex* mutex, void (*once)(Arg), Arg arg) {
+    ANNOTATE_BENIGN_RACE(__FILE__, __LINE__, ready, "Don't worry TSAN, we're sure this is safe.");
+    if (!*ready) {
+        sk_once_impl_slow(ready, mutex, once, arg);
+    }
+    acquire_barrier();
bungeman-skia
2013/10/08 22:34:59
A comment like "Ensure that we see the effects of
mtklein
2013/10/09 15:22:25
Done.
+}
+
+#undef ANNOTATE_BENIGN_RACE
+
+#define SK_ONCE(name, arg) \
+    sk_once_impl(&sk_once##name##ready, &sk_once##name##mutex, sk_once##name##function, arg)
+
+#endif  // SkOnce_DEFINED
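For reference, a usage sketch of the two macros (the function name register_stuff and its argument are hypothetical): DEF_SK_ONCE declares the ready flag, the mutex, and the body that should run at most once; SK_ONCE is the call site, which stays cheap after the first call.

#include "SkOnce.h"

DEF_SK_ONCE(register_stuff, int* counter) {
    *counter += 1;  // runs at most once, no matter how many threads race to SK_ONCE below
}

void ensure_registered(int* counter) {
    SK_ONCE(register_stuff, counter);  // after the first call: a load of the flag plus acquire_barrier()
}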