/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#ifndef SkThread_platform_DEFINED
#define SkThread_platform_DEFINED

#if defined(SK_BUILD_FOR_ANDROID)

#if !defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)

#include <stdint.h>

/* Just use the GCC atomic intrinsics. They're supported by the NDK toolchain,
 * have reasonable performance, and provide full memory barriers.
 */
static inline __attribute__((always_inline)) int32_t sk_atomic_inc(int32_t *addr) {
    return __sync_fetch_and_add(addr, 1);
}

static inline __attribute__((always_inline)) int32_t sk_atomic_add(int32_t *addr, int32_t inc) {
    return __sync_fetch_and_add(addr, inc);
}
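
/* Worked example (illustrative only, not part of the original header): the
 * fetch-and-add intrinsics return the value held *before* the addition.
 *
 *     int32_t count = 5;
 *     int32_t prev  = sk_atomic_add(&count, 3);  // prev == 5, count == 8
 *     prev = sk_atomic_inc(&count);              // prev == 8, count == 9
 */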

static inline __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t *addr) {
    return __sync_fetch_and_add(addr, -1);
}

static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_dec() { }

static inline __attribute__((always_inline)) int32_t sk_atomic_conditional_inc(int32_t* addr) {
    int32_t value = *addr;

    while (true) {
        if (value == 0) {
            return 0;
        }

        // Attempt to advance value to value + 1. The intrinsic returns the
        // value actually observed at addr before the operation.
        int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);

        if (before == value) {
            return value;   // the increment succeeded
        } else {
            value = before; // another thread changed *addr; retry
        }
    }
}
static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_conditional_inc() { }

#else // SK_BUILD_FOR_ANDROID_FRAMEWORK

/* The platform atomic operations are slightly more efficient than the
 * GCC built-ins, so use them.
 */
#include <utils/Atomic.h>

#define sk_atomic_inc(addr) android_atomic_inc(addr)
#define sk_atomic_add(addr, inc) android_atomic_add(inc, addr)
#define sk_atomic_dec(addr) android_atomic_dec(addr)

static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_dec() {
    // HACK: Android's atomic operations already provide full memory barriers.
    // Should this change, uncomment the lines below.
    //int dummy;
    //android_atomic_acquire_store(0, &dummy);
}
static inline __attribute__((always_inline)) int32_t sk_atomic_conditional_inc(int32_t* addr) {
    while (true) {
        int32_t value = *addr;
        if (value == 0) {
            return 0;
        }
        // android_atomic_release_cas() returns 0 iff the swap succeeded.
        if (0 == android_atomic_release_cas(value, value + 1, addr)) {
            return value;
        }
    }
}
static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_conditional_inc() {
    // HACK: Android's atomic operations already provide full memory barriers.
    // Should this change, uncomment the lines below.
    //int dummy;
    //android_atomic_acquire_store(0, &dummy);
}

#endif // SK_BUILD_FOR_ANDROID_FRAMEWORK

#else // !SK_BUILD_FOR_ANDROID

/** Implemented by the porting layer, this function adds one to the int
    specified by the address (in a thread-safe manner), and returns the
    previous value.
    No additional memory barrier is required.
    This must act as a compiler barrier.
*/
SK_API int32_t sk_atomic_inc(int32_t* addr);

/** Implemented by the porting layer, this function adds inc to the int
    specified by the address (in a thread-safe manner), and returns the
    previous value.
    No additional memory barrier is required.
    This must act as a compiler barrier.
*/
SK_API int32_t sk_atomic_add(int32_t* addr, int32_t inc);

/** Implemented by the porting layer, this function subtracts one from the int
    specified by the address (in a thread-safe manner), and returns the
    previous value.
    Expected to act as a release (SL/S) memory barrier and a compiler barrier.
*/
SK_API int32_t sk_atomic_dec(int32_t* addr);

/** If sk_atomic_dec does not act as an acquire (L/SL) barrier, this is expected
    to act as an acquire (L/SL) memory barrier and as a compiler barrier.
*/
SK_API void sk_membar_aquire__after_atomic_dec();
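
/* Usage sketch (illustrative; fRefCnt and destroy() are hypothetical names):
 * the unref sequence these two functions are designed for. The acquire
 * barrier ensures the destroying thread observes all writes made by other
 * threads before they released their references.
 *
 *     if (1 == sk_atomic_dec(&fRefCnt)) {     // we dropped the last reference
 *         sk_membar_aquire__after_atomic_dec();
 *         destroy();                          // now safe to tear down
 *     }
 */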

/** Implemented by the porting layer, this function adds one to the int
    specified by the address iff the int specified by the address is not zero
    (in a thread-safe manner), and returns the previous value.
    No additional memory barrier is required.
    This must act as a compiler barrier.
*/
SK_API int32_t sk_atomic_conditional_inc(int32_t* addr);

/** If sk_atomic_conditional_inc does not act as an acquire (L/SL) barrier, this
    is expected to act as an acquire (L/SL) memory barrier and as a compiler
    barrier.
*/
SK_API void sk_membar_aquire__after_atomic_conditional_inc();
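
/* Usage sketch (illustrative; tryRef() and fRefCnt are hypothetical names,
 * e.g. for a weak-reference scheme): the increment only succeeds while some
 * strong reference is keeping the count non-zero.
 *
 *     bool tryRef() {
 *         if (sk_atomic_conditional_inc(&fRefCnt) != 0) {
 *             sk_membar_aquire__after_atomic_conditional_inc();
 *             return true;    // acquired a strong reference
 *         }
 *         return false;       // object is already being destroyed
 *     }
 */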

#endif // !SK_BUILD_FOR_ANDROID

#ifdef SK_USE_POSIX_THREADS

#include <pthread.h>

// A SkBaseMutex is a POD structure that can be directly initialized
// at declaration time with SK_DECLARE_STATIC/GLOBAL_MUTEX. This avoids the
// generation of a static initializer in the final machine code (and
// a corresponding static finalizer).
//
struct SkBaseMutex {
    void acquire() { pthread_mutex_lock(&fMutex); }
    void release() { pthread_mutex_unlock(&fMutex); }
    pthread_mutex_t fMutex;
};

// Using POD-style initialization prevents the generation of a static initializer
// and keeps the acquire() implementation small and fast.
#define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name = { PTHREAD_MUTEX_INITIALIZER }

// Special case used when the static mutex must be available globally.
#define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name = { PTHREAD_MUTEX_INITIALIZER }
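
/* Usage sketch (illustrative; gCount and bumpCount() are hypothetical names):
 * a mutex declared this way requires no runtime constructor, so it is safe
 * to use even during static initialization of other translation units.
 *
 *     SK_DECLARE_STATIC_MUTEX(gCountMutex);
 *     static int32_t gCount;
 *
 *     void bumpCount() {
 *         gCountMutex.acquire();
 *         ++gCount;
 *         gCountMutex.release();
 *     }
 */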

// A normal mutex that must be initialized through normal C++ construction,
// i.e. when it's a member of another class, or allocated on the heap.
class SK_API SkMutex : public SkBaseMutex, SkNoncopyable {
public:
    SkMutex();
    ~SkMutex();
};

#else // !SK_USE_POSIX_THREADS

// In the generic case, SkBaseMutex and SkMutex are the same thing, and we
// can't easily get rid of static initializers.
//
class SK_API SkMutex : SkNoncopyable {
public:
    SkMutex();
    ~SkMutex();

    void acquire();
    void release();

private:
    bool fIsGlobal;
    enum {
        kStorageIntCount = 64
    };
    uint32_t fStorage[kStorageIntCount];  // opaque storage used by the platform implementation
};

typedef SkMutex SkBaseMutex;

#define SK_DECLARE_STATIC_MUTEX(name) static SkBaseMutex name
#define SK_DECLARE_GLOBAL_MUTEX(name) SkBaseMutex name

#endif // !SK_USE_POSIX_THREADS
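
/* Usage sketch (illustrative): pairing acquire()/release() with a scoped
 * guard so the mutex is released on every path. Skia ships a similar helper
 * (SkAutoMutexAcquire in SkThread.h); ScopedLock below is only a sketch.
 *
 *     class ScopedLock {
 *     public:
 *         explicit ScopedLock(SkBaseMutex& m) : fMutex(m) { fMutex.acquire(); }
 *         ~ScopedLock() { fMutex.release(); }
 *     private:
 *         SkBaseMutex& fMutex;
 *     };
 */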


#endif // SkThread_platform_DEFINED