Index: include/core/SkThread.h
diff --git a/include/core/SkThread.h b/include/core/SkThread.h
index 412ace31eb24f64e31c1d2e9529d24a3c004e95a..160b40ce29863d5978e8a715436976ed26f6ed2b 100644
--- a/include/core/SkThread.h
+++ b/include/core/SkThread.h
@@ -33,6 +33,12 @@ static int32_t sk_atomic_dec(int32_t* addr);
  */
 static int32_t sk_atomic_conditional_inc(int32_t* addr);
 
+/** Atomic compare and set.
+ *  If *addr == before, set *addr to after and return true, otherwise return false.
+ *  This must act as a release (SL/S) memory barrier and as a compiler barrier.
+ */
+static bool sk_atomic_cas(int32_t* addr, int32_t before, int32_t after);
+
 /** If sk_atomic_dec does not act as an acquire (L/SL) barrier,
  *  this must act as an acquire (L/SL) memory barrier and as a compiler barrier.
  */
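
Note: as with the other declarations above, the definition of sk_atomic_cas is expected to come from the platform header included via SK_ATOMICS_PLATFORM_H. As a minimal sketch only (not the actual platform code), a GCC-style port could satisfy this contract with the __sync builtin, which acts as a full barrier and is therefore at least the release (SL/S) barrier the contract requires:

    // Sketch only: one plausible definition for a GCC-like toolchain.
    // __sync_bool_compare_and_swap is a full memory barrier, i.e. stronger
    // than the release (SL/S) barrier the contract above requires.
    static inline bool sk_atomic_cas(int32_t* addr, int32_t before, int32_t after) {
        return __sync_bool_compare_and_swap(addr, before, after);
    }
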
@@ -45,6 +51,34 @@ static void sk_membar_acquire__after_atomic_conditional_inc();
 
 #include SK_ATOMICS_PLATFORM_H
 
+// This is POD and must be zero-initialized.
+struct SkSpinlock {
+    void acquire() {
+        SkASSERT(shouldBeZero == 0);
+        // No memory barrier needed, but sk_atomic_cas gives us at least release anyway.
+        while (!sk_atomic_cas(&thisIsPrivate, 0, 1)) {
+            // spin
+        }
+    }
+
+    void release() {
+        SkASSERT(shouldBeZero == 0);
+        // This requires a release memory barrier before storing, which sk_atomic_cas guarantees.
+        SkAssertResult(sk_atomic_cas(&thisIsPrivate, 1, 0));
+    }
+
+    int32_t thisIsPrivate;
+    SkDEBUGCODE(int32_t shouldBeZero;)
+};
+
+class SkAutoSpinlock : SkNoncopyable {
+public:
+    explicit SkAutoSpinlock(SkSpinlock* lock) : fLock(lock) { fLock->acquire(); }
+    ~SkAutoSpinlock() { fLock->release(); }
+private:
+    SkSpinlock* fLock;
+};
+#define SkAutoSpinlock(...) SK_REQUIRE_LOCAL_VAR(SkAutoSpinlock)
 
 /** SK_MUTEX_PLATFORM_H must provide the following (or equivalent) declarations.
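
Notes on the new block: SkSpinlock is deliberately POD, so a file-scope instance is zero-initialized before any code runs and can guard lazy initialization without static-constructor ordering issues; the SkDEBUGCODE field backs the SkASSERT(shouldBeZero == 0) checks in debug builds. The trailing #define applies the SK_REQUIRE_LOCAL_VAR pattern: writing SkAutoSpinlock(&lock); without naming the guard becomes a compile error, which matters because that unnamed temporary would release the lock at the end of the same statement. A minimal usage sketch, with hypothetical names gLock and doWorkLocked:

    static SkSpinlock gLock;  // POD: zero-initialized, no static constructor runs.

    void doWorkLocked() {
        SkAutoSpinlock guard(&gLock);  // acquire() now, release() when guard goes out of scope.
        // ... keep the critical section short; waiting threads spin, burning CPU ...
    }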