| OLD | NEW |
| | (Empty) |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "third_party/skia/include/core/SkThread.h" | |
| 6 | |
| 7 #include <new> | |
| 8 | |
| 9 #include "base/atomicops.h" | |
| 10 #include "base/basictypes.h" | |
| 11 #include "base/logging.h" | |
| 12 #include "base/synchronization/lock.h" | |
| 13 | |
| 14 /** Adds one to the int specified by the address (in a thread-safe manner), and | |
| 15 returns the previous value. | |
| 16 No additional memory barrier is required. | |
| 17 This must act as a compiler barrier. | |
| 18 */ | |
| 19 int32_t sk_atomic_inc(int32_t* addr) { | |
| 20 // sk_atomic_inc is expected to return the old value, | |
| 21 // NoBarrier_AtomicIncrement returns the new value. | |
| 22 return base::subtle::NoBarrier_AtomicIncrement(addr, 1) - 1; | |
| 23 } | |
| 24 | |
| 25 /* Subtracts one from the int specified by the address (in a thread-safe | |
| 26 manner), and returns the previous value. | |
| 27 Expected to act as a release (SL/S) memory barrier and a compiler barrier. | |
| 28 */ | |
| 29 int32_t sk_atomic_dec(int32_t* addr) { | |
| 30 // sk_atomic_dec is expected to return the old value, | |
| 31 // Barrier_AtomicIncrement returns the new value. | |
| 32 return base::subtle::Barrier_AtomicIncrement(addr, -1) + 1; | |
| 33 } | |
| 34 /** If sk_atomic_dec does not act as an acquire (L/SL) barrier, this is expected | |
| 35 to act as an acquire (L/SL) memory barrier and as a compiler barrier. | |
| 36 */ | |
| 37 void sk_membar_aquire__after_atomic_dec() { } | |
| 38 | |
| 39 /** Adds one to the int specified by the address iff the int specified by the | |
| 40 address is not zero (in a thread-safe manner), and returns the previous | |
| 41 value. | |
| 42 No additional memory barrier is required. | |
| 43 This must act as a compiler barrier. | |
| 44 */ | |
| 45 int32_t sk_atomic_conditional_inc(int32_t* addr) { | |
| 46 int32_t value = *addr; | |
| 47 | |
| 48 while (true) { | |
| 49 if (value == 0) { | |
| 50 return 0; | |
| 51 } | |
| 52 | |
| 53 int32_t before; | |
| 54 before = base::subtle::Acquire_CompareAndSwap(addr, value, value + 1); | |
| 55 | |
| 56 if (before == value) { | |
| 57 return value; | |
| 58 } else { | |
| 59 value = before; | |
| 60 } | |
| 61 } | |
| 62 } | |
| 63 /** If sk_atomic_conditional_inc does not act as an acquire (L/SL) barrier, this | |
| 64 is expected to act as an acquire (L/SL) memory barrier and as a compiler | |
| 65 barrier. | |
| 66 */ | |
| 67 void sk_membar_aquire__after_atomic_conditional_inc() { } | |
| 68 | |
| 69 SkMutex::SkMutex() { | |
| 70 COMPILE_ASSERT(sizeof(base::Lock) <= sizeof(fStorage), Lock_is_too_big_for_SkMutex); | |
| 71 base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage); | |
| 72 new(lock) base::Lock(); | |
| 73 } | |
| 74 | |
| 75 SkMutex::~SkMutex() { | |
| 76 base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage); | |
| 77 lock->~Lock(); | |
| 78 } | |
| 79 | |
| 80 void SkMutex::acquire() { | |
| 81 base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage); | |
| 82 lock->Acquire(); | |
| 83 } | |
| 84 | |
| 85 void SkMutex::release() { | |
| 86 base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage); | |
| 87 lock->Release(); | |
| 88 } | |
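For context on the barrier choices above: `sk_atomic_inc` needs no barrier because taking a new reference only requires that the object is already alive, while `sk_atomic_dec` must release so that a thread's prior writes become visible to whichever thread ends up destroying the object, which in turn needs the matching acquire fence that `sk_membar_aquire__after_atomic_dec` represents (empty here because `Barrier_AtomicIncrement` already supplies that ordering). A minimal sketch of the ref-counting pattern these primitives serve, written against `std::atomic` rather than `base/atomicops` and not part of this file:

```cpp
#include <atomic>
#include <cstdint>

struct RefCounted {
  std::atomic<int32_t> count{1};

  void ref() {
    // Matches sk_atomic_inc: no ordering is needed to take a new reference.
    count.fetch_add(1, std::memory_order_relaxed);
  }

  void unref() {
    // Matches sk_atomic_dec: release so earlier writes are published...
    if (count.fetch_sub(1, std::memory_order_release) == 1) {
      // ...and the destroying thread acquires before touching the object,
      // the role played by sk_membar_aquire__after_atomic_dec().
      std::atomic_thread_fence(std::memory_order_acquire);
      delete this;
    }
  }
};
```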
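`sk_atomic_conditional_inc` is the classic try-ref loop: it only bumps a count it has observed to be nonzero, so a caller can attempt to take a reference while the last strong reference may be dropping concurrently on another thread (the weak-pointer promotion pattern). A sketch of the same loop over `std::atomic`, again only illustrative:

```cpp
#include <atomic>
#include <cstdint>

// Returns the value observed before the attempt: nonzero means the
// increment happened, zero means the count was already exhausted.
int32_t conditional_inc(std::atomic<int32_t>& count) {
  int32_t value = count.load(std::memory_order_relaxed);
  while (value != 0 &&
         !count.compare_exchange_weak(value, value + 1,
                                      std::memory_order_acquire,
                                      std::memory_order_relaxed)) {
    // On failure, compare_exchange_weak reloads `value`; loop and retry.
  }
  return value;
}
```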
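The `SkMutex` methods all repeat the same cast because Skia's `SkThread.h` only declares an opaque `fStorage` byte array; the port placement-news a `base::Lock` into that storage so Skia's header never has to see Chromium's lock type. A self-contained sketch of the trick, with a hypothetical `MyLock` standing in for `base::Lock`:

```cpp
#include <new>

struct MyLock {  // hypothetical stand-in for base::Lock
  void Acquire() {}
  void Release() {}
};

class Mutex {
 public:
  Mutex() {
    static_assert(sizeof(MyLock) <= sizeof(fStorage),
                  "Lock is too big for Mutex storage");
    new (fStorage) MyLock();  // construct the real lock inside fStorage
  }
  ~Mutex() { lock()->~MyLock(); }  // destroy in place; no delete
  void acquire() { lock()->Acquire(); }
  void release() { lock()->Release(); }

 private:
  MyLock* lock() { return reinterpret_cast<MyLock*>(fStorage); }
  alignas(MyLock) unsigned char fStorage[sizeof(MyLock)];
};
```

`COMPILE_ASSERT` in the code above is Chromium's pre-C++11 equivalent of `static_assert`, used here to guarantee the opaque buffer is actually large enough for the lock it hides.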