OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "third_party/skia/include/core/SkThread.h" | 5 #ifndef SkAtomics_chrome_DEFINED |
6 | 6 #define SkAtomics_chrome_DEFINED |
7 #include <new> | |
8 | 7 |
9 #include "base/atomicops.h" | 8 #include "base/atomicops.h" |
10 #include "base/basictypes.h" | 9 #include "base/basictypes.h" |
11 #include "base/logging.h" | |
12 #include "base/synchronization/lock.h" | |
13 | 10 |
14 /** Adds one to the int specified by the address (in a thread-safe manner), and | 11 /** Atomically add one to the int and return the previous value. |
15 returns the previous value. | 12 * No memory barrier is required; must act as a compiler barrier. |
16 No additional memory barrier is required. | 13 */ |
17 This must act as a compiler barrier. | 14 static inline int32_t sk_atomic_inc(int32_t* addr) { |
18 */ | |
19 int32_t sk_atomic_inc(int32_t* addr) { | |
20 // sk_atomic_inc is expected to return the old value, | 15 // sk_atomic_inc is expected to return the old value, |
21 // Barrier_AtomicIncrement returns the new value. | 16 // NoBarrier_AtomicIncrement returns the new value. |
22 return base::subtle::NoBarrier_AtomicIncrement(addr, 1) - 1; | 17 return base::subtle::NoBarrier_AtomicIncrement(addr, 1) - 1; |
23 } | 18 } |
24 | 19 |
25 /* Subtracts one from the int specified by the address (in a thread-safe | 20 /** Atomically add inc to the int and return the previous value. |
26 manner), and returns the previous value. | 21 * No memory barrier is required; must act as a compiler barrier. |
27 Expected to act as a release (SL/S) memory barrier and a compiler barrier. | 22 */ |
28 */ | 23 static inline int32_t sk_atomic_add(int32_t* addr, int32_t inc) { |
29 int32_t sk_atomic_dec(int32_t* addr) { | 24 // sk_atomic_add is expected to return the old value, |
| 25 // Barrier_AtomicIncrement returns the new value. |
| 26 return base::subtle::Barrier_AtomicIncrement(addr, inc) - inc; |
| 27 } |
| 28 |
| 29 /** Atomically subtract one from the int and return the previous value. |
| 30 * Must act as a release (SL/S) memory barrier and as a compiler barrier. |
| 31 */ |
| 32 static inline int32_t sk_atomic_dec(int32_t* addr) { |
30 // sk_atomic_dec is expected to return the old value, | 33 // sk_atomic_dec is expected to return the old value, |
31 // Barrier_AtomicIncrement returns the new value. | 34 // Barrier_AtomicIncrement returns the new value. |
32 return base::subtle::Barrier_AtomicIncrement(addr, -1) + 1; | 35 return base::subtle::Barrier_AtomicIncrement(addr, -1) + 1; |
33 } | 36 } |
34 /** If sk_atomic_dec does not act as an aquire (L/SL) barrier, this is expected | 37 /** If sk_atomic_dec does not act as an acquire (L/SL) barrier, |
35 to act as an aquire (L/SL) memory barrier and as a compiler barrier. | 38 * must act as an acquire (L/SL) memory barrier and as a compiler barrier. |
36 */ | 39 */ |
37 void sk_membar_aquire__after_atomic_dec() { } | 40 static inline void sk_membar_acquire__after_atomic_dec() { } |
38 | 41 |
39 /** Adds one to the int specified by the address iff the int specified by the | 42 /** Atomically add one to the int iff the int was not 0; return the previous value. |
40 address is not zero (in a thread-safe manner), and returns the previous | 43 * No memory barrier is required; must act as a compiler barrier. |
41 value. | 44 */ |
42 No additional memory barrier is required. | 45 static inline int32_t sk_atomic_conditional_inc(int32_t* addr) { |
43 This must act as a compiler barrier. | |
44 */ | |
45 int32_t sk_atomic_conditional_inc(int32_t* addr) { | |
46 int32_t value = *addr; | 46 int32_t value = *addr; |
47 | 47 |
48 while (true) { | 48 while (true) { |
49 if (value == 0) { | 49 if (value == 0) { |
50 return 0; | 50 return 0; |
51 } | 51 } |
52 | 52 |
53 int32_t before; | 53 int32_t before; |
54 before = base::subtle::Acquire_CompareAndSwap(addr, value, value + 1); | 54 before = base::subtle::Acquire_CompareAndSwap(addr, value, value + 1); |
55 | 55 |
56 if (before == value) { | 56 if (before == value) { |
57 return value; | 57 return value; |
58 } else { | 58 } else { |
59 value = before; | 59 value = before; |
60 } | 60 } |
61 } | 61 } |
62 } | 62 } |
63 /** If sk_atomic_conditional_inc does not act as an aquire (L/SL) barrier, this | 63 /** If sk_atomic_conditional_inc does not act as an acquire (L/SL) barrier, |
64 is expected to act as an aquire (L/SL) memory barrier and as a compiler | 64 * must act as an acquire (L/SL) memory barrier and as a compiler barrier. |
65 barrier. | 65 */ |
| 66 static inline void sk_membar_acquire__after_atomic_conditional_inc() { } |
| 67 |
| 68 /** Atomically load the int. |
| 69 * Must act as an acquire (L/SL) memory barrier and as a compiler barrier. |
66 */ | 70 */ |
67 void sk_membar_aquire__after_atomic_conditional_inc() { } | 71 static inline int32_t sk_atomic_load_acquire(int32_t* addr) { |
68 | 72 return base::subtle::Acquire_Load(addr); |
69 SkMutex::SkMutex() { | |
70 COMPILE_ASSERT(sizeof(base::Lock) <= sizeof(fStorage), Lock_is_too_big_for_SkMutex); |
71 base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage); | |
72 new(lock) base::Lock(); | |
73 } | 73 } |
74 | 74 |
75 SkMutex::~SkMutex() { | 75 #endif |
76 base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage); | |
77 lock->~Lock(); | |
78 } | |
79 | 76 |
80 void SkMutex::acquire() { | |
81 base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage); | |
82 lock->Acquire(); | |
83 } | |
84 | |
85 void SkMutex::release() { | |
86 base::Lock* lock = reinterpret_cast<base::Lock*>(fStorage); | |
87 lock->Release(); | |
88 } | |
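
The barrier contract spelled out in the new comments is the standard reference-counting discipline: taking a ref needs no ordering, dropping a ref must release, and the thread that sees the count reach zero must acquire before tearing the object down. Below is a minimal sketch of a consumer of these primitives; the class and field names are hypothetical, not part of the patch, and the sketch assumes the header above is in scope.

class RefCounted {
 public:
  RefCounted() : fRefCnt(1) {}

  void ref() {
    sk_atomic_inc(&fRefCnt);  // no barrier needed just to take a ref
  }

  void unref() {
    if (sk_atomic_dec(&fRefCnt) == 1) {
      // Pair the release in sk_atomic_dec with an acquire so the deleting
      // thread observes every other thread's writes to the object.
      sk_membar_acquire__after_atomic_dec();
      delete this;
    }
  }

  // Promote a weak reference: take a ref only if the count is still nonzero.
  bool tryRef() {
    if (sk_atomic_conditional_inc(&fRefCnt) == 0) {
      return false;  // object already dead; no ref was taken
    }
    sk_membar_acquire__after_atomic_conditional_inc();
    return true;
  }

 private:
  virtual ~RefCounted() {}
  int32_t fRefCnt;
};

The two sk_membar_acquire__* hooks can stay empty in this port because Barrier_AtomicIncrement and Acquire_CompareAndSwap already carry the required acquire semantics; the hooks exist so a port built on weaker primitives has somewhere to put a real fence.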
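The old side also shows the storage trick this patch deletes along with SkMutex: Skia reserves an opaque fStorage buffer, and the port placement-constructs a base::Lock inside it, with COMPILE_ASSERT guaranteeing the lock fits. That keeps base/synchronization/lock.h out of Skia's public headers and avoids a heap allocation per mutex. A standalone sketch of the same pattern, using a hypothetical FakeLock and C++11 static_assert/alignas in place of Chromium's COMPILE_ASSERT:

#include <new>

// Stand-in for base::Lock; only the pattern matters here.
struct FakeLock {
  void Acquire() {}
  void Release() {}
};

class PortableMutex {
 public:
  PortableMutex() {
    // Fail the build if the real lock ever outgrows the reserved buffer,
    // as the COMPILE_ASSERT in the old code did.
    static_assert(sizeof(FakeLock) <= sizeof(fStorage),
                  "lock is too big for PortableMutex");
    new (fStorage) FakeLock();  // construct in place, no heap allocation
  }
  ~PortableMutex() { lock()->~FakeLock(); }  // destroy in place

  void acquire() { lock()->Acquire(); }
  void release() { lock()->Release(); }

 private:
  FakeLock* lock() { return reinterpret_cast<FakeLock*>(fStorage); }
  alignas(FakeLock) char fStorage[sizeof(FakeLock)];
};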