OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
15 // | 15 // |
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 // This file is an internal atomic implementation, use atomicops.h instead. | 28 // This file is an internal atomic implementation, use atomicops.h instead. |
29 // | 29 // |
30 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. | |
31 | 30 |
32 #ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 31 #ifndef V8_ATOMICOPS_INTERNALS_SH4_GCC_H_ |
33 #define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 32 #define V8_ATOMICOPS_INTERNALS_SH4_GCC_H_ |
34 | 33 |
35 namespace v8 { | 34 namespace v8 { |
36 namespace internal { | 35 namespace internal { |
37 | 36 |
38 // 0xffff0fc0 is the hard coded address of a function provided by | |
39 // the kernel which implements an atomic compare-exchange. On older | |
40 // ARM architecture revisions (pre-v6) this may be implemented using | |
41 // a syscall. This address is stable, and in active use (hard coded) | |
42 // by at least glibc-2.7 and the Android C library. | |
43 typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value, | |
44 Atomic32 new_value, | |
45 volatile Atomic32* ptr); | |
46 LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) = | |
47 (LinuxKernelCmpxchgFunc) 0xffff0fc0; | |
48 | |
49 typedef void (*LinuxKernelMemoryBarrierFunc)(void); | |
50 LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) = | |
51 (LinuxKernelMemoryBarrierFunc) 0xffff0fa0; | |
52 | |
53 | |
54 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 37 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
55 Atomic32 old_value, | 38 Atomic32 old_value, |
56 Atomic32 new_value) { | 39 Atomic32 new_value) { |
57 Atomic32 prev_value = *ptr; | 40 return __sync_val_compare_and_swap(ptr, old_value, new_value); |
58 do { | |
59 if (!pLinuxKernelCmpxchg(old_value, new_value, | |
60 const_cast<Atomic32*>(ptr))) { | |
61 return old_value; | |
62 } | |
63 prev_value = *ptr; | |
64 } while (prev_value == old_value); | |
65 return prev_value; | |
66 } | 41 } |
67 | 42 |
68 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 43 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
69 Atomic32 new_value) { | 44 Atomic32 new_value) { |
70 Atomic32 old_value; | 45 return __sync_lock_test_and_set(ptr, new_value); |
71 do { | |
72 old_value = *ptr; | |
73 } while (pLinuxKernelCmpxchg(old_value, new_value, | |
74 const_cast<Atomic32*>(ptr))); | |
75 return old_value; | |
76 } | 46 } |
77 | 47 |
78 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 48 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
79 Atomic32 increment) { | 49 Atomic32 increment) { |
80 return Barrier_AtomicIncrement(ptr, increment); | 50 return Barrier_AtomicIncrement(ptr, increment); |
81 } | 51 } |
82 | 52 |
83 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 53 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
84 Atomic32 increment) { | 54 Atomic32 increment) { |
85 for (;;) { | 55 return __sync_add_and_fetch(const_cast<Atomic32*>(ptr), increment); |
86 // Atomic exchange the old value with an incremented one. | |
87 Atomic32 old_value = *ptr; | |
88 Atomic32 new_value = old_value + increment; | |
89 if (pLinuxKernelCmpxchg(old_value, new_value, | |
90 const_cast<Atomic32*>(ptr)) == 0) { | |
91 // The exchange took place as expected. | |
92 return new_value; | |
93 } | |
94 // Otherwise, *ptr changed mid-loop and we need to retry. | |
95 } | |
96 } | 56 } |
97 | 57 |
98 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 58 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
99 Atomic32 old_value, | 59 Atomic32 old_value, |
100 Atomic32 new_value) { | 60 Atomic32 new_value) { |
101 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 61 Atomic32 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 62 MemoryBarrier(); |
| 63 return value; |
102 } | 64 } |
103 | 65 |
104 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 66 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
105 Atomic32 old_value, | 67 Atomic32 old_value, |
106 Atomic32 new_value) { | 68 Atomic32 new_value) { |
| 69 MemoryBarrier(); |
107 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 70 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
108 } | 71 } |
109 | 72 |
| 73 |
110 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 74 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
111 *ptr = value; | 75 *ptr = value; |
112 } | 76 } |
113 | 77 |
| 78 |
// Full memory barrier: no load or store may be reordered across this
// call in either direction.  __sync_synchronize emits whatever fence
// the target ISA provides.
inline void MemoryBarrier() {
  __sync_synchronize();
}
117 | 82 |
| 83 |
118 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 84 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
119 *ptr = value; | 85 *ptr = value; |
120 MemoryBarrier(); | 86 MemoryBarrier(); |
121 } | 87 } |
122 | 88 |
| 89 |
123 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 90 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
124 MemoryBarrier(); | 91 MemoryBarrier(); |
125 *ptr = value; | 92 *ptr = value; |
126 } | 93 } |
127 | 94 |
| 95 |
128 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 96 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
129 return *ptr; | 97 return *ptr; |
130 } | 98 } |
131 | 99 |
| 100 |
132 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 101 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
133 Atomic32 value = *ptr; | 102 Atomic32 value = *ptr; |
134 MemoryBarrier(); | 103 MemoryBarrier(); |
135 return value; | 104 return value; |
136 } | 105 } |
137 | 106 |
| 107 |
138 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 108 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
139 MemoryBarrier(); | 109 MemoryBarrier(); |
140 return *ptr; | 110 return *ptr; |
141 } | 111 } |
142 | 112 |
| 113 |
143 } } // namespace v8::internal | 114 } } // namespace v8::internal |
144 | 115 |
145 #endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 116 #endif // V8_ATOMICOPS_INTERNALS_SH4_GCC_H_ |
OLD | NEW |