Chromium Code Reviews
Unified Diff: src/atomicops_internals_mips_gcc.h

Issue 8413073: MIPS: updated atomic operations. (Closed)
Patch Set: Created 9 years, 1 month ago
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 12 matching lines...)
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 // This file is an internal atomic implementation, use atomicops.h instead.

 #ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 #define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_

-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

 namespace v8 {
 namespace internal {
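A note on the macro change above, since it drives the rest of the patch: ATOMICOPS_COMPILER_BARRIER() is reduced from a hardware "sync" to a pure compiler barrier, and the actual "sync" instruction moves into MemoryBarrier() further down in this file. A minimal sketch of the two flavours follows; the macro names are illustrative only and are not part of the patch.

// Compiler-only barrier: forbids the compiler from reordering or caching
// memory accesses across this point, but emits no machine instruction.
#define COMPILER_ONLY_BARRIER() __asm__ __volatile__("" : : : "memory")

// Hardware barrier: additionally orders the accesses as seen by other CPUs;
// on MIPS this is the "sync" instruction.
#define HARDWARE_BARRIER() __asm__ __volatile__("sync" : : : "memory")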
 // Atomically execute:
 //      result = *ptr;
 //      if (*ptr == old_value)
 //        *ptr = new_value;
 //      return result;
 //
 // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
 // Always return the old value of "*ptr"
 //
 // This routine implies no memory barriers.
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
-  Atomic32 prev;
-  __asm__ __volatile__("1:\n"
-                       "ll %0, %1\n"  // prev = *ptr
+  Atomic32 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %5\n"  // prev = *ptr
                        "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
-                       "nop\n"  // delay slot nop
-                       "sc %2, %1\n"  // *ptr = new_value (with atomic check)
+                       "move %2, %4\n"  // tmp = new_value
+                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
                        "beqz %2, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
                        "2:\n"
-                       : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                        : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                        : "memory");
   return prev;
 }
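To make the comment above concrete, here is how a caller typically builds a read-modify-write operation out of this primitive. AtomicMax is a hypothetical helper used only for illustration; it is not defined anywhere in this header.

// Atomically raise *ptr to at least |value|, retrying whenever another thread
// modifies *ptr between our read and our compare-and-swap.
inline Atomic32 AtomicMax(volatile Atomic32* ptr, Atomic32 value) {
  Atomic32 old = *ptr;
  while (old < value) {
    Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old, value);
    if (prev == old) break;  // the swap succeeded; *ptr now holds |value|
    old = prev;              // lost the race; retry against the value we saw
  }
  return old;
}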
 // Atomically store new_value into *ptr, returning the previous value held in
 // *ptr. This routine implies no memory barriers.
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
   Atomic32 temp, old;
-  __asm__ __volatile__("1:\n"
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
                        "ll %1, %2\n"  // old = *ptr
                        "move %0, %3\n"  // temp = new_value
                        "sc %0, %2\n"  // *ptr = temp (with atomic check)
                        "beqz %0, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
+                       ".set pop\n"
                        : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                        : "r" (new_value), "m" (*ptr)
                        : "memory");

   return old;
 }
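The usual consumer of an exchange primitive is a test-and-set lock, which also shows how the no-barrier routines are meant to be paired with the MemoryBarrier() and NoBarrier_Store() defined later in this diff. The SpinLock class below is a sketch for illustration only, not part of the patch.

class SpinLock {
 public:
  SpinLock() : state_(0) {}
  void Lock() {
    // Spin until we are the thread that swaps 0 -> 1.
    while (NoBarrier_AtomicExchange(&state_, 1) != 0) {}
    MemoryBarrier();  // acquire: accesses in the critical section stay after the lock
  }
  void Unlock() {
    MemoryBarrier();  // release: accesses in the critical section complete first
    NoBarrier_Store(&state_, 0);
  }
 private:
  volatile Atomic32 state_;
};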
 // Atomically increment *ptr by "increment". Returns the new value of
 // *ptr with the increment applied. This routine implies no memory barriers.
 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                           Atomic32 increment) {
   Atomic32 temp, temp2;

-  __asm__ __volatile__("1:\n"
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
                        "ll %0, %2\n"  // temp = *ptr
-                       "addu %0, %3\n"  // temp = temp + increment
-                       "move %1, %0\n"  // temp2 = temp
-                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
-                       "beqz %0, 1b\n"  // start again on atomic error
-                       "nop\n"  // delay slot nop
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
                        : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                        : "Ir" (increment), "m" (*ptr)
                        : "memory");
   // temp2 now holds the final value.
   return temp2;
 }
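One subtlety in the new loop is worth spelling out: sc overwrites %1 with its success flag, so the final addu, sitting in the branch delay slot of beqz, recomputes temp2 whether or not the loop retries. A C-level reading of the loop follows, expressed with the NoBarrier_CompareAndSwap defined earlier; Increment_Sketch is a hypothetical name and this is my reading of the patch, not code from the file.

// C-level sketch of the ll/sc retry loop above, for illustration only.
inline Atomic32 Increment_Sketch(volatile Atomic32* ptr, Atomic32 increment) {
  Atomic32 temp, temp2;
  do {
    temp = *ptr;               // ll: read the current value
    temp2 = temp + increment;  // addu: compute the new value
    // sc stores temp2 only if *ptr is unchanged since the ll; a CAS models that here.
  } while (NoBarrier_CompareAndSwap(ptr, temp, temp2) != temp);
  return temp2;                // the delay-slot addu keeps temp2 = temp + increment
}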
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
+  ATOMICOPS_COMPILER_BARRIER();
   Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
   ATOMICOPS_COMPILER_BARRIER();
   return res;
 }
 // "Acquire" operations
 // ensure that no later memory access can be reordered ahead of the operation.
 // "Release" operations ensure that no previous memory access can be reordered
 // after the operation. "Barrier" operations have both "Acquire" and "Release"
 // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
 // access.
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
   ATOMICOPS_COMPILER_BARRIER();
-  return x;
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
 }

 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   ATOMICOPS_COMPILER_BARRIER();
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
 }
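For readers who want the acquire/release comment above made concrete, the classic use is publishing data through a flag. Everything below (Producer, Consumer, data, ready) is a hypothetical illustration built only on routines visible in this diff, not code from the patch.

volatile Atomic32 data = 0;   // payload written by the producer
volatile Atomic32 ready = 0;  // flag that publishes the payload

void Producer() {
  data = 42;
  Release_Store(&ready, 1);  // release: the write to data is ordered before ready == 1
}

Atomic32 Consumer() {
  while (ready == 0) {}  // spin until the flag is set
  MemoryBarrier();       // acquire: the read of data cannot move above the flag check
  return data;           // observes 42
}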
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }

 inline void MemoryBarrier() {
-  ATOMICOPS_COMPILER_BARRIER();
+  __asm__ __volatile__("sync" : : : "memory");
 }

 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
   MemoryBarrier();
 }

 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   MemoryBarrier();
   *ptr = value;
(...skipping 12 matching lines...)
 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
   MemoryBarrier();
   return *ptr;
 }

 } }  // namespace v8::internal

 #undef ATOMICOPS_COMPILER_BARRIER

 #endif  // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_