OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file is an internal atomic implementation, use atomicops.h instead. | 5 // This file is an internal atomic implementation, use atomicops.h instead. |
6 | 6 |
7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
8 #define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 8 #define V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
9 | 9 |
10 namespace v8 { | 10 namespace v8 { |
11 namespace base { | 11 namespace base { |
12 | 12 |
13 // Atomically execute: | 13 // Atomically execute: |
14 // result = *ptr; | 14 // result = *ptr; |
15 // if (*ptr == old_value) | 15 // if (*ptr == old_value) |
16 // *ptr = new_value; | 16 // *ptr = new_value; |
17 // return result; | 17 // return result; |
18 // | 18 // |
19 // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". | 19 // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". |
20 // Always return the old value of "*ptr". | 20 // Always return the old value of "*ptr". |
21 // | 21 // |
22 // This routine implies no memory barriers. | 22 // This routine implies no memory barriers. |
23 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 23 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
24 Atomic32 old_value, | 24 Atomic32 old_value, |
25 Atomic32 new_value) { | 25 Atomic32 new_value) { |
26 Atomic32 prev, tmp; | 26 Atomic32 prev, tmp; |
27 __asm__ __volatile__(".set push\n" | 27 __asm__ __volatile__(".set push\n" |
28 ".set noreorder\n" | 28 ".set noreorder\n" |
29 "1:\n" | 29 "1:\n" |
30 "ll %0, %5\n" // prev = *ptr | 30 "ll %0, 0(%4)\n" // prev = *ptr |
31 "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 | 31 "bne %0, %2, 2f\n" // if (prev != old_value) goto 2 |
32 "move %2, %4\n" // tmp = new_value | 32 "move %1, %3\n" // tmp = new_value |
33 "sc %2, %1\n" // *ptr = tmp (with atomic check) | 33 "sc %1, 0(%4)\n" // *ptr = tmp (with atomic check) |
34 "beqz %2, 1b\n" // start again on atomic error | 34 "beqz %1, 1b\n" // start again on atomic error |
35 "nop\n" // delay slot nop | 35 "nop\n" // delay slot nop |
36 "2:\n" | 36 "2:\n" |
37 ".set pop\n" | 37 ".set pop\n" |
38 : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) | 38 : "=&r" (prev), "=&r" (tmp) |
39 : "Ir" (old_value), "r" (new_value), "m" (*ptr) | 39 : "Ir" (old_value), "r" (new_value), "r" (ptr) |
40 : "memory"); | 40 : "memory"); |
41 return prev; | 41 return prev; |
42 } | 42 } |
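The new code passes the pointer in a register ("r" (ptr)) and writes the 0(%4) addressing explicitly, so the ll/sc pair always operates on a plain register+offset address; with the old "m"/"=m" operands the compiler was free to pick an addressing form that ll/sc cannot encode directly. As an illustration of the semantics only (not part of this patch), the same compare-and-swap can be expressed with GCC's __sync builtin, which, unlike the routine above, implies a full memory barrier:

  // Sketch: same result as NoBarrier_CompareAndSwap, but with a full
  // barrier, via the GCC builtin. Illustrative, not the patched code.
  inline Atomic32 CompareAndSwapSketch(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
    return __sync_val_compare_and_swap(ptr, old_value, new_value);
  }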
43 | 43 |
44 // Atomically store new_value into *ptr, returning the previous value held in | 44 // Atomically store new_value into *ptr, returning the previous value held in |
45 // *ptr. This routine implies no memory barriers. | 45 // *ptr. This routine implies no memory barriers. |
46 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 46 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
47 Atomic32 new_value) { | 47 Atomic32 new_value) { |
48 Atomic32 temp, old; | 48 Atomic32 temp, old; |
49 __asm__ __volatile__(".set push\n" | 49 __asm__ __volatile__(".set push\n" |
50 ".set noreorder\n" | 50 ".set noreorder\n" |
| 51 ".set at\n" |
51 "1:\n" | 52 "1:\n" |
52 "ll %1, %2\n" // old = *ptr | 53 "ll %1, 0(%3)\n" // old = *ptr |
53 "move %0, %3\n" // temp = new_value | 54 "move %0, %2\n" // temp = new_value |
54 "sc %0, %2\n" // *ptr = temp (with atomic check) | 55 "sc %0, 0(%3)\n" // *ptr = temp (with atomic check) |
55 "beqz %0, 1b\n" // start again on atomic error | 56 "beqz %0, 1b\n" // start again on atomic error |
56 "nop\n" // delay slot nop | 57 "nop\n" // delay slot nop |
57 ".set pop\n" | 58 ".set pop\n" |
58 : "=&r" (temp), "=&r" (old), "=m" (*ptr) | 59 : "=&r" (temp), "=&r" (old) |
59 : "r" (new_value), "m" (*ptr) | 60 : "r" (new_value), "r" (ptr) |
60 : "memory"); | 61 : "memory"); |
61 | 62 |
62 return old; | 63 return old; |
63 } | 64 } |
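For reference, the exchange loop above performs the following read-modify-write as a single atomic step. The plain C++ model below is illustrative only and is not itself atomic; the ll/sc retry loop is what makes the real routine safe against concurrent writers:

  // Non-atomic model of NoBarrier_AtomicExchange (sketch, not for use).
  inline Atomic32 ExchangeModel(volatile Atomic32* ptr, Atomic32 new_value) {
    Atomic32 old = *ptr;   // what "ll" reads
    *ptr = new_value;      // what "sc" stores (retried until it succeeds)
    return old;
  }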
64 | 65 |
65 // Atomically increment *ptr by "increment". Returns the new value of | 66 // Atomically increment *ptr by "increment". Returns the new value of |
66 // *ptr with the increment applied. This routine implies no memory barriers. | 67 // *ptr with the increment applied. This routine implies no memory barriers. |
67 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 68 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
68 Atomic32 increment) { | 69 Atomic32 increment) { |
69 Atomic32 temp, temp2; | 70 Atomic32 temp, temp2; |
70 | 71 |
71 __asm__ __volatile__(".set push\n" | 72 __asm__ __volatile__(".set push\n" |
72 ".set noreorder\n" | 73 ".set noreorder\n" |
73 "1:\n" | 74 "1:\n" |
74 "ll %0, %2\n" // temp = *ptr | 75 "ll %0, 0(%3)\n" // temp = *ptr |
75 "addu %1, %0, %3\n" // temp2 = temp + increment | 76 "addu %1, %0, %2\n" // temp2 = temp + increment |
76 "sc %1, %2\n" // *ptr = temp2 (with atomic check) | 77 "sc %1, 0(%3)\n" // *ptr = temp2 (with atomic check) |
77 "beqz %1, 1b\n" // start again on atomic error | 78 "beqz %1, 1b\n" // start again on atomic error |
78 "addu %1, %0, %3\n" // temp2 = temp + increment | 79 "addu %1, %0, %2\n" // temp2 = temp + increment |
79 ".set pop\n" | 80 ".set pop\n" |
80 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) | 81 : "=&r" (temp), "=&r" (temp2) |
81 : "Ir" (increment), "m" (*ptr) | 82 : "Ir" (increment), "r" (ptr) |
82 : "memory"); | 83 : "memory"); |
83 // temp2 now holds the final value. | 84 // temp2 now holds the final value. |
84 return temp2; | 85 return temp2; |
85 } | 86 } |
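Since NoBarrier_AtomicIncrement returns the post-increment value, a typical caller is an unordered statistics counter. The counter below is hypothetical, purely to show the calling convention:

  // Hypothetical usage: bump a counter with no ordering guarantees.
  static volatile Atomic32 g_event_count = 0;  // assumed, not in this file

  inline Atomic32 RecordEvent() {
    return NoBarrier_AtomicIncrement(&g_event_count, 1);  // new value
  }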
86 | 87 |
87 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 88 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
88 Atomic32 increment) { | 89 Atomic32 increment) { |
89 MemoryBarrier(); | 90 MemoryBarrier(); |
90 Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); | 91 Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); |
91 MemoryBarrier(); | 92 MemoryBarrier(); |
(...skipping 58 matching lines...)
150 } | 151 } |
151 | 152 |
152 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 153 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
153 MemoryBarrier(); | 154 MemoryBarrier(); |
154 return *ptr; | 155 return *ptr; |
155 } | 156 } |
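Release_Load above issues the barrier before the load; its store-side counterparts fall in the hunk elided above. Assuming the usual atomicops Release_Store and Acquire_Load (not shown in this excerpt), a producer/consumer handoff would look like this sketch:

  // Sketch of barrier pairing; assumes Release_Store/Acquire_Load exist in
  // the elided portion of this file.
  static volatile Atomic32 g_ready = 0;  // hypothetical flag
  static int g_payload = 0;              // hypothetical data

  void Producer() {
    g_payload = 42;              // plain store, ordered by the release barrier
    Release_Store(&g_ready, 1);  // MemoryBarrier(), then store the flag
  }

  int Consumer() {
    while (Acquire_Load(&g_ready) == 0) {}  // load the flag, then barrier
    return g_payload;                       // sees 42 once g_ready is 1
  }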
156 | 157 |
157 } } // namespace v8::base | 158 } } // namespace v8::base |
158 | 159 |
159 #endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 160 #endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |