Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(114)

Side by Side Diff: src/base/atomicops_internals_mac.h

Issue 694703003: Use compiler barrier instead of memory barrier for release/acquire atomic operations on mac. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2010 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 // This file is an internal atomic implementation, use atomicops.h instead. 5 // This file is an internal atomic implementation, use atomicops.h instead.
6 6
7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_ 7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
8 #define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_ 8 #define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
9 9
10 #include <libkern/OSAtomic.h> 10 #include <libkern/OSAtomic.h>
11 11
12 namespace v8 { 12 namespace v8 {
13 namespace base { 13 namespace base {
14 14
15 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
16
17 inline void MemoryBarrier() { OSMemoryBarrier(); }
18
15 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, 19 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
16 Atomic32 old_value, 20 Atomic32 old_value,
17 Atomic32 new_value) { 21 Atomic32 new_value) {
18 Atomic32 prev_value; 22 Atomic32 prev_value;
19 do { 23 do {
20 if (OSAtomicCompareAndSwap32(old_value, new_value, 24 if (OSAtomicCompareAndSwap32(old_value, new_value,
21 const_cast<Atomic32*>(ptr))) { 25 const_cast<Atomic32*>(ptr))) {
22 return old_value; 26 return old_value;
23 } 27 }
24 prev_value = *ptr; 28 prev_value = *ptr;
(...skipping 14 matching lines...) Expand all
39 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, 43 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
40 Atomic32 increment) { 44 Atomic32 increment) {
41 return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr)); 45 return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
42 } 46 }
43 47
44 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, 48 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
45 Atomic32 increment) { 49 Atomic32 increment) {
46 return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr)); 50 return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
47 } 51 }
48 52
49 inline void MemoryBarrier() {
50 OSMemoryBarrier();
51 }
52
53 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, 53 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
54 Atomic32 old_value, 54 Atomic32 old_value,
55 Atomic32 new_value) { 55 Atomic32 new_value) {
56 Atomic32 prev_value; 56 Atomic32 prev_value;
57 do { 57 do {
58 if (OSAtomicCompareAndSwap32Barrier(old_value, new_value, 58 if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
59 const_cast<Atomic32*>(ptr))) { 59 const_cast<Atomic32*>(ptr))) {
60 return old_value; 60 return old_value;
61 } 61 }
62 prev_value = *ptr; 62 prev_value = *ptr;
(...skipping 10 matching lines...) Expand all
73 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { 73 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
74 *ptr = value; 74 *ptr = value;
75 } 75 }
76 76
77 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { 77 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
78 *ptr = value; 78 *ptr = value;
79 } 79 }
80 80
81 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { 81 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
82 *ptr = value; 82 *ptr = value;
83 MemoryBarrier(); 83 ATOMICOPS_COMPILER_BARRIER();
Jarin 2014/11/05 11:54:53 I think it is better not to touch anything else than the release/acquire barrier operations in this change. [comment truncated in page capture; reconstructed]
Hannes Payer (out of office) 2014/11/05 12:16:23 Done. However, I would also change the release sto
84 } 84 }
85 85
86 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { 86 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
87 MemoryBarrier(); 87 ATOMICOPS_COMPILER_BARRIER();
88 *ptr = value; 88 *ptr = value;
89 } 89 }
90 90
91 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { 91 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
92 return *ptr; 92 return *ptr;
93 } 93 }
94 94
95 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { 95 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
96 return *ptr; 96 return *ptr;
97 } 97 }
98 98
99 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { 99 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
100 Atomic32 value = *ptr; 100 Atomic32 value = *ptr;
101 MemoryBarrier(); 101 ATOMICOPS_COMPILER_BARRIER();
Jarin 2014/11/05 11:54:53 How about just saying here // On x86 processors, loads already have acquire semantics, so a compiler barrier is sufficient. [comment truncated in page capture; reconstructed]
Hannes Payer (out of office) 2014/11/05 12:16:23 Done.
102 return value; 102 return value;
103 } 103 }
104 104
105 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { 105 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
106 MemoryBarrier(); 106 ATOMICOPS_COMPILER_BARRIER();
107 return *ptr; 107 return *ptr;
108 } 108 }
109 109
110 #ifdef __LP64__ 110 #ifdef __LP64__
111 111
112 // 64-bit implementation on 64-bit platform 112 // 64-bit implementation on 64-bit platform
113 113
114 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, 114 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
115 Atomic64 old_value, 115 Atomic64 old_value,
116 Atomic64 new_value) { 116 Atomic64 new_value) {
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
167 // Acquire and Release memory barriers; they are equivalent. 167 // Acquire and Release memory barriers; they are equivalent.
168 return Acquire_CompareAndSwap(ptr, old_value, new_value); 168 return Acquire_CompareAndSwap(ptr, old_value, new_value);
169 } 169 }
170 170
171 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { 171 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
172 *ptr = value; 172 *ptr = value;
173 } 173 }
174 174
175 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { 175 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
176 *ptr = value; 176 *ptr = value;
177 MemoryBarrier(); 177 ATOMICOPS_COMPILER_BARRIER();
178 } 178 }
179 179
180 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { 180 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
181 MemoryBarrier(); 181 ATOMICOPS_COMPILER_BARRIER();
182 *ptr = value; 182 *ptr = value;
183 } 183 }
184 184
185 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { 185 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
186 return *ptr; 186 return *ptr;
187 } 187 }
188 188
189 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { 189 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
190 Atomic64 value = *ptr; 190 Atomic64 value = *ptr;
191 MemoryBarrier(); 191 ATOMICOPS_COMPILER_BARRIER();
Jarin 2014/11/05 11:54:53 As above, maybe we want // On x86 processors, loads already have acquire semantics, so a compiler barrier is sufficient. [comment truncated in page capture; reconstructed]
Hannes Payer (out of office) 2014/11/05 12:16:23 Done.
192 return value; 192 return value;
193 } 193 }
194 194
195 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { 195 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
196 MemoryBarrier(); 196 ATOMICOPS_COMPILER_BARRIER();
197 return *ptr; 197 return *ptr;
198 } 198 }
199 199
200 #endif // defined(__LP64__) 200 #endif // defined(__LP64__)
201 201
202 #undef ATOMICOPS_COMPILER_BARRIER
202 } } // namespace v8::base 203 } } // namespace v8::base
203 204
204 #endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_ 205 #endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698