Chromium Code Reviews

Side by Side Diff: third_party/libphonenumber/cpp/src/base/atomicops_internals_x86_macosx.h

Issue 6803005: Autofill phone number enhancements and integration of Phone Number Util Library: part 1 (Closed) Base URL: svn://chrome-svn/chrome/trunk/src/
Patch Set: '' Created 9 years, 7 months ago
Property Changes:
Added: svn:eol-style
+ LF
// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#define BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_
#pragma once

#include <libkern/OSAtomic.h>

namespace base {
namespace subtle {

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

inline void MemoryBarrier() {
  OSMemoryBarrier();
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  MemoryBarrier();
  return *ptr;
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platform

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     reinterpret_cast<volatile int64_t*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment,
                              reinterpret_cast<volatile int64_t*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(
        old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(__LP64__)

// MacOS uses long for intptr_t, AtomicWord and Atomic32 are always different
// on the Mac, even when they are the same size. We need to explicitly cast
// from AtomicWord to Atomic32 to implement the AtomicWord interface.
// When in 64-bit mode, AtomicWord is the same as Atomic64, so we need not
// add duplicate definitions.
#ifndef __LP64__
#define AtomicWordCastType Atomic32

inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr,
                                           AtomicWord old_value,
                                           AtomicWord new_value) {
  return NoBarrier_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr,
                                           AtomicWord new_value) {
  return NoBarrier_AtomicExchange(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value);
}

inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr,
                                            AtomicWord increment) {
  return NoBarrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr,
                                          AtomicWord increment) {
  return Barrier_AtomicIncrement(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment);
}

inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Acquire_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr,
                                         AtomicWord old_value,
                                         AtomicWord new_value) {
  return base::subtle::Release_CompareAndSwap(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr),
      old_value, new_value);
}

inline void NoBarrier_Store(volatile AtomicWord *ptr, AtomicWord value) {
  NoBarrier_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Acquire_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) {
  return base::subtle::Release_Store(
      reinterpret_cast<volatile AtomicWordCastType*>(ptr), value);
}

inline AtomicWord NoBarrier_Load(volatile const AtomicWord *ptr) {
  return NoBarrier_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Acquire_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

inline AtomicWord Release_Load(volatile const AtomicWord* ptr) {
  return base::subtle::Release_Load(
      reinterpret_cast<volatile const AtomicWordCastType*>(ptr));
}

#undef AtomicWordCastType
#endif

}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_
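
For reviewers less familiar with the base/atomicops interface this header implements, below is a minimal usage sketch. It is not part of this patch: it assumes callers go through the public base/atomicops.h wrapper (as the comment at the top of the file instructs), and the SpinLock class and its member names are hypothetical, chosen only to illustrate how the Acquire/Release primitives are typically combined.

    // Illustrative sketch only, not part of this change: a tiny test-and-set
    // lock built on the primitives declared above. SpinLock and its methods
    // are hypothetical names.
    #include "base/atomicops.h"

    class SpinLock {
     public:
      SpinLock() : state_(0) {}

      void Acquire() {
        // Acquire_CompareAndSwap returns the value previously stored at ptr;
        // a return of 0 means the 0 -> 1 transition succeeded and the caller
        // now owns the lock.
        while (base::subtle::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
          // Spin until the current holder releases.
        }
      }

      void Release() {
        // Release_Store publishes the writes made inside the critical section
        // before other threads can observe the lock as free.
        base::subtle::Release_Store(&state_, 0);
      }

     private:
      base::subtle::Atomic32 state_;
    };

The acquire barrier on the compare-and-swap and the release barrier on the store are what give the critical section its ordering; the NoBarrier_ variants in this header deliberately make no such guarantee.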