Chromium Code Reviews

Unified Diff: src/base/atomicops.h

Issue 2912773002: Rename "NoBarrier" memory operations to "Relaxed". (Closed)
Patch Set: "more", created 3 years, 6 months ago
  // Copyright 2010 the V8 project authors. All rights reserved.
  // Use of this source code is governed by a BSD-style license that can be
  // found in the LICENSE file.

  // The routines exported by this module are subtle. If you use them, even if
  // you get the code right, it will depend on careful reasoning about atomicity
  // and memory ordering; it will be less readable, and harder to maintain. If
  // you plan to use these routines, you should have a good reason, such as solid
  // evidence that performance would otherwise suffer, or there being no
  // alternative. You should assume only properties explicitly guaranteed by the
  // specifications in this file. You are almost certainly _not_ writing code
  // just for the x86; if you assume x86 semantics, x86 hardware bugs and
  // implementations on other architectures will cause your code to break. If you
  // do not know what you are doing, avoid these routines, and use a Mutex.
  //
  // It is incorrect to make direct assignments to/from an atomic variable.
- // You should use one of the Load or Store routines. The NoBarrier
- // versions are provided when no barriers are needed:
- //   NoBarrier_Store()
- //   NoBarrier_Load()
+ // You should use one of the Load or Store routines. The Relaxed versions
+ // are provided when no fences are needed:
+ //   Relaxed_Store()
+ //   Relaxed_Load()
  // Although there is currently no compiler enforcement, you are encouraged
  // to use these.
  //

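For context, a minimal sketch of the usage the comment block above prescribes; the variable and helper names here are illustrative, not part of the header, and the code assumes v8::base scope:

    // Illustrative only; |g_flag| and these helpers are not in atomicops.h.
    volatile Atomic32 g_flag = 0;

    void SetFlag() {
      // g_flag = 1;  // the direct assignment the comment warns against
      Relaxed_Store(&g_flag, 1);  // atomic store, no ordering implied
    }

    Atomic32 GetFlag() {
      return Relaxed_Load(&g_flag);  // atomic load, no ordering implied
    }
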
  #ifndef V8_BASE_ATOMICOPS_H_
  #define V8_BASE_ATOMICOPS_H_

  #include <stdint.h>

  // Small C++ header which defines implementation specific macros used to
(...skipping 36 matching lines...)
  // Atomically execute:
  //   result = *ptr;
  //   if (*ptr == old_value)
  //     *ptr = new_value;
  //   return result;
  //
  // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
  // Always return the old value of "*ptr".
  //
  // This routine implies no memory barriers.
- Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                   Atomic32 old_value,
-                                   Atomic32 new_value);
+ Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value,
+                                 Atomic32 new_value);

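The usual client of a relaxed compare-and-swap is a retry loop. As a sketch under the declarations above (the function name is illustrative), atomically raising a cell to a running maximum looks like:

    // Sketch: atomically ensure *cell >= candidate, retrying on contention.
    void RelaxedStoreMax(volatile Atomic32* cell, Atomic32 candidate) {
      Atomic32 observed = Relaxed_Load(cell);
      while (observed < candidate) {
        // Install |candidate| only if *cell still holds |observed|; the
        // return value is what *cell held before the attempt.
        Atomic32 prev = Relaxed_CompareAndSwap(cell, observed, candidate);
        if (prev == observed) break;  // our value landed
        observed = prev;  // lost the race; retry against the newer value
      }
    }
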
  // Atomically store new_value into *ptr, returning the previous value held in
  // *ptr. This routine implies no memory barriers.
- Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);
+ Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);

  // Atomically increment *ptr by "increment". Returns the new value of
  // *ptr with the increment applied. This routine implies no memory barriers.
- Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);
+ Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

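Together these two routines cover the common statistics-counter pattern, where atomicity matters but ordering does not; a sketch with illustrative names:

    // Sketch: an event counter that tolerates relaxed ordering.
    volatile Atomic32 g_events = 0;

    void RecordEvent() {
      Relaxed_AtomicIncrement(&g_events, 1);  // new value returned, unused here
    }

    Atomic32 TakeEvents() {
      // Read the accumulated count and reset it in one atomic step.
      return Relaxed_AtomicExchange(&g_events, 0);
    }
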
  Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                   Atomic32 increment);

  // The following lower-level operations are typically useful only to people
  // implementing higher-level synchronization operations like spinlocks,
  // mutexes, and condition variables. They combine CompareAndSwap(), a load, or
  // a store with appropriate memory-ordering instructions. "Acquire" operations
  // ensure that no later memory access can be reordered ahead of the operation.
  // "Release" operations ensure that no previous memory access can be reordered
  // after the operation. "Barrier" operations have both "Acquire" and "Release"
  // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
  // access.
  Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);
  Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);

  void MemoryBarrier();

ulan 2017/05/29 13:08:20: I will rename this in another CL.
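The spinlock mentioned above is the canonical client of these routines. A sketch built from this header's primitives, with illustrative names (Release_Store is declared just below):

    // Sketch: a test-and-set spinlock. 0 = free, 1 = held.
    volatile Atomic32 g_lock = 0;

    void SpinAcquire() {
      // Acquire semantics: accesses in the critical section cannot be
      // reordered ahead of the successful 0 -> 1 transition.
      while (Acquire_CompareAndSwap(&g_lock, 0, 1) != 0) {
        // Lock was held; spin until we observe it free and win the swap.
      }
    }

    void SpinRelease() {
      // Release semantics: accesses in the critical section become visible
      // before the lock reads as free again.
      Release_Store(&g_lock, 0);
    }
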
- void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value);
- void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
+ void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value);
+ void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value);
  void Release_Store(volatile Atomic32* ptr, Atomic32 value);

- Atomic8 NoBarrier_Load(volatile const Atomic8* ptr);
- Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
+ Atomic8 Relaxed_Load(volatile const Atomic8* ptr);
+ Atomic32 Relaxed_Load(volatile const Atomic32* ptr);
  Atomic32 Acquire_Load(volatile const Atomic32* ptr);

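Release_Store() paired with Acquire_Load() yields the classic publish/consume handoff; a sketch with illustrative globals:

    // Sketch: publish a payload in one thread, consume it in another.
    int g_payload = 0;             // plain data, guarded by the flag below
    volatile Atomic32 g_ready = 0;

    void Producer() {
      g_payload = 42;              // ordinary write
      Release_Store(&g_ready, 1);  // the write above cannot sink below this
    }

    void Consumer() {
      if (Acquire_Load(&g_ready) == 1) {
        // Reads here cannot hoist above the acquire load, so the producer's
        // write to g_payload is guaranteed visible.
        int value = g_payload;
        (void)value;
      }
    }
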
  // 64-bit atomic operations (only available on 64-bit processors).
  #ifdef V8_HOST_ARCH_64_BIT
- Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                   Atomic64 old_value,
-                                   Atomic64 new_value);
- Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
- Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
+ Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value,
+                                 Atomic64 new_value);
+ Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
+ Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
  Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

ulan 2017/05/29 13:08:20: I will rename this in another CL.

  Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
  Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
- void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
+ void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value);
  void Release_Store(volatile Atomic64* ptr, Atomic64 value);
- Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
+ Atomic64 Relaxed_Load(volatile const Atomic64* ptr);
  Atomic64 Acquire_Load(volatile const Atomic64* ptr);
  #endif  // V8_HOST_ARCH_64_BIT

  }  // namespace base
  }  // namespace v8

  #if defined(V8_OS_WIN)
  // TODO(hpayer): The MSVC header includes windows.h, which other files end up
  // relying on. Fix this as part of crbug.com/559247.
  #include "src/base/atomicops_internals_x86_msvc.h"
  #else
  #include "src/base/atomicops_internals_portable.h"
  #endif

  // On some platforms we need additional declarations to make
  // AtomicWord compatible with our other Atomic* types.
  #if defined(V8_OS_MACOSX) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX)
  #include "src/base/atomicops_internals_atomicword_compat.h"
  #endif

  #endif  // V8_BASE_ATOMICOPS_H_
