Chromium Code Reviews

Side by Side Diff: src/runtime/runtime-atomics.cc

Issue 1550803006: Convert runtime atomics functions to inline asm (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: update BUILD.gn, add *-inl.h and *.h to gyp as well (created 4 years, 11 months ago)
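
For orientation before the long diff below: the patch removes the GCC __atomic_* and MSVC Interlocked* implementations from runtime-atomics.cc and instead selects a per-architecture inline-assembly header at compile time, with call sites switching to helpers in an atomics:: namespace (e.g. atomics::AddSeqCst). A condensed copy of the new dispatch block, taken from the added lines in the diff (the *-inl.h and *.h headers themselves are separate files and are not shown on this page):

#if V8_TARGET_ARCH_IA32
#include "src/runtime/runtime-atomics-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#if V8_CC_MSVC
// MSVC for x64 does not support inline assembly, so the implementations are
// in a separate assembly source file; this header only has the declarations.
#include "src/runtime/runtime-atomics-x64.h"
#else
#include "src/runtime/runtime-atomics-x64-inl.h"
#endif
#else
// Other target architectures fall back to a compiler-intrinsics version.
#include "src/runtime/runtime-atomics-intrinsics-inl.h"
#endif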
OLD | NEW
1 // Copyright 2015 the V8 project authors. All rights reserved. 1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/runtime/runtime-utils.h" 5 #include "src/runtime/runtime-utils.h"
6 6
7 #include "src/arguments.h" 7 #include "src/arguments.h"
8 #include "src/base/macros.h" 8 #include "src/base/macros.h"
9 #include "src/base/platform/mutex.h" 9 #include "src/base/platform/mutex.h"
10 #include "src/conversions-inl.h" 10 #include "src/conversions-inl.h"
11 #include "src/factory.h" 11 #include "src/factory.h"
12 12
13 // Implement Atomic accesses to SharedArrayBuffers as defined in the 13 // Implement Atomic accesses to SharedArrayBuffers as defined in the
14 // SharedArrayBuffer draft spec, found here 14 // SharedArrayBuffer draft spec, found here
15 // https://github.com/lars-t-hansen/ecmascript_sharedmem 15 // https://github.com/lars-t-hansen/ecmascript_sharedmem
16 16
17 #if V8_TARGET_ARCH_IA32
18 #include "src/runtime/runtime-atomics-ia32-inl.h" // NOLINT
19 #elif V8_TARGET_ARCH_X64
20 #if V8_CC_MSVC
21 // MSVC for x64 does not support inline assembly, so the implementations are
22 // in a separate assembly source file. This header just includes the
23 // declarations.
24 #include "src/runtime/runtime-atomics-x64.h" // NOLINT
25 #else
26 #include "src/runtime/runtime-atomics-x64-inl.h" // NOLINT
27 #endif
28 #else
29 // TODO(binji): implement for all target architectures.
30 #include "src/runtime/runtime-atomics-intrinsics-inl.h" // NOLINT
31 #endif
32
17 namespace v8 { 33 namespace v8 {
18 namespace internal { 34 namespace internal {
19 35
20 namespace { 36 namespace {
21 37
22 inline bool AtomicIsLockFree(uint32_t size) { 38 inline bool IsLockFree(uint32_t size) {
23 return size == 1 || size == 2 || size == 4; 39 return size == 1 || size == 2 || size == 4;
24 } 40 }
25 41
26 #if V8_CC_GNU
27
28 template <typename T>
29 inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
30 (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
31 __ATOMIC_SEQ_CST);
32 return oldval;
33 }
34
35 template <typename T>
36 inline T LoadSeqCst(T* p) {
37 T result;
38 __atomic_load(p, &result, __ATOMIC_SEQ_CST);
39 return result;
40 }
41
42 template <typename T>
43 inline void StoreSeqCst(T* p, T value) {
44 __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
45 }
46
47 template <typename T>
48 inline T AddSeqCst(T* p, T value) {
49 return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
50 }
51
52 template <typename T>
53 inline T SubSeqCst(T* p, T value) {
54 return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
55 }
56
57 template <typename T>
58 inline T AndSeqCst(T* p, T value) {
59 return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
60 }
61
62 template <typename T>
63 inline T OrSeqCst(T* p, T value) {
64 return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
65 }
66
67 template <typename T>
68 inline T XorSeqCst(T* p, T value) {
69 return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
70 }
71
72 template <typename T>
73 inline T ExchangeSeqCst(T* p, T value) {
74 return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
75 }
76
77 #elif V8_CC_MSVC
78
79 #define InterlockedCompareExchange32 _InterlockedCompareExchange
80 #define InterlockedExchange32 _InterlockedExchange
81 #define InterlockedExchangeAdd32 _InterlockedExchangeAdd
82 #define InterlockedAnd32 _InterlockedAnd
83 #define InterlockedOr32 _InterlockedOr
84 #define InterlockedXor32 _InterlockedXor
85 #define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
86 #define InterlockedCompareExchange8 _InterlockedCompareExchange8
87 #define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
88
89 #define ATOMIC_OPS(type, suffix, vctype) \
90 inline type AddSeqCst(type* p, type value) { \
91 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
92 bit_cast<vctype>(value)); \
93 } \
94 inline type SubSeqCst(type* p, type value) { \
95 return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p), \
96 -bit_cast<vctype>(value)); \
97 } \
98 inline type AndSeqCst(type* p, type value) { \
99 return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p), \
100 bit_cast<vctype>(value)); \
101 } \
102 inline type OrSeqCst(type* p, type value) { \
103 return InterlockedOr##suffix(reinterpret_cast<vctype*>(p), \
104 bit_cast<vctype>(value)); \
105 } \
106 inline type XorSeqCst(type* p, type value) { \
107 return InterlockedXor##suffix(reinterpret_cast<vctype*>(p), \
108 bit_cast<vctype>(value)); \
109 } \
110 inline type ExchangeSeqCst(type* p, type value) { \
111 return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
112 bit_cast<vctype>(value)); \
113 } \
114 \
115 inline type CompareExchangeSeqCst(type* p, type oldval, type newval) { \
116 return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
117 bit_cast<vctype>(newval), \
118 bit_cast<vctype>(oldval)); \
119 } \
120 inline type LoadSeqCst(type* p) { return *p; } \
121 inline void StoreSeqCst(type* p, type value) { \
122 InterlockedExchange##suffix(reinterpret_cast<vctype*>(p), \
123 bit_cast<vctype>(value)); \
124 }
125
126 ATOMIC_OPS(int8_t, 8, char)
127 ATOMIC_OPS(uint8_t, 8, char)
128 ATOMIC_OPS(int16_t, 16, short) /* NOLINT(runtime/int) */
129 ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
130 ATOMIC_OPS(int32_t, 32, long) /* NOLINT(runtime/int) */
131 ATOMIC_OPS(uint32_t, 32, long) /* NOLINT(runtime/int) */
132
133 #undef ATOMIC_OPS_INTEGER
134 #undef ATOMIC_OPS
135
136 #undef InterlockedCompareExchange32
137 #undef InterlockedExchange32
138 #undef InterlockedExchangeAdd32
139 #undef InterlockedAnd32
140 #undef InterlockedOr32
141 #undef InterlockedXor32
142 #undef InterlockedExchangeAdd16
143 #undef InterlockedCompareExchange8
144 #undef InterlockedExchangeAdd8
145
146 #else
147
148 #error Unsupported platform!
149
150 #endif
151
152 template <typename T> 42 template <typename T>
153 T FromObject(Handle<Object> number); 43 T FromObject(Handle<Object> number);
154 44
155 template <> 45 template <>
156 inline uint8_t FromObject<uint8_t>(Handle<Object> number) { 46 inline uint8_t FromObject<uint8_t>(Handle<Object> number) {
157 return NumberToUint32(*number); 47 return NumberToUint32(*number);
158 } 48 }
159 49
160 template <> 50 template <>
161 inline int8_t FromObject<int8_t>(Handle<Object> number) { 51 inline int8_t FromObject<int8_t>(Handle<Object> number) {
(...skipping 40 matching lines...)
202 inline Object* ToObject(Isolate* isolate, uint32_t t) { 92 inline Object* ToObject(Isolate* isolate, uint32_t t) {
203 return *isolate->factory()->NewNumber(t); 93 return *isolate->factory()->NewNumber(t);
204 } 94 }
205 95
206 96
207 template <typename T> 97 template <typename T>
208 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index, 98 inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
209 Handle<Object> oldobj, Handle<Object> newobj) { 99 Handle<Object> oldobj, Handle<Object> newobj) {
210 T oldval = FromObject<T>(oldobj); 100 T oldval = FromObject<T>(oldobj);
211 T newval = FromObject<T>(newobj); 101 T newval = FromObject<T>(newobj);
212 T result = 102 T result = atomics::CompareExchangeSeqCst(static_cast<T*>(buffer) + index,
213 CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval); 103 oldval, newval);
214 return ToObject(isolate, result); 104 return ToObject(isolate, result);
215 } 105 }
216 106
217 107
218 template <typename T> 108 template <typename T>
219 inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) { 109 inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
220 T result = LoadSeqCst(static_cast<T*>(buffer) + index); 110 T result = atomics::LoadSeqCst(static_cast<T*>(buffer) + index);
221 return ToObject(isolate, result); 111 return ToObject(isolate, result);
222 } 112 }
223 113
224 114
225 template <typename T> 115 template <typename T>
226 inline Object* DoStore(Isolate* isolate, void* buffer, size_t index, 116 inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
227 Handle<Object> obj) { 117 Handle<Object> obj) {
228 T value = FromObject<T>(obj); 118 T value = FromObject<T>(obj);
229 StoreSeqCst(static_cast<T*>(buffer) + index, value); 119 atomics::StoreSeqCst(static_cast<T*>(buffer) + index, value);
230 return *obj; 120 return *obj;
231 } 121 }
232 122
233 123
234 template <typename T> 124 template <typename T>
235 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index, 125 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
236 Handle<Object> obj) { 126 Handle<Object> obj) {
237 T value = FromObject<T>(obj); 127 T value = FromObject<T>(obj);
238 T result = AddSeqCst(static_cast<T*>(buffer) + index, value); 128 T result = atomics::AddSeqCst(static_cast<T*>(buffer) + index, value);
239 return ToObject(isolate, result); 129 return ToObject(isolate, result);
240 } 130 }
241 131
242 132
243 template <typename T> 133 template <typename T>
244 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index, 134 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
245 Handle<Object> obj) { 135 Handle<Object> obj) {
246 T value = FromObject<T>(obj); 136 T value = FromObject<T>(obj);
247 T result = SubSeqCst(static_cast<T*>(buffer) + index, value); 137 T result = atomics::SubSeqCst(static_cast<T*>(buffer) + index, value);
248 return ToObject(isolate, result); 138 return ToObject(isolate, result);
249 } 139 }
250 140
251 141
252 template <typename T> 142 template <typename T>
253 inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index, 143 inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
254 Handle<Object> obj) { 144 Handle<Object> obj) {
255 T value = FromObject<T>(obj); 145 T value = FromObject<T>(obj);
256 T result = AndSeqCst(static_cast<T*>(buffer) + index, value); 146 T result = atomics::AndSeqCst(static_cast<T*>(buffer) + index, value);
257 return ToObject(isolate, result); 147 return ToObject(isolate, result);
258 } 148 }
259 149
260 150
261 template <typename T> 151 template <typename T>
262 inline Object* DoOr(Isolate* isolate, void* buffer, size_t index, 152 inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
263 Handle<Object> obj) { 153 Handle<Object> obj) {
264 T value = FromObject<T>(obj); 154 T value = FromObject<T>(obj);
265 T result = OrSeqCst(static_cast<T*>(buffer) + index, value); 155 T result = atomics::OrSeqCst(static_cast<T*>(buffer) + index, value);
266 return ToObject(isolate, result); 156 return ToObject(isolate, result);
267 } 157 }
268 158
269 159
270 template <typename T> 160 template <typename T>
271 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index, 161 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
272 Handle<Object> obj) { 162 Handle<Object> obj) {
273 T value = FromObject<T>(obj); 163 T value = FromObject<T>(obj);
274 T result = XorSeqCst(static_cast<T*>(buffer) + index, value); 164 T result = atomics::XorSeqCst(static_cast<T*>(buffer) + index, value);
275 return ToObject(isolate, result); 165 return ToObject(isolate, result);
276 } 166 }
277 167
278 168
279 template <typename T> 169 template <typename T>
280 inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index, 170 inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
281 Handle<Object> obj) { 171 Handle<Object> obj) {
282 T value = FromObject<T>(obj); 172 T value = FromObject<T>(obj);
283 T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value); 173 T result = atomics::ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
284 return ToObject(isolate, result); 174 return ToObject(isolate, result);
285 } 175 }
286 176
287 177
288 // Uint8Clamped functions 178 // Uint8Clamped functions
289 179
290 uint8_t ClampToUint8(int32_t value) { 180 uint8_t ClampToUint8(int32_t value) {
291 if (value < 0) return 0; 181 if (value < 0) return 0;
292 if (value > 255) return 255; 182 if (value > 255) return 255;
293 return value; 183 return value;
294 } 184 }
295 185
296 186
297 inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer, 187 inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer,
298 size_t index, 188 size_t index,
299 Handle<Object> oldobj, 189 Handle<Object> oldobj,
300 Handle<Object> newobj) { 190 Handle<Object> newobj) {
301 typedef int32_t convert_type; 191 typedef int32_t convert_type;
302 uint8_t oldval = ClampToUint8(FromObject<convert_type>(oldobj)); 192 uint8_t oldval = ClampToUint8(FromObject<convert_type>(oldobj));
303 uint8_t newval = ClampToUint8(FromObject<convert_type>(newobj)); 193 uint8_t newval = ClampToUint8(FromObject<convert_type>(newobj));
304 uint8_t result = CompareExchangeSeqCst(static_cast<uint8_t*>(buffer) + index, 194 uint8_t result = atomics::CompareExchangeSeqCst(
305 oldval, newval); 195 static_cast<uint8_t*>(buffer) + index, oldval, newval);
306 return ToObject(isolate, result); 196 return ToObject(isolate, result);
307 } 197 }
308 198
309 199
310 inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index, 200 inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
311 Handle<Object> obj) { 201 Handle<Object> obj) {
312 typedef int32_t convert_type; 202 typedef int32_t convert_type;
313 uint8_t value = ClampToUint8(FromObject<convert_type>(obj)); 203 uint8_t value = ClampToUint8(FromObject<convert_type>(obj));
314 StoreSeqCst(static_cast<uint8_t*>(buffer) + index, value); 204 atomics::StoreSeqCst(static_cast<uint8_t*>(buffer) + index, value);
315 return *obj; 205 return *obj;
316 } 206 }
317 207
318 208
319 #define DO_UINT8_CLAMPED_OP(name, op) \ 209 #define DO_UINT8_CLAMPED_OP(name, op) \
320 inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer, \ 210 inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer, \
321 size_t index, Handle<Object> obj) { \ 211 size_t index, Handle<Object> obj) { \
322 typedef int32_t convert_type; \ 212 typedef int32_t convert_type; \
323 uint8_t* p = static_cast<uint8_t*>(buffer) + index; \ 213 uint8_t* p = static_cast<uint8_t*>(buffer) + index; \
324 convert_type operand = FromObject<convert_type>(obj); \ 214 convert_type operand = FromObject<convert_type>(obj); \
325 uint8_t expected; \ 215 uint8_t expected; \
326 uint8_t result; \ 216 uint8_t result; \
327 do { \ 217 do { \
328 expected = *p; \ 218 expected = *p; \
329 result = ClampToUint8(static_cast<convert_type>(expected) op operand); \ 219 result = ClampToUint8(static_cast<convert_type>(expected) op operand); \
330 } while (CompareExchangeSeqCst(p, expected, result) != expected); \ 220 } while (atomics::CompareExchangeSeqCst(p, expected, result) != expected); \
331 return ToObject(isolate, expected); \ 221 return ToObject(isolate, expected); \
332 } 222 }
333 223
334 DO_UINT8_CLAMPED_OP(Add, +) 224 DO_UINT8_CLAMPED_OP(Add, +)
335 DO_UINT8_CLAMPED_OP(Sub, -) 225 DO_UINT8_CLAMPED_OP(Sub, -)
336 DO_UINT8_CLAMPED_OP(And, &) 226 DO_UINT8_CLAMPED_OP(And, &)
337 DO_UINT8_CLAMPED_OP(Or, | ) 227 DO_UINT8_CLAMPED_OP(Or, | )
338 DO_UINT8_CLAMPED_OP(Xor, ^) 228 DO_UINT8_CLAMPED_OP(Xor, ^)
339 229
340 #undef DO_UINT8_CLAMPED_OP 230 #undef DO_UINT8_CLAMPED_OP
341 231
342 232
343 inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer, 233 inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer,
344 size_t index, Handle<Object> obj) { 234 size_t index, Handle<Object> obj) {
345 typedef int32_t convert_type; 235 typedef int32_t convert_type;
346 uint8_t* p = static_cast<uint8_t*>(buffer) + index; 236 uint8_t* p = static_cast<uint8_t*>(buffer) + index;
347 uint8_t result = ClampToUint8(FromObject<convert_type>(obj)); 237 uint8_t result = ClampToUint8(FromObject<convert_type>(obj));
348 uint8_t expected; 238 uint8_t expected;
349 do { 239 do {
350 expected = *p; 240 expected = *p;
351 } while (CompareExchangeSeqCst(p, expected, result) != expected); 241 } while (atomics::CompareExchangeSeqCst(p, expected, result) != expected);
352 return ToObject(isolate, expected); 242 return ToObject(isolate, expected);
353 } 243 }
354 244
355 245
356 } // anonymous namespace 246 } // anonymous namespace
357 247
358 // Duplicated from objects.h 248 // Duplicated from objects.h
359 // V has parameters (Type, type, TYPE, C type, element_size) 249 // V has parameters (Type, type, TYPE, C type, element_size)
360 #define INTEGER_TYPED_ARRAYS(V) \ 250 #define INTEGER_TYPED_ARRAYS(V) \
361 V(Uint8, uint8, UINT8, uint8_t, 1) \ 251 V(Uint8, uint8, UINT8, uint8_t, 1) \
(...skipping 291 matching lines...)
653 UNREACHABLE(); 543 UNREACHABLE();
654 return isolate->heap()->undefined_value(); 544 return isolate->heap()->undefined_value();
655 } 545 }
656 546
657 547
658 RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) { 548 RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
659 HandleScope scope(isolate); 549 HandleScope scope(isolate);
660 DCHECK(args.length() == 1); 550 DCHECK(args.length() == 1);
661 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0); 551 CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
662 uint32_t usize = NumberToUint32(*size); 552 uint32_t usize = NumberToUint32(*size);
663 return isolate->heap()->ToBoolean(AtomicIsLockFree(usize)); 553 return isolate->heap()->ToBoolean(IsLockFree(usize));
664 } 554 }
665 } // namespace internal 555 } // namespace internal
666 } // namespace v8 556 } // namespace v8
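
A reading aid for the Uint8Clamped paths above: DO_UINT8_CLAMPED_OP and DoExchangeUint8Clamped both retry a sequentially consistent compare-exchange until the byte they read has not changed underneath them. Below is a standalone sketch of that loop for the Add case, written against the GCC/Clang __atomic builtins that the pre-patch code used; the patch itself routes the compare-exchange through atomics::CompareExchangeSeqCst from the per-architecture headers, and ClampedAddSeqCst is an illustrative name, not a function in the patch.

#include <stdint.h>

// Mirrors ClampToUint8 in runtime-atomics.cc.
static inline uint8_t ClampToUint8(int32_t value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return value;
}

// Atomically performs *p = clamp(*p + operand) and returns the value observed
// before the update, which is what the Do*Uint8Clamped helpers hand back.
static inline uint8_t ClampedAddSeqCst(uint8_t* p, int32_t operand) {
  uint8_t expected;
  uint8_t desired;
  do {
    expected = __atomic_load_n(p, __ATOMIC_SEQ_CST);
    desired = ClampToUint8(static_cast<int32_t>(expected) + operand);
    // On failure the builtin also writes the current value into `expected`,
    // but re-loading at the top keeps the shape of the macro in the diff,
    // which re-reads *p on every iteration.
  } while (!__atomic_compare_exchange_n(p, &expected, desired, /*weak=*/false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST));
  return expected;
}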