Index: src/runtime/runtime-atomics.cc
diff --git a/src/runtime/runtime-atomics.cc b/src/runtime/runtime-atomics.cc
index 94d98d4ffaf77a46e4aec1cb088bd55b36e406e4..609d02b890a43a82d00ac7bb401412a66e908cae 100644
--- a/src/runtime/runtime-atomics.cc
+++ b/src/runtime/runtime-atomics.cc
@@ -14,141 +14,31 @@
 // SharedArrayBuffer draft spec, found here
 // https://github.com/lars-t-hansen/ecmascript_sharedmem
 
+#if V8_TARGET_ARCH_IA32
+#include "src/runtime/runtime-atomics-ia32-inl.h"  // NOLINT
+#elif V8_TARGET_ARCH_X64
+#if V8_CC_MSVC
+// MSVC for x64 does not support inline assembly, so the implementations are
+// in a separate assembly source file. This header just includes the
+// declarations.
+#include "src/runtime/runtime-atomics-x64.h"  // NOLINT
+#else
+#include "src/runtime/runtime-atomics-x64-inl.h"  // NOLINT
+#endif
+#else
+// TODO(binji): implement for all target architectures.
+#include "src/runtime/runtime-atomics-intrinsics-inl.h"  // NOLINT
+#endif
+
 namespace v8 {
 namespace internal {
 
 namespace {
 
-inline bool AtomicIsLockFree(uint32_t size) {
+inline bool IsLockFree(uint32_t size) {
   return size == 1 || size == 2 || size == 4;
 }
 
-#if V8_CC_GNU
-
-template <typename T>
-inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
-  (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
-                                    __ATOMIC_SEQ_CST);
-  return oldval;
-}
-
-template <typename T>
-inline T LoadSeqCst(T* p) {
-  T result;
-  __atomic_load(p, &result, __ATOMIC_SEQ_CST);
-  return result;
-}
-
-template <typename T>
-inline void StoreSeqCst(T* p, T value) {
-  __atomic_store_n(p, value, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T AddSeqCst(T* p, T value) {
-  return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T SubSeqCst(T* p, T value) {
-  return __atomic_fetch_sub(p, value, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T AndSeqCst(T* p, T value) {
-  return __atomic_fetch_and(p, value, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T OrSeqCst(T* p, T value) {
-  return __atomic_fetch_or(p, value, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T XorSeqCst(T* p, T value) {
-  return __atomic_fetch_xor(p, value, __ATOMIC_SEQ_CST);
-}
-
-template <typename T>
-inline T ExchangeSeqCst(T* p, T value) {
-  return __atomic_exchange_n(p, value, __ATOMIC_SEQ_CST);
-}
-
-#elif V8_CC_MSVC
-
-#define InterlockedCompareExchange32 _InterlockedCompareExchange
-#define InterlockedExchange32 _InterlockedExchange
-#define InterlockedExchangeAdd32 _InterlockedExchangeAdd
-#define InterlockedAnd32 _InterlockedAnd
-#define InterlockedOr32 _InterlockedOr
-#define InterlockedXor32 _InterlockedXor
-#define InterlockedExchangeAdd16 _InterlockedExchangeAdd16
-#define InterlockedCompareExchange8 _InterlockedCompareExchange8
-#define InterlockedExchangeAdd8 _InterlockedExchangeAdd8
-
-#define ATOMIC_OPS(type, suffix, vctype)                                    \
-  inline type AddSeqCst(type* p, type value) {                              \
-    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
-                                          bit_cast<vctype>(value));         \
-  }                                                                         \
-  inline type SubSeqCst(type* p, type value) {                              \
-    return InterlockedExchangeAdd##suffix(reinterpret_cast<vctype*>(p),     \
-                                          -bit_cast<vctype>(value));        \
-  }                                                                         \
-  inline type AndSeqCst(type* p, type value) {                              \
-    return InterlockedAnd##suffix(reinterpret_cast<vctype*>(p),             \
-                                  bit_cast<vctype>(value));                 \
-  }                                                                         \
-  inline type OrSeqCst(type* p, type value) {                               \
-    return InterlockedOr##suffix(reinterpret_cast<vctype*>(p),              \
-                                 bit_cast<vctype>(value));                  \
-  }                                                                         \
-  inline type XorSeqCst(type* p, type value) {                              \
-    return InterlockedXor##suffix(reinterpret_cast<vctype*>(p),             \
-                                  bit_cast<vctype>(value));                 \
-  }                                                                         \
-  inline type ExchangeSeqCst(type* p, type value) {                         \
-    return InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),        \
-                                       bit_cast<vctype>(value));            \
-  }                                                                         \
-                                                                            \
-  inline type CompareExchangeSeqCst(type* p, type oldval, type newval) {    \
-    return InterlockedCompareExchange##suffix(reinterpret_cast<vctype*>(p), \
-                                              bit_cast<vctype>(newval),     \
-                                              bit_cast<vctype>(oldval));    \
-  }                                                                         \
-  inline type LoadSeqCst(type* p) { return *p; }                           \
-  inline void StoreSeqCst(type* p, type value) {                           \
-    InterlockedExchange##suffix(reinterpret_cast<vctype*>(p),              \
-                                bit_cast<vctype>(value));                  \
-  }
-
-ATOMIC_OPS(int8_t, 8, char)
-ATOMIC_OPS(uint8_t, 8, char)
-ATOMIC_OPS(int16_t, 16, short)  /* NOLINT(runtime/int) */
-ATOMIC_OPS(uint16_t, 16, short) /* NOLINT(runtime/int) */
-ATOMIC_OPS(int32_t, 32, long)   /* NOLINT(runtime/int) */
-ATOMIC_OPS(uint32_t, 32, long)  /* NOLINT(runtime/int) */
-
-#undef ATOMIC_OPS_INTEGER
-#undef ATOMIC_OPS
-
-#undef InterlockedCompareExchange32
-#undef InterlockedExchange32
-#undef InterlockedExchangeAdd32
-#undef InterlockedAnd32
-#undef InterlockedOr32
-#undef InterlockedXor32
-#undef InterlockedExchangeAdd16
-#undef InterlockedCompareExchange8
-#undef InterlockedExchangeAdd8
-
-#else
-
-#error Unsupported platform!
-
-#endif
-
 
 template <typename T>
 T FromObject(Handle<Object> number);
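
The new runtime-atomics-intrinsics-inl.h is not part of this diff, but the call
sites below all go through an atomics:: namespace, and the removed V8_CC_GNU
block above shows exactly what it replaces. A minimal sketch of such a
compiler-intrinsics fallback, assuming it simply wraps the same __atomic
builtins in the new namespace (the real header may differ):

    // Sketch only: portable GCC/Clang fallback mirroring the code removed
    // above, now scoped so per-architecture headers can expose the same API.
    namespace v8 {
    namespace internal {
    namespace atomics {

    template <typename T>
    inline T CompareExchangeSeqCst(T* p, T oldval, T newval) {
      (void)__atomic_compare_exchange_n(p, &oldval, newval, 0, __ATOMIC_SEQ_CST,
                                        __ATOMIC_SEQ_CST);
      return oldval;  // The builtin wrote the value it observed into oldval.
    }

    template <typename T>
    inline T AddSeqCst(T* p, T value) {
      return __atomic_fetch_add(p, value, __ATOMIC_SEQ_CST);  // Old value.
    }

    // ...LoadSeqCst, StoreSeqCst, Sub/And/Or/Xor/ExchangeSeqCst as before.

    }  // namespace atomics
    }  // namespace internal
    }  // namespace v8

For the MSVC-on-x64 case, the comment in the hunk above says the header only
carries declarations, with definitions in a separate assembly source file;
presumably that means per-type overloads along these lines (an assumed shape,
not the actual header):

    namespace atomics {
    // Defined in the out-of-line assembly source mentioned above.
    uint8_t CompareExchangeSeqCst(uint8_t* p, uint8_t oldval, uint8_t newval);
    uint32_t AddSeqCst(uint32_t* p, uint32_t value);
    // ...one overload per integer type and per operation.
    }  // namespace atomics
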
@@ -209,15 +99,15 @@ inline Object* DoCompareExchange(Isolate* isolate, void* buffer, size_t index,
                                  Handle<Object> oldobj, Handle<Object> newobj) {
   T oldval = FromObject<T>(oldobj);
   T newval = FromObject<T>(newobj);
-  T result =
-      CompareExchangeSeqCst(static_cast<T*>(buffer) + index, oldval, newval);
+  T result = atomics::CompareExchangeSeqCst(static_cast<T*>(buffer) + index,
+                                            oldval, newval);
   return ToObject(isolate, result);
 }
 
 
 template <typename T>
 inline Object* DoLoad(Isolate* isolate, void* buffer, size_t index) {
-  T result = LoadSeqCst(static_cast<T*>(buffer) + index);
+  T result = atomics::LoadSeqCst(static_cast<T*>(buffer) + index);
   return ToObject(isolate, result);
 }
 
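
Note the return-value contract DoCompareExchange leans on: the CAS helper
returns the value it actually found at the address, whether or not the swap
happened, which is exactly what Atomics.compareExchange must report back to
JavaScript. Using the sketched helper above (names assumed, not V8 API):

    int32_t cell = 5;
    // Swap succeeds: returns the old value 5, and cell becomes 7.
    int32_t a = atomics::CompareExchangeSeqCst(&cell, int32_t{5}, int32_t{7});
    // Mismatch: returns the current value 7, and cell is left unchanged.
    int32_t b = atomics::CompareExchangeSeqCst(&cell, int32_t{5}, int32_t{9});
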
@@ -226,7 +116,7 @@ template <typename T>
 inline Object* DoStore(Isolate* isolate, void* buffer, size_t index,
                        Handle<Object> obj) {
   T value = FromObject<T>(obj);
-  StoreSeqCst(static_cast<T*>(buffer) + index, value);
+  atomics::StoreSeqCst(static_cast<T*>(buffer) + index, value);
   return *obj;
 }
 
@@ -235,7 +125,7 @@ template <typename T>
 inline Object* DoAdd(Isolate* isolate, void* buffer, size_t index,
                      Handle<Object> obj) {
   T value = FromObject<T>(obj);
-  T result = AddSeqCst(static_cast<T*>(buffer) + index, value);
+  T result = atomics::AddSeqCst(static_cast<T*>(buffer) + index, value);
   return ToObject(isolate, result);
 }
 
@@ -244,7 +134,7 @@ template <typename T>
 inline Object* DoSub(Isolate* isolate, void* buffer, size_t index,
                      Handle<Object> obj) {
   T value = FromObject<T>(obj);
-  T result = SubSeqCst(static_cast<T*>(buffer) + index, value);
+  T result = atomics::SubSeqCst(static_cast<T*>(buffer) + index, value);
   return ToObject(isolate, result);
 }
 
@@ -253,7 +143,7 @@ template <typename T>
 inline Object* DoAnd(Isolate* isolate, void* buffer, size_t index,
                      Handle<Object> obj) {
   T value = FromObject<T>(obj);
-  T result = AndSeqCst(static_cast<T*>(buffer) + index, value);
+  T result = atomics::AndSeqCst(static_cast<T*>(buffer) + index, value);
   return ToObject(isolate, result);
 }
 
@@ -262,7 +152,7 @@ template <typename T>
 inline Object* DoOr(Isolate* isolate, void* buffer, size_t index,
                     Handle<Object> obj) {
   T value = FromObject<T>(obj);
-  T result = OrSeqCst(static_cast<T*>(buffer) + index, value);
+  T result = atomics::OrSeqCst(static_cast<T*>(buffer) + index, value);
   return ToObject(isolate, result);
 }
 
@@ -271,7 +161,7 @@ template <typename T>
 inline Object* DoXor(Isolate* isolate, void* buffer, size_t index,
                      Handle<Object> obj) {
   T value = FromObject<T>(obj);
-  T result = XorSeqCst(static_cast<T*>(buffer) + index, value);
+  T result = atomics::XorSeqCst(static_cast<T*>(buffer) + index, value);
   return ToObject(isolate, result);
 }
 
@@ -280,7 +170,7 @@ template <typename T>
 inline Object* DoExchange(Isolate* isolate, void* buffer, size_t index,
                           Handle<Object> obj) {
   T value = FromObject<T>(obj);
-  T result = ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
+  T result = atomics::ExchangeSeqCst(static_cast<T*>(buffer) + index, value);
   return ToObject(isolate, result);
 }
 
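
All six wrappers above share one contract: the atomics:: helper returns the
value the cell held before the read-modify-write, and that old value is what
flows back to JavaScript, matching the Atomics.add/sub/and/or/xor/exchange
semantics. A small illustration with the sketched helpers (names assumed):

    int32_t cell = 40;
    // Returns 40; cell now holds 42. Atomics.add(ta, i, 2) reports 40 too.
    int32_t old = atomics::AddSeqCst(&cell, int32_t{2});
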
@@ -301,8 +191,8 @@ inline Object* DoCompareExchangeUint8Clamped(Isolate* isolate, void* buffer,
   typedef int32_t convert_type;
   uint8_t oldval = ClampToUint8(FromObject<convert_type>(oldobj));
   uint8_t newval = ClampToUint8(FromObject<convert_type>(newobj));
-  uint8_t result = CompareExchangeSeqCst(static_cast<uint8_t*>(buffer) + index,
-                                         oldval, newval);
+  uint8_t result = atomics::CompareExchangeSeqCst(
+      static_cast<uint8_t*>(buffer) + index, oldval, newval);
   return ToObject(isolate, result);
 }
 
@@ -311,24 +201,24 @@ inline Object* DoStoreUint8Clamped(Isolate* isolate, void* buffer, size_t index,
                                    Handle<Object> obj) {
   typedef int32_t convert_type;
   uint8_t value = ClampToUint8(FromObject<convert_type>(obj));
-  StoreSeqCst(static_cast<uint8_t*>(buffer) + index, value);
+  atomics::StoreSeqCst(static_cast<uint8_t*>(buffer) + index, value);
   return *obj;
 }
 
 
-#define DO_UINT8_CLAMPED_OP(name, op)                                       \
-  inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer,     \
-                                        size_t index, Handle<Object> obj) { \
-    typedef int32_t convert_type;                                           \
-    uint8_t* p = static_cast<uint8_t*>(buffer) + index;                     \
-    convert_type operand = FromObject<convert_type>(obj);                   \
-    uint8_t expected;                                                       \
-    uint8_t result;                                                         \
-    do {                                                                    \
-      expected = *p;                                                        \
-      result = ClampToUint8(static_cast<convert_type>(expected) op operand); \
-    } while (CompareExchangeSeqCst(p, expected, result) != expected);       \
-    return ToObject(isolate, expected);                                     \
+#define DO_UINT8_CLAMPED_OP(name, op)                                         \
+  inline Object* Do##name##Uint8Clamped(Isolate* isolate, void* buffer,       \
+                                        size_t index, Handle<Object> obj) {   \
+    typedef int32_t convert_type;                                             \
+    uint8_t* p = static_cast<uint8_t*>(buffer) + index;                       \
+    convert_type operand = FromObject<convert_type>(obj);                     \
+    uint8_t expected;                                                         \
+    uint8_t result;                                                           \
+    do {                                                                      \
+      expected = *p;                                                          \
+      result = ClampToUint8(static_cast<convert_type>(expected) op operand);  \
+    } while (atomics::CompareExchangeSeqCst(p, expected, result) != expected); \
+    return ToObject(isolate, expected);                                       \
   }
 
 DO_UINT8_CLAMPED_OP(Add, +)
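
DO_UINT8_CLAMPED_OP exists because no hardware read-modify-write both applies
the arithmetic and clamps the result to [0, 255], so the macro falls back to a
compare-exchange retry loop: read the byte, compute the clamped result in
32-bit space, and CAS it in; if another thread changed the byte in between, the
CAS fails and the loop recomputes. Expanded by hand for Add, the macro above
produces approximately:

    inline Object* DoAddUint8Clamped(Isolate* isolate, void* buffer,
                                     size_t index, Handle<Object> obj) {
      uint8_t* p = static_cast<uint8_t*>(buffer) + index;
      int32_t operand = FromObject<int32_t>(obj);
      uint8_t expected;
      uint8_t result;
      do {
        expected = *p;  // Plain racy read; the CAS below validates it.
        result = ClampToUint8(static_cast<int32_t>(expected) + operand);
      } while (atomics::CompareExchangeSeqCst(p, expected, result) != expected);
      return ToObject(isolate, expected);  // Old value, as Atomics.add requires.
    }
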
@@ -348,7 +238,7 @@ inline Object* DoExchangeUint8Clamped(Isolate* isolate, void* buffer,
   uint8_t expected;
   do {
     expected = *p;
-  } while (CompareExchangeSeqCst(p, expected, result) != expected);
+  } while (atomics::CompareExchangeSeqCst(p, expected, result) != expected);
   return ToObject(isolate, expected);
 }
 
@@ -660,7 +550,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsIsLockFree) {
   DCHECK(args.length() == 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(size, 0);
   uint32_t usize = NumberToUint32(*size);
-  return isolate->heap()->ToBoolean(AtomicIsLockFree(usize));
+  return isolate->heap()->ToBoolean(IsLockFree(usize));
 }
 }  // namespace internal
 }  // namespace v8
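
Runtime_AtomicsIsLockFree is the backing for Atomics.isLockFree: the argument
is coerced with NumberToUint32 and checked against the renamed IsLockFree
predicate, so only 1-, 2- and 4-byte accesses report lock-free, independent of
the architecture-specific backend selected at the top of the file. A quick
illustration of the predicate's behavior:

    IsLockFree(4);  // true  -> Atomics.isLockFree(4) is true.
    IsLockFree(8);  // false -> 8-byte accesses are never claimed lock-free here.
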