| Index: src/runtime/runtime-atomics-x64-inl.h
|
| diff --git a/src/runtime/runtime-atomics-x64-inl.h b/src/runtime/runtime-atomics-x64-inl.h
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..ad0512d9f674c4b3dac4453dc876a84577c62f53
|
| --- /dev/null
|
| +++ b/src/runtime/runtime-atomics-x64-inl.h
|
| @@ -0,0 +1,1391 @@
|
| +// Copyright 2016 the V8 project authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +namespace v8 {
|
| +namespace internal {
|
| +namespace atomics {
|
| +
|
| +// Load ////////////////////////////////////////////////////////////////////////
|
// Sequentially-consistent load of a uint8_t; returns *p.
// On x86 an aligned load is never reordered with other loads, so a plain
// mov suffices for a SeqCst load (SeqCst stores carry the full fence).
// NOTE(review): the file name says x64, but the MSVC branch uses 32-bit
// __asm blocks (x64 MSVC has no inline asm) and a 32-bit pointer register
// (ecx) -- confirm the intended target architecture.
inline uint8_t LoadSeqCst(uint8_t* p) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t result;
  __asm__ __volatile__("movb (%1), %0" : "=q"(result) : "q"(p));
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Sequentially-consistent load of an int8_t; same scheme as the uint8_t
// overload (plain byte mov, which x86 never reorders with other loads).
inline int8_t LoadSeqCst(int8_t* p) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t result;
  __asm__ __volatile__("movb (%1), %0" : "=q"(result) : "q"(p));
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Sequentially-consistent load of a uint16_t (plain word mov; see the
// uint8_t overload for why this is sufficient on x86).
inline uint16_t LoadSeqCst(uint16_t* p) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t result;
  __asm__ __volatile__("movw (%1), %0" : "=r"(result) : "r"(p));
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Sequentially-consistent load of an int16_t (plain word mov; see the
// uint8_t overload for why this is sufficient on x86).
inline int16_t LoadSeqCst(int16_t* p) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t result;
  __asm__ __volatile__("movw (%1), %0" : "=r"(result) : "r"(p));
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Sequentially-consistent load of a uint32_t (plain dword mov; see the
// uint8_t overload for why this is sufficient on x86).
inline uint32_t LoadSeqCst(uint32_t* p) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t result;
  __asm__ __volatile__("movl (%1), %0" : "=r"(result) : "r"(p));
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Sequentially-consistent load of an int32_t (plain dword mov; see the
// uint8_t overload for why this is sufficient on x86).
inline int32_t LoadSeqCst(int32_t* p) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t result;
  __asm__ __volatile__("movl (%1), %0" : "=r"(result) : "r"(p));
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
| +// Store ///////////////////////////////////////////////////////////////////////
|
// Sequentially-consistent store of |value| to *p. xchg with a memory
// operand is implicitly locked, so one instruction both performs the
// store and provides the full barrier SeqCst requires (a plain mov
// would need a trailing mfence).
inline void StoreSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  __asm {
    mov al, value
    mov ecx, p
    xchg byte ptr [ecx], al
  }
#elif V8_CC_GNU
  __asm__ __volatile__("xchgb %0, %1" : "+m"(*p) : "q"(value) : "memory");
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Sequentially-consistent store of an int8_t; implicitly-locked xchg
// provides the store plus full barrier (see the uint8_t overload).
inline void StoreSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  __asm {
    mov al, value
    mov ecx, p
    xchg byte ptr [ecx], al
  }
#elif V8_CC_GNU
  __asm__ __volatile__("xchgb %0, %1" : "+m"(*p) : "q"(value) : "memory");
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Sequentially-consistent store of a uint16_t; implicitly-locked xchg
// provides the store plus full barrier (see the uint8_t overload).
inline void StoreSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  __asm {
    mov ax, value
    mov ecx, p
    xchg word ptr [ecx], ax
  }
#elif V8_CC_GNU
  __asm__ __volatile__("xchgw %0, %1" : "+m"(*p) : "r"(value) : "memory");
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Sequentially-consistent store of an int16_t; implicitly-locked xchg
// provides the store plus full barrier (see the uint8_t overload).
inline void StoreSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  __asm {
    mov ax, value
    mov ecx, p
    xchg word ptr [ecx], ax
  }
#elif V8_CC_GNU
  __asm__ __volatile__("xchgw %0, %1" : "+m"(*p) : "r"(value) : "memory");
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Sequentially-consistent store of a uint32_t; implicitly-locked xchg
// provides the store plus full barrier (see the uint8_t overload).
inline void StoreSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  __asm {
    mov eax, value
    mov ecx, p
    xchg dword ptr [ecx], eax
  }
#elif V8_CC_GNU
  __asm__ __volatile__("xchgl %0, %1" : "+m"(*p) : "r"(value) : "memory");
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Sequentially-consistent store of an int32_t; implicitly-locked xchg
// provides the store plus full barrier (see the uint8_t overload).
inline void StoreSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  __asm {
    mov eax, value
    mov ecx, p
    xchg dword ptr [ecx], eax
  }
#elif V8_CC_GNU
  __asm__ __volatile__("xchgl %0, %1" : "+m"(*p) : "r"(value) : "memory");
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
| +// Add /////////////////////////////////////////////////////////////////////////
|
// Atomically adds |value| to *p with SeqCst ordering; returns the value
// *p held BEFORE the add. lock xadd stores old+new to memory and leaves
// the old value in the source register, which is what we return.
inline uint8_t AddSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov al, value
    mov ecx, p
    lock xadd byte ptr [ecx], al
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t result;
  // "0"(value): value goes in the same register that returns the result.
  __asm__ __volatile__("lock xaddb %2, %1"
                       : "=q"(result), "+m"(*p)
                       : "0"(value)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst add for int8_t; returns the previous value of *p
// (lock xadd leaves the old value in the source register).
inline int8_t AddSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov al, value
    mov ecx, p
    lock xadd byte ptr [ecx], al
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t result;
  __asm__ __volatile__("lock xaddb %2, %1"
                       : "=q"(result), "+m"(*p)
                       : "0"(value)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst add for uint16_t; returns the previous value of *p
// (lock xadd leaves the old value in the source register).
inline uint16_t AddSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ax, value
    mov ecx, p
    lock xadd word ptr [ecx], ax
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t result;
  __asm__ __volatile__("lock xaddw %2, %1"
                       : "=r"(result), "+m"(*p)
                       : "0"(value)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst add for int16_t; returns the previous value of *p
// (lock xadd leaves the old value in the source register).
inline int16_t AddSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ax, value
    mov ecx, p
    lock xadd word ptr [ecx], ax
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t result;
  __asm__ __volatile__("lock xaddw %2, %1"
                       : "=r"(result), "+m"(*p)
                       : "0"(value)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst add for uint32_t; returns the previous value of *p
// (lock xadd leaves the old value in the source register).
inline uint32_t AddSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov eax, value
    mov ecx, p
    lock xadd dword ptr [ecx], eax
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t result;
  __asm__ __volatile__("lock xaddl %2, %1"
                       : "=r"(result), "+m"(*p)
                       : "0"(value)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst add for int32_t; returns the previous value of *p
// (lock xadd leaves the old value in the source register).
inline int32_t AddSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov eax, value
    mov ecx, p
    lock xadd dword ptr [ecx], eax
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t result;
  __asm__ __volatile__("lock xaddl %2, %1"
                       : "=r"(result), "+m"(*p)
                       : "0"(value)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
| +// Sub /////////////////////////////////////////////////////////////////////////
|
// Atomically subtracts |value| from *p with SeqCst ordering; returns the
// previous value of *p. Implemented as negate-then-lock-xadd (there is
// no xsub); the negate happens on the private register copy before the
// atomic instruction, so atomicity is unaffected.
inline uint8_t SubSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov al, value
    neg al
    mov ecx, p
    lock xadd byte ptr [ecx], al
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t result;
  __asm__ __volatile__(
      "negb %2\n\t"
      "lock xaddb %2, %1"
      : "=q"(result), "+m"(*p)
      : "0"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst subtract for int8_t (negate + lock xadd); returns the
// previous value of *p.
inline int8_t SubSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov al, value
    neg al
    mov ecx, p
    lock xadd byte ptr [ecx], al
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t result;
  __asm__ __volatile__(
      "negb %2\n\t"
      "lock xaddb %2, %1"
      : "=q"(result), "+m"(*p)
      : "0"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst subtract for uint16_t (negate + lock xadd); returns the
// previous value of *p.
inline uint16_t SubSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ax, value
    neg ax
    mov ecx, p
    lock xadd word ptr [ecx], ax
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t result;
  __asm__ __volatile__(
      "negw %2\n\t"
      "lock xaddw %2, %1"
      : "=r"(result), "+m"(*p)
      : "0"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst subtract for int16_t (negate + lock xadd); returns the
// previous value of *p.
inline int16_t SubSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ax, value
    neg ax
    mov ecx, p
    lock xadd word ptr [ecx], ax
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t result;
  __asm__ __volatile__(
      "negw %2\n\t"
      "lock xaddw %2, %1"
      : "=r"(result), "+m"(*p)
      : "0"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst subtract for uint32_t (negate + lock xadd); returns the
// previous value of *p.
inline uint32_t SubSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov eax, value
    neg eax
    mov ecx, p
    lock xadd dword ptr [ecx], eax
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t result;
  __asm__ __volatile__(
      "negl %2\n\t"
      "lock xaddl %2, %1"
      : "=r"(result), "+m"(*p)
      : "0"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst subtract for int32_t (negate + lock xadd); returns the
// previous value of *p.
inline int32_t SubSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov eax, value
    neg eax
    mov ecx, p
    lock xadd dword ptr [ecx], eax
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t result;
  __asm__ __volatile__(
      "negl %2\n\t"
      "lock xaddl %2, %1"
      : "=r"(result), "+m"(*p)
      : "0"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
| +// Exchange ////////////////////////////////////////////////////////////////////
|
| +inline uint8_t ExchangeSeqCst(uint8_t* p, uint8_t value) {
|
| +#if V8_CC_MSVC
|
| + uint8_t result;
|
| + __asm {
|
| + mov al, value
|
| + mov ecx, p
|
| + xchg byte ptr [ecx], al
|
| + mov result, al
|
| + }
|
| + return result;
|
| +#elif V8_CC_GNU
|
| + __asm__ __volatile__("xchgb %1, %2" : "+q"(value), "+m"(*p) : : "memory");
|
| + return value;
|
| +#else
|
| +#error Unsupported compiler.
|
| +#endif
|
| +}
|
| +
|
| +
|
| +inline int8_t ExchangeSeqCst(int8_t* p, int8_t value) {
|
| +#if V8_CC_MSVC
|
| + int8_t result;
|
| + __asm {
|
| + mov al, value
|
| + mov ecx, p
|
| + xchg byte ptr [ecx], al
|
| + mov result, al
|
| + }
|
| + return result;
|
| +#elif V8_CC_GNU
|
| + __asm__ __volatile__("xchgb %1, %2" : "+q"(value), "+m"(*p) : : "memory");
|
| + return value;
|
| +#else
|
| +#error Unsupported compiler.
|
| +#endif
|
| +}
|
| +
|
| +
|
| +inline uint16_t ExchangeSeqCst(uint16_t* p, uint16_t value) {
|
| +#if V8_CC_MSVC
|
| + uint16_t result;
|
| + __asm {
|
| + mov ax, value
|
| + mov ecx, p
|
| + xchg word ptr [ecx], ax
|
| + mov result, ax
|
| + }
|
| + return result;
|
| +#elif V8_CC_GNU
|
| + __asm__ __volatile__("xchgw %1, %2" : "+r"(value), "+m"(*p) : : "memory");
|
| + return value;
|
| +#else
|
| +#error Unsupported compiler.
|
| +#endif
|
| +}
|
| +
|
| +
|
| +inline int16_t ExchangeSeqCst(int16_t* p, int16_t value) {
|
| +#if V8_CC_MSVC
|
| + int16_t result;
|
| + __asm {
|
| + mov ax, value
|
| + mov ecx, p
|
| + xchg word ptr [ecx], ax
|
| + mov result, ax
|
| + }
|
| + return result;
|
| +#elif V8_CC_GNU
|
| + __asm__ __volatile__("xchgw %1, %2" : "+r"(value), "+m"(*p) : : "memory");
|
| + return value;
|
| +#else
|
| +#error Unsupported compiler.
|
| +#endif
|
| +}
|
| +
|
| +
|
| +inline uint32_t ExchangeSeqCst(uint32_t* p, uint32_t value) {
|
| +#if V8_CC_MSVC
|
| + uint32_t result;
|
| + __asm {
|
| + mov eax, value
|
| + mov ecx, p
|
| + xchg dword ptr [ecx], eax
|
| + mov result, eax
|
| + }
|
| + return result;
|
| +#elif V8_CC_GNU
|
| + __asm__ __volatile__("xchgl %1, %2" : "+r"(value), "+m"(*p) : : "memory");
|
| + return value;
|
| +#else
|
| +#error Unsupported compiler.
|
| +#endif
|
| +}
|
| +
|
| +
|
| +inline int32_t ExchangeSeqCst(int32_t* p, int32_t value) {
|
| +#if V8_CC_MSVC
|
| + int32_t result;
|
| + __asm {
|
| + mov eax, value
|
| + mov ecx, p
|
| + xchg dword ptr [ecx], eax
|
| + mov result, eax
|
| + }
|
| + return result;
|
| +#elif V8_CC_GNU
|
| + __asm__ __volatile__("xchgl %1, %2" : "+r"(value), "+m"(*p) : : "memory");
|
| + return value;
|
| +#else
|
| +#error Unsupported compiler.
|
| +#endif
|
| +}
|
| +
|
| +
|
| +// CompareExchange /////////////////////////////////////////////////////////////
|
// Atomic compare-and-swap (SeqCst): if *p == oldval, stores newval into
// *p. Returns the value observed in *p (== oldval iff the swap
// happened). cmpxchg implicitly uses al/ax/eax for the expected value
// and leaves the observed value there, hence the "a"/"=a" constraints.
inline uint8_t CompareExchangeSeqCst(uint8_t* p, uint8_t oldval,
                                     uint8_t newval) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov al, oldval
    mov dl, newval
    mov ecx, p
    lock cmpxchg byte ptr [ecx], dl
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t result;
  __asm__ __volatile__("lock cmpxchgb %2, %1"
                       : "=a"(result), "+m"(*p)
                       : "q"(newval), "0"(oldval)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst compare-and-swap for int8_t; returns the observed value
// of *p (== oldval iff newval was stored).
inline int8_t CompareExchangeSeqCst(int8_t* p, int8_t oldval, int8_t newval) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov al, oldval
    mov dl, newval
    mov ecx, p
    lock cmpxchg byte ptr [ecx], dl
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t result;
  __asm__ __volatile__("lock cmpxchgb %2, %1"
                       : "=a"(result), "+m"(*p)
                       : "q"(newval), "0"(oldval)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst compare-and-swap for uint16_t; returns the observed
// value of *p (== oldval iff newval was stored).
inline uint16_t CompareExchangeSeqCst(uint16_t* p, uint16_t oldval,
                                      uint16_t newval) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ax, oldval
    mov dx, newval
    mov ecx, p
    lock cmpxchg word ptr [ecx], dx
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t result;
  __asm__ __volatile__("lock cmpxchgw %2, %1"
                       : "=a"(result), "+m"(*p)
                       : "r"(newval), "0"(oldval)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst compare-and-swap for int16_t; returns the observed
// value of *p (== oldval iff newval was stored).
inline int16_t CompareExchangeSeqCst(int16_t* p, int16_t oldval,
                                     int16_t newval) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ax, oldval
    mov dx, newval
    mov ecx, p
    lock cmpxchg word ptr [ecx], dx
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t result;
  __asm__ __volatile__("lock cmpxchgw %2, %1"
                       : "=a"(result), "+m"(*p)
                       : "r"(newval), "0"(oldval)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst compare-and-swap for uint32_t; returns the observed
// value of *p (== oldval iff newval was stored).
inline uint32_t CompareExchangeSeqCst(uint32_t* p, uint32_t oldval,
                                      uint32_t newval) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov eax, oldval
    mov edx, newval
    mov ecx, p
    lock cmpxchg dword ptr [ecx], edx
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t result;
  __asm__ __volatile__("lock cmpxchgl %2, %1"
                       : "=a"(result), "+m"(*p)
                       : "r"(newval), "0"(oldval)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst compare-and-swap for int32_t; returns the observed
// value of *p (== oldval iff newval was stored).
inline int32_t CompareExchangeSeqCst(int32_t* p, int32_t oldval,
                                     int32_t newval) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov eax, oldval
    mov edx, newval
    mov ecx, p
    lock cmpxchg dword ptr [ecx], edx
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t result;
  __asm__ __volatile__("lock cmpxchgl %2, %1"
                       : "=a"(result), "+m"(*p)
                       : "r"(newval), "0"(oldval)
                       : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
| +// And /////////////////////////////////////////////////////////////////////////
|
// Atomically ANDs |value| into *p (SeqCst); returns the value *p held
// before the operation. x86 has no fetch-and-and, so this is a cmpxchg
// retry loop: the accumulator holds the last value seen in *p, temp
// holds (seen & value); on cmpxchg failure the accumulator is reloaded
// with the current *p and the loop retries until the CAS succeeds.
inline uint8_t AndSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
    L0:
    mov dl, value
    and dl, al
    lock cmpxchg byte ptr [ecx], dl
    jnz short L0
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t temp;
  uint8_t result;
  // %0 = result (forced to al), %1 = *p, %2 = temp, %3 = value.
  __asm__ __volatile__(
      "movb %1, %0\n\t"
      "1:\n\t"
      "movb %3, %2\n\t"
      "andb %0, %2\n\t"
      "lock cmpxchgb %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&q"(temp)
      : "q"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-AND for int8_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline int8_t AndSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
    L0:
    mov dl, value
    and dl, al
    lock cmpxchg byte ptr [ecx], dl
    jnz short L0
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t temp;
  int8_t result;
  __asm__ __volatile__(
      "movb %1, %0\n\t"
      "1:\n\t"
      "movb %3, %2\n\t"
      "andb %0, %2\n\t"
      "lock cmpxchgb %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&q"(temp)
      : "q"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-AND for uint16_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline uint16_t AndSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
    L0:
    mov dx, value
    and dx, ax
    lock cmpxchg word ptr [ecx], dx
    jnz short L0
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t temp;
  uint16_t result;
  __asm__ __volatile__(
      "movw %1, %0\n\t"
      "1:\n\t"
      "movw %3, %2\n\t"
      "andw %0, %2\n\t"
      "lock cmpxchgw %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-AND for int16_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline int16_t AndSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
    L0:
    mov dx, value
    and dx, ax
    lock cmpxchg word ptr [ecx], dx
    jnz short L0
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t temp;
  int16_t result;
  __asm__ __volatile__(
      "movw %1, %0\n\t"
      "1:\n\t"
      "movw %3, %2\n\t"
      "andw %0, %2\n\t"
      "lock cmpxchgw %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-AND for uint32_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline uint32_t AndSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
    L0:
    mov edx, value
    and edx, eax
    lock cmpxchg dword ptr [ecx], edx
    jnz short L0
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t temp;
  uint32_t result;
  __asm__ __volatile__(
      "movl %1, %0\n\t"
      "1:\n\t"
      "movl %3, %2\n\t"
      "andl %0, %2\n\t"
      "lock cmpxchgl %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-AND for int32_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline int32_t AndSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
    L0:
    mov edx, value
    and edx, eax
    lock cmpxchg dword ptr [ecx], edx
    jnz short L0
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t temp;
  int32_t result;
  __asm__ __volatile__(
      "movl %1, %0\n\t"
      "1:\n\t"
      "movl %3, %2\n\t"
      "andl %0, %2\n\t"
      "lock cmpxchgl %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
| +// Or /////////////////////////////////////////////////////////////////////////
|
// Atomically ORs |value| into *p (SeqCst); returns the previous value
// of *p. Same cmpxchg retry-loop shape as AndSeqCst (x86 has no
// fetch-and-or that returns the old value).
inline uint8_t OrSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
    L0:
    mov dl, value
    or dl, al
    lock cmpxchg byte ptr [ecx], dl
    jnz short L0
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t temp;
  uint8_t result;
  __asm__ __volatile__(
      "movb %1, %0\n\t"
      "1:\n\t"
      "movb %3, %2\n\t"
      "orb %0, %2\n\t"
      "lock cmpxchgb %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&q"(temp)
      : "q"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-OR for int8_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline int8_t OrSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
    L0:
    mov dl, value
    or dl, al
    lock cmpxchg byte ptr [ecx], dl
    jnz short L0
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t temp;
  int8_t result;
  __asm__ __volatile__(
      "movb %1, %0\n\t"
      "1:\n\t"
      "movb %3, %2\n\t"
      "orb %0, %2\n\t"
      "lock cmpxchgb %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&q"(temp)
      : "q"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-OR for uint16_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline uint16_t OrSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
    L0:
    mov dx, value
    or dx, ax
    lock cmpxchg word ptr [ecx], dx
    jnz short L0
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t temp;
  uint16_t result;
  __asm__ __volatile__(
      "movw %1, %0\n\t"
      "1:\n\t"
      "movw %3, %2\n\t"
      "orw %0, %2\n\t"
      "lock cmpxchgw %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-OR for int16_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline int16_t OrSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
    L0:
    mov dx, value
    or dx, ax
    lock cmpxchg word ptr [ecx], dx
    jnz short L0
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t temp;
  int16_t result;
  __asm__ __volatile__(
      "movw %1, %0\n\t"
      "1:\n\t"
      "movw %3, %2\n\t"
      "orw %0, %2\n\t"
      "lock cmpxchgw %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-OR for uint32_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline uint32_t OrSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
    L0:
    mov edx, value
    or edx, eax
    lock cmpxchg dword ptr [ecx], edx
    jnz short L0
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t temp;
  uint32_t result;
  __asm__ __volatile__(
      "movl %1, %0\n\t"
      "1:\n\t"
      "movl %3, %2\n\t"
      "orl %0, %2\n\t"
      "lock cmpxchgl %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-OR for int32_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline int32_t OrSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
    L0:
    mov edx, value
    or edx, eax
    lock cmpxchg dword ptr [ecx], edx
    jnz short L0
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t temp;
  int32_t result;
  __asm__ __volatile__(
      "movl %1, %0\n\t"
      "1:\n\t"
      "movl %3, %2\n\t"
      "orl %0, %2\n\t"
      "lock cmpxchgl %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
| +// Xor /////////////////////////////////////////////////////////////////////////
|
// Atomically XORs |value| into *p (SeqCst); returns the previous value
// of *p. Same cmpxchg retry-loop shape as AndSeqCst/OrSeqCst.
inline uint8_t XorSeqCst(uint8_t* p, uint8_t value) {
#if V8_CC_MSVC
  uint8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
    L0:
    mov dl, value
    xor dl, al
    lock cmpxchg byte ptr [ecx], dl
    jnz short L0
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  uint8_t temp;
  uint8_t result;
  __asm__ __volatile__(
      "movb %1, %0\n\t"
      "1:\n\t"
      "movb %3, %2\n\t"
      "xorb %0, %2\n\t"
      "lock cmpxchgb %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&q"(temp)
      : "q"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-XOR for int8_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline int8_t XorSeqCst(int8_t* p, int8_t value) {
#if V8_CC_MSVC
  int8_t result;
  __asm {
    mov ecx, p
    mov al, byte ptr [ecx]
    L0:
    mov dl, value
    xor dl, al
    lock cmpxchg byte ptr [ecx], dl
    jnz short L0
    mov result, al
  }
  return result;
#elif V8_CC_GNU
  int8_t temp;
  int8_t result;
  __asm__ __volatile__(
      "movb %1, %0\n\t"
      "1:\n\t"
      "movb %3, %2\n\t"
      "xorb %0, %2\n\t"
      "lock cmpxchgb %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&q"(temp)
      : "q"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-XOR for uint16_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline uint16_t XorSeqCst(uint16_t* p, uint16_t value) {
#if V8_CC_MSVC
  uint16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
    L0:
    mov dx, value
    xor dx, ax
    lock cmpxchg word ptr [ecx], dx
    jnz short L0
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  uint16_t temp;
  uint16_t result;
  __asm__ __volatile__(
      "movw %1, %0\n\t"
      "1:\n\t"
      "movw %3, %2\n\t"
      "xorw %0, %2\n\t"
      "lock cmpxchgw %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-XOR for int16_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline int16_t XorSeqCst(int16_t* p, int16_t value) {
#if V8_CC_MSVC
  int16_t result;
  __asm {
    mov ecx, p
    mov ax, word ptr [ecx]
    L0:
    mov dx, value
    xor dx, ax
    lock cmpxchg word ptr [ecx], dx
    jnz short L0
    mov result, ax
  }
  return result;
#elif V8_CC_GNU
  int16_t temp;
  int16_t result;
  __asm__ __volatile__(
      "movw %1, %0\n\t"
      "1:\n\t"
      "movw %3, %2\n\t"
      "xorw %0, %2\n\t"
      "lock cmpxchgw %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-XOR for uint32_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline uint32_t XorSeqCst(uint32_t* p, uint32_t value) {
#if V8_CC_MSVC
  uint32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
    L0:
    mov edx, value
    xor edx, eax
    lock cmpxchg dword ptr [ecx], edx
    jnz short L0
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  uint32_t temp;
  uint32_t result;
  __asm__ __volatile__(
      "movl %1, %0\n\t"
      "1:\n\t"
      "movl %3, %2\n\t"
      "xorl %0, %2\n\t"
      "lock cmpxchgl %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +
|
| +
|
// Atomic SeqCst fetch-and-XOR for int32_t via a cmpxchg retry loop;
// returns the previous value of *p.
inline int32_t XorSeqCst(int32_t* p, int32_t value) {
#if V8_CC_MSVC
  int32_t result;
  __asm {
    mov ecx, p
    mov eax, dword ptr [ecx]
    L0:
    mov edx, value
    xor edx, eax
    lock cmpxchg dword ptr [ecx], edx
    jnz short L0
    mov result, eax
  }
  return result;
#elif V8_CC_GNU
  int32_t temp;
  int32_t result;
  __asm__ __volatile__(
      "movl %1, %0\n\t"
      "1:\n\t"
      "movl %3, %2\n\t"
      "xorl %0, %2\n\t"
      "lock cmpxchgl %2, %1\n\t"
      "jnz 1b"
      : "=&a"(result), "+m"(*p), "=&r"(temp)
      : "r"(value)
      : "memory");
  return result;
#else
#error Unsupported compiler.
#endif
}
|
| +} // namespace atomics
|
| +} // namespace internal
|
| +} // namespace v8
|
|
|