Chromium Code Reviews

Unified Diff: src/runtime/runtime-atomics-x64.asm

Issue 1550803006: Convert runtime atomics functions to inline asm (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: update BUILD.gn, add *-inl.h and *.h to gyp as well (created 4 years, 11 months ago)
Index: src/runtime/runtime-atomics-x64.asm
diff --git a/src/runtime/runtime-atomics-x64.asm b/src/runtime/runtime-atomics-x64.asm
new file mode 100644
index 0000000000000000000000000000000000000000..50f4ad1309272159755c7c0376e0770d990a6d07
--- /dev/null
+++ b/src/runtime/runtime-atomics-x64.asm
@@ -0,0 +1,424 @@
+; Copyright 2016 the V8 project authors. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+.CODE
+
+func MACRO name
+PUBLIC name
+name:
+endm
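+
+; The decorated labels below are the MSVC-mangled names of the C++ functions
+; shown in the comment above each one. All of them use the Windows x64
+; calling convention: the first three integer arguments arrive in rcx, rdx
+; and r8, and the integer result is returned in al/ax/eax.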
+
+;; LOAD ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
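+; On x86-64 a plain mov load of an aligned value is sequentially consistent,
+; provided every seq_cst store goes through a locked instruction (StoreSeqCst
+; below uses xchg), so no fence is needed on the load side.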
+; uint8_t v8::internal::atomics::LoadSeqCst(uint8_t*);
+func ?LoadSeqCst@atomics@internal@v8@@YAEPEAE@Z
+ mov al, [rcx]
+ ret
+
+; int8_t v8::internal::atomics::LoadSeqCst(int8_t*);
+func ?LoadSeqCst@atomics@internal@v8@@YACPEAC@Z
+ mov al, [rcx]
+ ret
+
+; uint16_t v8::internal::atomics::LoadSeqCst(uint16_t*);
+func ?LoadSeqCst@atomics@internal@v8@@YAGPEAG@Z
+ mov ax, [rcx]
+ ret
+
+; int16_t v8::internal::atomics::LoadSeqCst(int16_t*);
+func ?LoadSeqCst@atomics@internal@v8@@YAFPEAF@Z
+ mov ax, [rcx]
+ ret
+
+; uint32_t v8::internal::atomics::LoadSeqCst(uint32_t*);
+func ?LoadSeqCst@atomics@internal@v8@@YAIPEAI@Z
+ mov eax, [rcx]
+ ret
+
+; int32_t v8::internal::atomics::LoadSeqCst(int32_t*);
+func ?LoadSeqCst@atomics@internal@v8@@YAHPEAH@Z
+ mov eax, [rcx]
+ ret
+
+
+;; STORE ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
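+; xchg with a memory operand carries an implicit lock prefix and acts as a
+; full memory barrier, so a single xchg is a sequentially consistent store
+; with no separate mfence; the old value left in the register is ignored.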
+; void v8::internal::atomics::StoreSeqCst(uint8_t*, uint8_t);
+func ?StoreSeqCst@atomics@internal@v8@@YAXPEAEE@Z
+ xchg [rcx], dl
+ ret
+
+; void v8::internal::atomics::StoreSeqCst(int8_t*, int8_t);
+func ?StoreSeqCst@atomics@internal@v8@@YAXPEACC@Z
+ xchg [rcx], dl
+ ret
+
+; void v8::internal::atomics::StoreSeqCst(uint16_t*, uint16_t);
+func ?StoreSeqCst@atomics@internal@v8@@YAXPEAGG@Z
+ xchg [rcx], dx
+ ret
+
+; void v8::internal::atomics::StoreSeqCst(int16_t*, int16_t);
+func ?StoreSeqCst@atomics@internal@v8@@YAXPEAFF@Z
+ xchg [rcx], dx
+ ret
+
+; void v8::internal::atomics::StoreSeqCst(uint32_t*, uint32_t);
+func ?StoreSeqCst@atomics@internal@v8@@YAXPEAII@Z
+ xchg [rcx], edx
+ ret
+
+; void v8::internal::atomics::StoreSeqCst(int32_t*, int32_t);
+func ?StoreSeqCst@atomics@internal@v8@@YAXPEAHH@Z
+ xchg [rcx], edx
+ ret
+
+
+;; ADD ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
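+; lock xadd atomically adds the register to memory and writes the previous
+; memory value back into the register, so dl/dx/edx holds the old value
+; afterwards and is widened into eax as the return value.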
+; uint8_t v8::internal::atomics::AddSeqCst(uint8_t*, uint8_t);
+func ?AddSeqCst@atomics@internal@v8@@YAEPEAEE@Z
+ lock xadd [rcx], dl
+ movzx eax, dl
+ ret
+
+; int8_t v8::internal::atomics::AddSeqCst(int8_t*, int8_t);
+func ?AddSeqCst@atomics@internal@v8@@YACPEACC@Z
+ lock xadd [rcx], dl
+ movsx eax, dl
+ ret
+
+; uint16_t v8::internal::atomics::AddSeqCst(uint16_t*, uint16_t);
+func ?AddSeqCst@atomics@internal@v8@@YAGPEAGG@Z
+ lock xadd [rcx], dx
+ movzx eax, dx
+ ret
+
+; int16_t v8::internal::atomics::AddSeqCst(int16_t*, int16_t);
+func ?AddSeqCst@atomics@internal@v8@@YAFPEAFF@Z
+ lock xadd [rcx], dx
+ movsx eax, dx
+ ret
+
+; uint32_t v8::internal::atomics::AddSeqCst(uint32_t*, uint32_t);
+func ?AddSeqCst@atomics@internal@v8@@YAIPEAII@Z
+ lock xadd [rcx], edx
+ mov eax, edx
+ ret
+
+; int32_t v8::internal::atomics::AddSeqCst(int32_t*, int32_t);
+func ?AddSeqCst@atomics@internal@v8@@YAHPEAHH@Z
+ lock xadd [rcx], edx
+ mov eax, edx
+ ret
+
+
+;; SUB ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
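+; Subtraction reuses the xadd pattern above: negate the operand first, then
+; atomically add the two's-complement negation and return the old value.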
+; uint8_t v8::internal::atomics::SubSeqCst(uint8_t*, uint8_t);
+func ?SubSeqCst@atomics@internal@v8@@YAEPEAEE@Z
+ neg dl
+ lock xadd [rcx], dl
+ movzx eax, dl
+ ret
+
+; int8_t v8::internal::atomics::SubSeqCst(int8_t*, int8_t);
+func ?SubSeqCst@atomics@internal@v8@@YACPEACC@Z
+ neg dl
+ lock xadd [rcx], dl
+ movsx eax, dl
+ ret
+
+; uint16_t v8::internal::atomics::SubSeqCst(uint16_t*, uint16_t);
+func ?SubSeqCst@atomics@internal@v8@@YAGPEAGG@Z
+ neg dx
+ lock xadd [rcx], dx
+ movzx eax, dx
+ ret
+
+; int16_t v8::internal::atomics::SubSeqCst(int16_t*, int16_t);
+func ?SubSeqCst@atomics@internal@v8@@YAFPEAFF@Z
+ neg dx
+ lock xadd [rcx], dx
+ movsx eax, dx
+ ret
+
+; uint32_t v8::internal::atomics::SubSeqCst(uint32_t*, uint32_t);
+func ?SubSeqCst@atomics@internal@v8@@YAIPEAII@Z
+ neg edx
+ lock xadd [rcx], edx
+ mov eax, edx
+ ret
+
+; int32_t v8::internal::atomics::SubSeqCst(int32_t*, int32_t);
+func ?SubSeqCst@atomics@internal@v8@@YAHPEAHH@Z
+ neg edx
+ lock xadd [rcx], edx
+ mov eax, edx
+ ret
+
+
+;; EXCHANGE ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
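+; As with the stores, xchg on a memory operand is implicitly locked, so one
+; instruction swaps the value with full sequential consistency; the old value
+; swapped into dl/dx/edx is widened into eax as the return value.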
+; uint8_t v8::internal::atomics::ExchangeSeqCst(uint8_t*, uint8_t);
+func ?ExchangeSeqCst@atomics@internal@v8@@YAEPEAEE@Z
+ xchg [rcx], dl
+ movzx eax, dl
+ ret
+
+; int8_t v8::internal::atomics::ExchangeSeqCst(int8_t*, int8_t);
+func ?ExchangeSeqCst@atomics@internal@v8@@YACPEACC@Z
+ xchg [rcx], dl
+ movsx eax, dl
+ ret
+
+; uint16_t v8::internal::atomics::ExchangeSeqCst(uint16_t*, uint16_t);
+func ?ExchangeSeqCst@atomics@internal@v8@@YAGPEAGG@Z
+ xchg [rcx], dx
+ movzx eax, dx
+ ret
+
+; int16_t v8::internal::atomics::ExchangeSeqCst(int16_t*, int16_t);
+func ?ExchangeSeqCst@atomics@internal@v8@@YAFPEAFF@Z
+ xchg [rcx], dx
+ movsx eax, dx
+ ret
+
+; uint32_t v8::internal::atomics::ExchangeSeqCst(uint32_t*, uint32_t);
+func ?ExchangeSeqCst@atomics@internal@v8@@YAIPEAII@Z
+ xchg [rcx], edx
+ mov eax, edx
+ ret
+
+; int32_t v8::internal::atomics::ExchangeSeqCst(int32_t*, int32_t);
+func ?ExchangeSeqCst@atomics@internal@v8@@YAHPEAHH@Z
+ xchg [rcx], edx
+ mov eax, edx
+ ret
+
+
+;; COMPARE EXCHANGE ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
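+; cmpxchg compares al/ax/eax with the memory operand; if equal, the new value
+; in r8 is stored, otherwise the current memory value is loaded into the
+; accumulator. Either way eax ends up holding the value that was in memory
+; before the operation, which is what these functions return.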
+; uint8_t v8::internal::atomics::CompareExchangeSeqCst(
+; uint8_t*, uint8_t oldval, uint8_t newval);
+func ?CompareExchangeSeqCst@atomics@internal@v8@@YAEPEAEEE@Z
+ mov al, dl
+ lock cmpxchg [rcx], r8b
+ ret
+
+; int8_t v8::internal::atomics::CompareExchangeSeqCst(
+; int8_t*, int8_t oldval, int8_t newval);
+func ?CompareExchangeSeqCst@atomics@internal@v8@@YACPEACCC@Z
+ mov al, dl
+ lock cmpxchg [rcx], r8b
+ ret
+
+; uint16_t v8::internal::atomics::CompareExchangeSeqCst(
+; uint16_t*, uint16_t oldval, uint16_t newval);
+func ?CompareExchangeSeqCst@atomics@internal@v8@@YAGPEAGGG@Z
+ mov ax, dx
+ lock cmpxchg [rcx], r8w
+ ret
+
+; int16_t v8::internal::atomics::CompareExchangeSeqCst(
+; int16_t*, int16_t oldval, int16_t newval);
+func ?CompareExchangeSeqCst@atomics@internal@v8@@YAFPEAFFF@Z
+ mov ax, dx
+ lock cmpxchg [rcx], r8w
+ ret
+
+; uint32_t v8::internal::atomics::CompareExchangeSeqCst(
+; uint32_t*, uint32_t oldval, uint32_t newval);
+func ?CompareExchangeSeqCst@atomics@internal@v8@@YAIPEAIII@Z
+ mov eax, edx
+ lock cmpxchg [rcx], r8d
+ ret
+
+; int32_t v8::internal::atomics::CompareExchangeSeqCst(
+; int32_t*, int32_t oldval, int32_t newval);
+func ?CompareExchangeSeqCst@atomics@internal@v8@@YAHPEAHHH@Z
+ mov eax, edx
+ lock cmpxchg [rcx], r8d
+ ret
+
+
+;; AND ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
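+; And/Or/Xor have no single x86 instruction that both updates memory and
+; returns the old value, so they use a compare-and-swap loop: load the
+; current value into eax, compute the new value in r8, and retry with
+; lock cmpxchg. On failure cmpxchg reloads eax with the value it observed,
+; so jumping back to @@ retries with up-to-date data until it succeeds.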
+; uint8_t v8::internal::atomics::AndSeqCst(uint8_t*, uint8_t);
+func ?AndSeqCst@atomics@internal@v8@@YAEPEAEE@Z
+ mov al, [rcx]
+@@:
+ mov r8b, dl
+ and r8b, al
+ lock cmpxchg [rcx], r8b
+ jnz short @B
+ ret
+
+; int8_t v8::internal::atomics::AndSeqCst(int8_t*, int8_t);
+func ?AndSeqCst@atomics@internal@v8@@YACPEACC@Z
+ mov al, [rcx]
+@@:
+ mov r8b, dl
+ and r8b, al
+ lock cmpxchg [rcx], r8b
+ jnz short @B
+ ret
+
+; uint16_t v8::internal::atomics::AndSeqCst(uint16_t*, uint16_t);
+func ?AndSeqCst@atomics@internal@v8@@YAGPEAGG@Z
+ mov ax, [rcx]
+@@:
+ mov r8w, dx
+ and r8w, ax
+ lock cmpxchg [rcx], r8w
+ jnz short @B
+ ret
+
+; int16_t v8::internal::atomics::AndSeqCst(int16_t*, int16_t);
+func ?AndSeqCst@atomics@internal@v8@@YAFPEAFF@Z
+ mov ax, [rcx]
+@@:
+ mov r8w, dx
+ and r8w, ax
+ lock cmpxchg [rcx], r8w
+ jnz short @B
+ ret
+
+; uint32_t v8::internal::atomics::AndSeqCst(uint32_t*, uint32_t);
+func ?AndSeqCst@atomics@internal@v8@@YAIPEAII@Z
+ mov eax, [rcx]
+@@:
+ mov r8d, edx
+ and r8d, eax
+ lock cmpxchg [rcx], r8d
+ jnz short @B
+ ret
+
+; int32_t v8::internal::atomics::AndSeqCst(int32_t*, int32_t);
+func ?AndSeqCst@atomics@internal@v8@@YAHPEAHH@Z
+ mov eax, [rcx]
+@@:
+ mov r8d, edx
+ and r8d, eax
+ lock cmpxchg [rcx], r8d
+ jnz short @B
+ ret
+
+
+;; OR ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; uint8_t v8::internal::atomics::OrSeqCst(uint8_t*, uint8_t);
+func ?OrSeqCst@atomics@internal@v8@@YAEPEAEE@Z
+ mov al, [rcx]
+@@:
+ mov r8b, dl
+ or r8b, al
+ lock cmpxchg [rcx], r8b
+ jnz short @B
+ ret
+
+; int8_t v8::internal::atomics::OrSeqCst(int8_t*, int8_t);
+func ?OrSeqCst@atomics@internal@v8@@YACPEACC@Z
+ mov al, [rcx]
+@@:
+ mov r8b, dl
+ or r8b, al
+ lock cmpxchg [rcx], r8b
+ jnz short @B
+ ret
+
+; uint16_t v8::internal::atomics::OrSeqCst(uint16_t*, uint16_t);
+func ?OrSeqCst@atomics@internal@v8@@YAGPEAGG@Z
+ mov ax, [rcx]
+@@:
+ mov r8w, dx
+ or r8w, ax
+ lock cmpxchg [rcx], r8w
+ jnz short @B
+ ret
+
+; int16_t v8::internal::atomics::OrSeqCst(int16_t*, int16_t);
+func ?OrSeqCst@atomics@internal@v8@@YAFPEAFF@Z
+ mov ax, [rcx]
+@@:
+ mov r8w, dx
+ or r8w, ax
+ lock cmpxchg [rcx], r8w
+ jnz short @B
+ ret
+
+; uint32_t v8::internal::atomics::OrSeqCst(uint32_t*, uint32_t);
+func ?OrSeqCst@atomics@internal@v8@@YAIPEAII@Z
+ mov eax, [rcx]
+@@:
+ mov r8d, edx
+ or r8d, eax
+ lock cmpxchg [rcx], r8d
+ jnz short @B
+ ret
+
+; int32_t v8::internal::atomics::OrSeqCst(int32_t*, int32_t);
+func ?OrSeqCst@atomics@internal@v8@@YAHPEAHH@Z
+ mov eax, [rcx]
+@@:
+ mov r8d, edx
+ or r8d, eax
+ lock cmpxchg [rcx], r8d
+ jnz short @B
+ ret
+
+
+;; XOR ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+; uint8_t v8::internal::atomics::XorSeqCst(uint8_t*, uint8_t);
+func ?XorSeqCst@atomics@internal@v8@@YAEPEAEE@Z
+ mov al, [rcx]
+@@:
+ mov r8b, dl
+ xor r8b, al
+ lock cmpxchg [rcx], r8b
+ jnz short @B
+ ret
+
+; int8_t v8::internal::atomics::XorSeqCst(int8_t*, int8_t);
+func ?XorSeqCst@atomics@internal@v8@@YACPEACC@Z
+ mov al, [rcx]
+@@:
+ mov r8b, dl
+ xor r8b, al
+ lock cmpxchg [rcx], r8b
+ jnz short @B
+ ret
+
+; uint16_t v8::internal::atomics::XorSeqCst(uint16_t*, uint16_t);
+func ?XorSeqCst@atomics@internal@v8@@YAGPEAGG@Z
+ mov ax, [rcx]
+@@:
+ mov r8w, dx
+ xor r8w, ax
+ lock cmpxchg [rcx], r8w
+ jnz short @B
+ ret
+
+; int16_t v8::internal::atomics::XorSeqCst(int16_t*, int16_t);
+func ?XorSeqCst@atomics@internal@v8@@YAFPEAFF@Z
+ mov ax, [rcx]
+@@:
+ mov r8w, dx
+ xor r8w, ax
+ lock cmpxchg [rcx], r8w
+ jnz short @B
+ ret
+
+; uint32_t v8::internal::atomics::XorSeqCst(uint32_t*, uint32_t);
+func ?XorSeqCst@atomics@internal@v8@@YAIPEAII@Z
+ mov eax, [rcx]
+@@:
+ mov r8d, edx
+ xor r8d, eax
+ lock cmpxchg [rcx], r8d
+ jnz short @B
+ ret
+
+; int32_t v8::internal::atomics::XorSeqCst(int32_t*, int32_t);
+func ?XorSeqCst@atomics@internal@v8@@YAHPEAHH@Z
+ mov eax, [rcx]
+@@:
+ mov r8d, edx
+ xor r8d, eax
+ lock cmpxchg [rcx], r8d
+ jnz short @B
+ ret
+
+END