Chromium Code Reviews

Unified Diff: src/atomicops_internals_arm64_gcc.h

Issue 212673006: Update atomicops_internals_arm64_gcc with changes made in chromium base/ (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 9 months ago
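
Summary of the change, as visible in the diff below: MemoryBarrier() now emits a real "dmb ish" instead of being an unused stub; the exclusive load/store sequences take the pointed-to value as a read/write memory operand ("+Q" (*ptr)) instead of passing the pointer in a register and addressing it as [%[ptr]]; and several barrier-flavoured helpers (Barrier_AtomicIncrement, the leading barrier of Release_CompareAndSwap, and the Acquire/Release store and load variants) now call MemoryBarrier() or the NoBarrier primitives rather than repeating the inline "dmb ish". The following minimal sketch shows the new operand style in isolation; it assumes an AArch64 GCC-compatible toolchain, and the name Cas32Sketch is illustrative only, not part of the patch.

#include <stdint.h>

inline int32_t Cas32Sketch(volatile int32_t* ptr,
                           int32_t old_value,
                           int32_t new_value) {
  int32_t prev;
  int32_t temp;
  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldxr %w[prev], %[ptr]                  \n\t"  // Load-exclusive *ptr.
    "cmp %w[prev], %w[old_value]            \n\t"
    "bne 1f                                 \n\t"
    "stxr %w[temp], %w[new_value], %[ptr]   \n\t"  // Store-exclusive attempt.
    "cbnz %w[temp], 0b                      \n\t"  // Retry if the store failed.
    "1:                                     \n\t"
    "clrex                                  \n\t"  // Clear the exclusive monitor.
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)  // Read/write memory operand: single base register, no offset.
    : [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT
  return prev;
}

The "+Q" constraint both lets the assembler print the base-register addressing ldxr/stxr require and tells the compiler the asm reads and writes that memory location, rather than relying only on the "memory" clobber.
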
Index: src/atomicops_internals_arm64_gcc.h
diff --git a/src/atomicops_internals_arm64_gcc.h b/src/atomicops_internals_arm64_gcc.h
index 074da5841ecc5d33d0644f81e7edefa29f533373..e6cac19932ad6858311178b8f83f1ffd4f6bd6be 100644
--- a/src/atomicops_internals_arm64_gcc.h
+++ b/src/atomicops_internals_arm64_gcc.h
@@ -33,7 +33,13 @@
namespace v8 {
namespace internal {
-inline void MemoryBarrier() { /* Not used. */ }
+inline void MemoryBarrier() {
+ __asm__ __volatile__ ( // NOLINT
+ "dmb ish \n\t" // Data memory barrier.
+ ::: "memory"
+ ); // NOLINT
+}
+
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
@@ -43,17 +49,17 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
- "ldxr %w[prev], [%[ptr]] \n\t" // Load the previous value.
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
"cmp %w[prev], %w[old_value] \n\t"
"bne 1f \n\t"
- "stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
"1: \n\t"
"clrex \n\t" // In case we didn't swap.
: [prev]"=&r" (prev),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [old_value]"r" (old_value),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
@@ -68,13 +74,13 @@ inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
- "ldxr %w[result], [%[ptr]] \n\t" // Load the previous value.
- "stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
: [result]"=&r" (result),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [new_value]"r" (new_value)
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
: "memory"
); // NOLINT
@@ -88,14 +94,14 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
- "ldxr %w[result], [%[ptr]] \n\t" // Load the previous value.
+ "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
"add %w[result], %w[result], %w[increment]\n\t"
- "stxr %w[temp], %w[result], [%[ptr]] \n\t" // Try to store the result.
+ "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
"cbnz %w[temp], 0b \n\t" // Retry on failure.
: [result]"=&r" (result),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [increment]"r" (increment)
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"r" (increment)
: "memory"
); // NOLINT
@@ -104,23 +110,9 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
- Atomic32 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t" // Data memory barrier.
- "0: \n\t"
- "ldxr %w[result], [%[ptr]] \n\t" // Load the previous value.
- "add %w[result], %w[result], %w[increment]\n\t"
- "stxr %w[temp], %w[result], [%[ptr]] \n\t" // Try to store the result.
- "cbnz %w[temp], 0b \n\t" // Retry on failure.
- "dmb ish \n\t" // Data memory barrier.
- : [result]"=&r" (result),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [increment]"r" (increment)
- : "memory"
- ); // NOLINT
+ MemoryBarrier();
+ Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
return result;
}
@@ -133,10 +125,10 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
- "ldxr %w[prev], [%[ptr]] \n\t" // Load the previous value.
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
"cmp %w[prev], %w[old_value] \n\t"
"bne 1f \n\t"
- "stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
"dmb ish \n\t" // Data memory barrier.
"1: \n\t"
@@ -144,9 +136,9 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
// 'clrex'.
"clrex \n\t"
: [prev]"=&r" (prev),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [old_value]"r" (old_value),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
@@ -160,21 +152,22 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 prev;
int32_t temp;
+ MemoryBarrier();
+
__asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t" // Data memory barrier.
"0: \n\t"
- "ldxr %w[prev], [%[ptr]] \n\t" // Load the previous value.
+ "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
"cmp %w[prev], %w[old_value] \n\t"
"bne 1f \n\t"
- "stxr %w[temp], %w[new_value], [%[ptr]]\n\t" // Try to store the new value.
+ "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
"cbnz %w[temp], 0b \n\t" // Retry if it did not work.
"1: \n\t"
// If the compare failed the we still need a 'clrex'.
"clrex \n\t"
: [prev]"=&r" (prev),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [old_value]"r" (old_value),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
@@ -188,17 +181,11 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t" // Data memory barrier.
- ::: "memory" // Prevent gcc from reordering before the store above.
- ); // NOLINT
+ MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t" // Data memory barrier.
- ::: "memory" // Prevent gcc from reordering after the store below.
- ); // NOLINT
+ MemoryBarrier();
*ptr = value;
}
@@ -208,18 +195,12 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t" // Data memory barrier.
- ::: "memory" // Prevent gcc from reordering before the load above.
- ); // NOLINT
+ MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t" // Data memory barrier.
- ::: "memory" // Prevent gcc from reordering after the load below.
- ); // NOLINT
+ MemoryBarrier();
return *ptr;
}
@@ -234,17 +215,17 @@ inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
- "ldxr %[prev], [%[ptr]] \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
"cmp %[prev], %[old_value] \n\t"
"bne 1f \n\t"
- "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
"1: \n\t"
"clrex \n\t"
: [prev]"=&r" (prev),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [old_value]"r" (old_value),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
@@ -259,13 +240,13 @@ inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
- "ldxr %[result], [%[ptr]] \n\t"
- "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
+ "ldxr %[result], %[ptr] \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
: [result]"=&r" (result),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [new_value]"r" (new_value)
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [new_value]"r" (new_value)
: "memory"
); // NOLINT
@@ -279,14 +260,14 @@ inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
- "ldxr %[result], [%[ptr]] \n\t"
+ "ldxr %[result], %[ptr] \n\t"
"add %[result], %[result], %[increment] \n\t"
- "stxr %w[temp], %[result], [%[ptr]] \n\t"
+ "stxr %w[temp], %[result], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
: [result]"=&r" (result),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [increment]"r" (increment)
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [increment]"r" (increment)
: "memory"
); // NOLINT
@@ -295,23 +276,9 @@ inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
- Atomic64 result;
- int32_t temp;
-
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t"
- "0: \n\t"
- "ldxr %[result], [%[ptr]] \n\t"
- "add %[result], %[result], %[increment] \n\t"
- "stxr %w[temp], %[result], [%[ptr]] \n\t"
- "cbnz %w[temp], 0b \n\t"
- "dmb ish \n\t"
- : [result]"=&r" (result),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [increment]"r" (increment)
- : "memory"
- ); // NOLINT
+ MemoryBarrier();
+ Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
return result;
}
@@ -324,18 +291,18 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
__asm__ __volatile__ ( // NOLINT
"0: \n\t"
- "ldxr %[prev], [%[ptr]] \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
"cmp %[prev], %[old_value] \n\t"
"bne 1f \n\t"
- "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
"dmb ish \n\t"
"1: \n\t"
"clrex \n\t"
: [prev]"=&r" (prev),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [old_value]"r" (old_value),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
@@ -349,20 +316,21 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 prev;
int32_t temp;
+ MemoryBarrier();
+
__asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t"
"0: \n\t"
- "ldxr %[prev], [%[ptr]] \n\t"
+ "ldxr %[prev], %[ptr] \n\t"
"cmp %[prev], %[old_value] \n\t"
"bne 1f \n\t"
- "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
+ "stxr %w[temp], %[new_value], %[ptr] \n\t"
"cbnz %w[temp], 0b \n\t"
"1: \n\t"
"clrex \n\t"
: [prev]"=&r" (prev),
- [temp]"=&r" (temp)
- : [ptr]"r" (ptr),
- [old_value]"r" (old_value),
+ [temp]"=&r" (temp),
+ [ptr]"+Q" (*ptr)
+ : [old_value]"r" (old_value),
[new_value]"r" (new_value)
: "memory", "cc"
); // NOLINT
@@ -376,17 +344,11 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
*ptr = value;
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t"
- ::: "memory"
- ); // NOLINT
+ MemoryBarrier();
}
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t"
- ::: "memory"
- ); // NOLINT
+ MemoryBarrier();
*ptr = value;
}
@@ -396,18 +358,12 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
Atomic64 value = *ptr;
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t"
- ::: "memory"
- ); // NOLINT
+ MemoryBarrier();
return value;
}
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- __asm__ __volatile__ ( // NOLINT
- "dmb ish \n\t"
- ::: "memory"
- ); // NOLINT
+ MemoryBarrier();
return *ptr;
}
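
For context, a hypothetical usage sketch of the acquire/release pair this file defines. The names payload and ready are illustrative, and the include path assumes the usual v8 umbrella header rather than anything this patch introduces.

#include "src/atomicops.h"

namespace {

int payload = 0;
v8::internal::Atomic32 ready = 0;

void Producer() {
  payload = 42;                            // Plain store.
  v8::internal::Release_Store(&ready, 1);  // MemoryBarrier() runs before the
                                           // store, so payload is published first.
}

void Consumer() {
  if (v8::internal::Acquire_Load(&ready) == 1) {
    // MemoryBarrier() ran after the load above, so once ready == 1 is seen
    // this read also observes payload == 42.
    int observed = payload;
    static_cast<void>(observed);
  }
}

}  // namespace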