OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file is an internal atomic implementation, use base/atomicops.h instead. | 5 // This file is an internal atomic implementation, use base/atomicops.h instead. |
6 // | 6 // |
7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. | 7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. |
8 | 8 |
9 #ifndef BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 9 #ifndef BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ |
10 #define BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 10 #define BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ |
11 | 11 |
| 12 #if defined(OS_QNX) |
| 13 #include <sys/cpuinline.h> |
| 14 #endif |
| 15 |
12 namespace base { | 16 namespace base { |
13 namespace subtle { | 17 namespace subtle { |
14 | 18 |
15 // Memory barriers on ARM are funky, but the kernel is here to help: | 19 // Memory barriers on ARM are funky, but the kernel is here to help: |
16 // | 20 // |
17 // * ARMv5 didn't support SMP; there is no memory barrier instruction at | 21 // * ARMv5 didn't support SMP; there is no memory barrier instruction at |
18 // all on this architecture, or when targeting its machine code. | 22 // all on this architecture, or when targeting its machine code. |
19 // | 23 // |
20 // * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by | 24 // * Some ARMv6 CPUs support SMP. A full memory barrier can be produced by |
21 // writing a random value to a very specific coprocessor register. | 25 // writing a random value to a very specific coprocessor register. |
(...skipping 11 matching lines...) |
33 // core devices, this is an empty function that exits immediately. | 37 // core devices, this is an empty function that exits immediately. |
34 // On multi-core devices, it implements a full memory barrier. | 38 // On multi-core devices, it implements a full memory barrier. |
35 // | 39 // |
36 // * This source could be compiled to ARMv5 machine code that runs on a | 40 // * This source could be compiled to ARMv5 machine code that runs on a |
37 // multi-core ARMv6 or ARMv7 device. In this case, memory barriers | 41 // multi-core ARMv6 or ARMv7 device. In this case, memory barriers |
38 // are needed for correct execution. Always call the kernel helper, even | 42 // are needed for correct execution. Always call the kernel helper, even |
39 // when targeting ARMv5TE. | 43 // when targeting ARMv5TE. |
40 // | 44 // |
41 | 45 |
42 inline void MemoryBarrier() { | 46 inline void MemoryBarrier() { |
43 // Note: This is a function call, which is also an implicit compiler | 47 #if defined(OS_LINUX) || defined(OS_ANDROID) |
44 // barrier. | 48 // Note: This is a function call, which is also an implicit compiler barrier. |
45 typedef void (*KernelMemoryBarrierFunc)(); | 49 typedef void (*KernelMemoryBarrierFunc)(); |
46 ((KernelMemoryBarrierFunc)0xffff0fa0)(); | 50 ((KernelMemoryBarrierFunc)0xffff0fa0)(); |
| 51 #elif defined(OS_QNX) |
| 52 __cpu_membarrier(); |
| 53 #else |
| 54 #error MemoryBarrier() is not implemented on this platform. |
| 55 #endif |
47 } | 56 } |
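Side note on the barrier instructions the comment above alludes to: on a build that is known to target ARMv6K/ARMv7 hardware only, the barrier could be issued directly instead of going through the kernel helper at 0xffff0fa0. The snippet below is an illustration only, not part of this patch; the function name DirectMemoryBarrier is made up for the sketch.

inline void DirectMemoryBarrier() {
#if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7__)
  // ARMv7 has a dedicated data memory barrier instruction.
  __asm__ __volatile__("dmb ish" : : : "memory");
#elif defined(__ARM_ARCH_6K__)
  // ARMv6: the barrier is a CP15 coprocessor write (the "specific coprocessor
  // register" mentioned above); the value written is ignored, so zero is used.
  __asm__ __volatile__("mcr p15, 0, %0, c7, c10, 5" : : "r"(0) : "memory");
#endif
}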
48 | 57 |
49 // An ARM toolchain would only define one of these depending on which | 58 // An ARM toolchain would only define one of these depending on which |
50 // variant of the target architecture is being used. This tests against | 59 // variant of the target architecture is being used. This tests against |
51 // any known ARMv6 or ARMv7 variant, where it is possible to directly | 60 // any known ARMv6 or ARMv7 variant, where it is possible to directly |
52 // use ldrex/strex instructions to implement fast atomic operations. | 61 // use ldrex/strex instructions to implement fast atomic operations. |
53 #if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \ | 62 #if defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \ |
54 defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \ | 63 defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \ |
55 defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \ | 64 defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \ |
56 defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \ | 65 defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \ |
(...skipping 219 matching lines...) |
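The elided region above contains the ldrex/strex-based primitives that the preceding comment refers to. As a generic sketch of what such a compare-and-swap loop looks like on ARMv6/ARMv7, written from the architecture manual rather than copied from this file (the name SketchCompareAndSwap and the exact operand constraints are assumptions of the illustration):

inline Atomic32 SketchCompareAndSwap(volatile Atomic32* ptr,
                                     Atomic32 old_value,
                                     Atomic32 new_value) {
  Atomic32 prev;
  int failed;
  do {
    __asm__ __volatile__(
        "ldrex %0, [%3]\n"        // Load-exclusive the current value.
        "mov %1, #0\n"            // Assume no store attempt will be made.
        "teq %0, %4\n"            // Does it match the expected value?
#ifdef __thumb2__
        "it eq\n"                 // Thumb-2 needs an IT block for strexeq.
#endif
        "strexeq %1, %5, [%3]\n"  // Store-exclusive new_value if it matched;
                                  // %1 becomes 1 if the exclusive store failed.
        : "=&r"(prev), "=&r"(failed), "+m"(*ptr)
        : "r"(ptr), "r"(old_value), "r"(new_value)
        : "cc", "memory");
  } while (failed != 0);  // Retry only if strex lost the exclusive monitor.
  return prev;
}

If the loaded value does not match old_value, the store is skipped, the loop exits, and the mismatching previous value is returned, which is the usual compare-and-swap contract.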
276 | 285 |
277 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 286 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
278 MemoryBarrier(); | 287 MemoryBarrier(); |
279 return *ptr; | 288 return *ptr; |
280 } | 289 } |
281 | 290 |
282 } // namespace base::subtle | 291 } // namespace base::subtle |
283 } // namespace base | 292 } // namespace base |
284 | 293 |
285 #endif // BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ | 294 #endif // BASE_ATOMICOPS_INTERNALS_ARM_GCC_H_ |