| OLD | NEW | 
|---|---|
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. | 
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without | 
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are | 
| 4 // met: | 4 // met: | 
| 5 // | 5 // | 
| 6 //     * Redistributions of source code must retain the above copyright | 6 //     * Redistributions of source code must retain the above copyright | 
| 7 //       notice, this list of conditions and the following disclaimer. | 7 //       notice, this list of conditions and the following disclaimer. | 
| 8 //     * Redistributions in binary form must reproduce the above | 8 //     * Redistributions in binary form must reproduce the above | 
| 9 //       copyright notice, this list of conditions and the following | 9 //       copyright notice, this list of conditions and the following | 
| 10 //       disclaimer in the documentation and/or other materials provided | 10 //       disclaimer in the documentation and/or other materials provided | 
| (...skipping 89 matching lines...) | (...skipping 89 matching lines...) | 
| 100   } | 100   } | 
| 101   return x; | 101   return x; | 
| 102 } | 102 } | 
| 103 | 103 | 
| 104 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 104 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 
| 105                                        Atomic32 old_value, | 105                                        Atomic32 old_value, | 
| 106                                        Atomic32 new_value) { | 106                                        Atomic32 new_value) { | 
| 107   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 107   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 
| 108 } | 108 } | 
| 109 | 109 | 
|  | 110 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { | 
|  | 111   *ptr = value; | 
|  | 112 } | 
|  | 113 | 
| 110 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 114 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 
| 111   *ptr = value; | 115   *ptr = value; | 
| 112 } | 116 } | 
| 113 | 117 | 
| 114 #if defined(__x86_64__) | 118 #if defined(__x86_64__) | 
| 115 | 119 | 
| 116 // 64-bit implementations of memory barrier can be simpler, because the | 120 // 64-bit implementations of memory barrier can be simpler, because the | 
| 117 // "mfence" instruction is guaranteed to exist. | 121 // "mfence" instruction is guaranteed to exist. | 
| 118 inline void MemoryBarrier() { | 122 inline void MemoryBarrier() { | 
| 119   __asm__ __volatile__("mfence" : : : "memory"); | 123   __asm__ __volatile__("mfence" : : : "memory"); | 
| (...skipping 25 matching lines...) | (...skipping 25 matching lines...) | 
| 145   } | 149   } | 
| 146 } | 150 } | 
| 147 #endif | 151 #endif | 
| 148 | 152 | 
| 149 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 153 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 
| 150   ATOMICOPS_COMPILER_BARRIER(); | 154   ATOMICOPS_COMPILER_BARRIER(); | 
| 151   *ptr = value;  // An x86 store acts as a release barrier. | 155   *ptr = value;  // An x86 store acts as a release barrier. | 
| 152   // See comments in the Atomic64 version of Release_Store(), below. | 156   // See comments in the Atomic64 version of Release_Store(), below. | 
| 153 } | 157 } | 
| 154 | 158 | 
|  | 159 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { | 
|  | 160   return *ptr; | 
|  | 161 } | 
|  | 162 | 
| 155 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 163 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 
| 156   return *ptr; | 164   return *ptr; | 
| 157 } | 165 } | 
| 158 | 166 | 
| 159 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 167 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 
| 160   Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier. | 168   Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier. | 
| 161   // See comments in the Atomic64 version of Release_Store(), below. | 169   // See comments in the Atomic64 version of Release_Store(), below. | 
| 162   ATOMICOPS_COMPILER_BARRIER(); | 170   ATOMICOPS_COMPILER_BARRIER(); | 
| 163   return value; | 171   return value; | 
| 164 } | 172 } | 
| (...skipping 113 matching lines...) | (...skipping 113 matching lines...) | 
| 278   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 286   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 
| 279 } | 287 } | 
| 280 | 288 | 
| 281 #endif  // defined(__x86_64__) | 289 #endif  // defined(__x86_64__) | 
| 282 | 290 | 
| 283 } }  // namespace v8::internal | 291 } }  // namespace v8::internal | 
| 284 | 292 | 
| 285 #undef ATOMICOPS_COMPILER_BARRIER | 293 #undef ATOMICOPS_COMPILER_BARRIER | 
| 286 | 294 | 
| 287 #endif  // V8_ATOMICOPS_INTERNALS_X86_GCC_H_ | 295 #endif  // V8_ATOMICOPS_INTERNALS_X86_GCC_H_ | 
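
The unchanged `Release_CompareAndSwap` (old lines 104-108) simply forwards to `NoBarrier_CompareAndSwap`. That is sound on x86 because compare-and-swap is implemented with a `lock`-prefixed instruction, and `lock`-prefixed instructions act as full memory barriers, so every CAS flavor already carries both acquire and release semantics. A minimal sketch of that kind of implementation (reconstructed for illustration, not quoted from the elided lines):

```cpp
#include <stdint.h>

typedef int32_t Atomic32;

// "lock; cmpxchgl" compares EAX with *ptr; on a match it stores new_value
// into *ptr, otherwise it loads *ptr into EAX. Either way, the lock prefix
// makes the whole read-modify-write a full barrier.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a"(prev)
                       : "q"(new_value), "m"(*ptr), "0"(old_value)
                       : "memory");
  return prev;  // equals old_value iff the swap happened
}

int main() {
  volatile Atomic32 x = 5;
  Atomic32 prev = NoBarrier_CompareAndSwap(&x, 5, 9);
  return (prev == 5 && x == 9) ? 0 : 1;  // swap succeeded
}
```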
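
The patch's main addition is the `Atomic8` overloads of `NoBarrier_Store` and `NoBarrier_Load` (new lines 110-112 and 159-161), which are plain `volatile` accesses. Aligned single-byte loads and stores are atomic on x86, so no fence is needed when the caller only wants tearing-free access with no ordering guarantees. A standalone sketch of how such a relaxed byte flag might be used (hypothetical names; the matching `Atomic8` typedef is assumed to live in `atomicops.h`):

```cpp
// Build: g++ -O2 -pthread atomic8_flag.cc
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef char Atomic8;  // assumed typedef, mirroring atomicops.h

// Aligned one-byte accesses are atomic on x86; "volatile" only keeps the
// compiler from caching or eliding the access. No fence is emitted.
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

volatile Atomic8 stop = 0;

void* worker(void*) {
  long spins = 0;
  while (!NoBarrier_Load(&stop)) ++spins;  // only the flag itself is
  printf("spun %ld times\n", spins);       // synchronized, nothing else
  return NULL;
}

int main() {
  pthread_t t;
  pthread_create(&t, NULL, worker, NULL);
  NoBarrier_Store(&stop, 1);  // tearing-free because it is a single byte
  pthread_join(t, NULL);
  return 0;
}
```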
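
`Release_Store` and `Acquire_Load` lean on x86's strong hardware ordering: as the comments in the diff note, an ordinary store already has release semantics and an ordinary load has acquire semantics, so `ATOMICOPS_COMPILER_BARRIER()` only has to stop the *compiler* from reordering and emits no instruction. A sketch of the publish/consume handshake this enables (a standalone reimplementation under that assumption; `COMPILER_BARRIER` mirrors what the header's macro is assumed to expand to):

```cpp
// Build: g++ -O2 -pthread release_acquire.cc
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef int32_t Atomic32;

// Compiler-only barrier: emits no instruction, just forbids the compiler
// from moving memory accesses across it.
#define COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

// Mirrors Release_Store(): the x86 store itself is the release.
inline void ReleaseStore(volatile Atomic32* ptr, Atomic32 value) {
  COMPILER_BARRIER();
  *ptr = value;
}

// Mirrors Acquire_Load(): the x86 load itself is the acquire.
inline Atomic32 AcquireLoad(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  COMPILER_BARRIER();
  return value;
}

volatile Atomic32 ready = 0;
int payload = 0;

void* producer(void*) {
  payload = 42;             // plain store, published by the release below
  ReleaseStore(&ready, 1);  // nothing before this may sink past it
  return NULL;
}

void* consumer(void*) {
  while (AcquireLoad(&ready) == 0) {}  // nothing after may hoist above it
  printf("payload = %d\n", payload);   // guaranteed to print 42
  return NULL;
}

int main() {
  pthread_t p, c;
  pthread_create(&p, NULL, producer, NULL);
  pthread_create(&c, NULL, consumer, NULL);
  pthread_join(p, NULL);
  pthread_join(c, NULL);
  return 0;
}
```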
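
In contrast, the full `MemoryBarrier()` must emit a real instruction. x86 does permit one reordering the release/acquire pattern above cannot rule out: a store may be reordered with a *later load from a different location* (StoreLoad), and only a fence such as `mfence` prevents that; the x86-64 path can rely on `mfence` unconditionally. A compact illustration of the one case that needs it, a Dekker-style handshake (sketch; the peer thread would run the mirror image with the flags swapped):

```cpp
#include <stdint.h>
#include <stdio.h>

typedef int32_t Atomic32;

// The header's x86-64 MemoryBarrier(): a full fence that also forbids
// StoreLoad reordering, which plain x86 stores and loads do not.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

volatile Atomic32 my_flag = 0;
volatile Atomic32 other_flag = 0;

int try_enter() {
  my_flag = 1;
  MemoryBarrier();          // without this, both threads could read 0 below
  return other_flag == 0;   // safe to enter only if the peer has not
}

int main() {
  printf("try_enter() = %d\n", try_enter());
  return 0;
}
```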