| OLD | NEW | 
|    1 // Copyright 2012 the V8 project authors. All rights reserved. |    1 // Copyright 2012 the V8 project authors. All rights reserved. | 
|    2 // Use of this source code is governed by a BSD-style license that can be |    2 // Use of this source code is governed by a BSD-style license that can be | 
|    3 // found in the LICENSE file. |    3 // found in the LICENSE file. | 
|    4  |    4  | 
|    5 // This file is an internal atomic implementation, use atomicops.h instead. |    5 // This file is an internal atomic implementation, use atomicops.h instead. | 
 |    6 // | 
|    6  |    7  | 
|    7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |    8 #ifndef V8_BASE_ATOMICOPS_INTERNALS_PPC_H_ | 
|    8 #define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |    9 #define V8_BASE_ATOMICOPS_INTERNALS_PPC_H_ | 
|    9  |   10  | 
|   10 namespace v8 { |   11 namespace v8 { | 
|   11 namespace base { |   12 namespace base { | 
|   12  |   13  | 
|   13 inline void MemoryBarrier() { __sync_synchronize(); } |  | 
|   14  |  | 
|   15 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |   14 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 
|   16                                          Atomic32 old_value, |   15                                          Atomic32 old_value, | 
|   17                                          Atomic32 new_value) { |   16                                          Atomic32 new_value) { | 
|   18   return __sync_val_compare_and_swap(ptr, old_value, new_value); |   17   return __sync_val_compare_and_swap(ptr, old_value, new_value); | 
|   19 } |   18 } | 
|   20  |   19  | 
|   21 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |   20 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 
|   22                                          Atomic32 new_value) { |   21                                          Atomic32 new_value) { | 
|   23   return __sync_lock_test_and_set(ptr, new_value); |   22   Atomic32 old_value; | 
 |   23   do { | 
 |   24     old_value = *ptr; | 
 |   25   } while (!__sync_bool_compare_and_swap(ptr, old_value, new_value)); | 
 |   26   return old_value; | 
|   24 } |   27 } | 
|   25  |   28  | 
|   26 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |   29 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 
|   27                                           Atomic32 increment) { |   30                                           Atomic32 increment) { | 
|   28   return __sync_add_and_fetch(ptr, increment); |   31   return Barrier_AtomicIncrement(ptr, increment); | 
|   29 } |   32 } | 
|   30  |   33  | 
|   31 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |   34 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 
|   32                                         Atomic32 increment) { |   35                                         Atomic32 increment) { | 
|   33   return __sync_add_and_fetch(ptr, increment); |   36   for (;;) { | 
 |   37     Atomic32 old_value = *ptr; | 
 |   38     Atomic32 new_value = old_value + increment; | 
 |   39     if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) { | 
 |   40       // The exchange took place as expected. | 
 |   41       return new_value; | 
 |   42     } | 
 |   43     // Otherwise, *ptr changed mid-loop and we need to retry. | 
 |   44   } | 
|   34 } |   45 } | 
|   35  |   46  | 
|   36 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |   47 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 
|   37                                        Atomic32 old_value, Atomic32 new_value) { |   48                                        Atomic32 old_value, Atomic32 new_value) { | 
|   38   return __sync_val_compare_and_swap(ptr, old_value, new_value); |   49   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 
|   39 } |   50 } | 
|   40  |   51  | 
|   41 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |   52 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 
|   42                                        Atomic32 old_value, Atomic32 new_value) { |   53                                        Atomic32 old_value, Atomic32 new_value) { | 
|   43   return __sync_val_compare_and_swap(ptr, old_value, new_value); |   54   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 
|   44 } |   55 } | 
|   45  |   56  | 
|   46 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { |   57 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { | 
|   47   __sync_lock_test_and_set(ptr, value); |   58   *ptr = value; | 
|   48 } |   59 } | 
|   49  |   60  | 
|   50 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |   61 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 
|   51   __sync_lock_test_and_set(ptr, value); |   62   *ptr = value; | 
|   52 } |   63 } | 
|   53  |   64  | 
 |   65 inline void MemoryBarrier() { __asm__ __volatile__("sync" : : : "memory"); } | 
 |   66  | 
 |   67  | 
|   54 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |   68 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 
|   55   __sync_lock_test_and_set(ptr, value); |   69   *ptr = value; | 
 |   70   MemoryBarrier(); | 
|   56 } |   71 } | 
|   57  |   72  | 
|   58 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |   73 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 
|   59   __sync_lock_test_and_set(ptr, value); |   74   MemoryBarrier(); | 
 |   75   *ptr = value; | 
|   60 } |   76 } | 
|   61  |   77  | 
|   62 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { |   78 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { return *ptr; } | 
|   63   return __sync_add_and_fetch(ptr, 0); |  | 
|   64 } |  | 
|   65  |   79  | 
|   66 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |   80 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } | 
|   67   return __sync_add_and_fetch(ptr, 0); |  | 
|   68 } |  | 
|   69  |   81  | 
|   70 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |   82 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 
|   71   return __sync_add_and_fetch(ptr, 0); |   83   Atomic32 value = *ptr; | 
 |   84   MemoryBarrier(); | 
 |   85   return value; | 
|   72 } |   86 } | 
|   73  |   87  | 
|   74 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |   88 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 
|   75   return __sync_add_and_fetch(ptr, 0); |   89   MemoryBarrier(); | 
 |   90   return *ptr; | 
|   76 } |   91 } | 
|   77  |   92  | 
|   78 // 64-bit versions of the operations. |   93 #ifdef V8_TARGET_ARCH_PPC64 | 
|   79 // See the 32-bit versions for comments. |  | 
|   80  |  | 
|   81 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |   94 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 
|   82                                          Atomic64 old_value, |   95                                          Atomic64 old_value, | 
|   83                                          Atomic64 new_value) { |   96                                          Atomic64 new_value) { | 
|   84   return __sync_val_compare_and_swap(ptr, old_value, new_value); |   97   return __sync_val_compare_and_swap(ptr, old_value, new_value); | 
|   85 } |   98 } | 
|   86  |   99  | 
|   87 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |  100 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, | 
|   88                                          Atomic64 new_value) { |  101                                          Atomic64 new_value) { | 
|   89   return __sync_lock_test_and_set(ptr, new_value); |  102   Atomic64 old_value; | 
 |  103   do { | 
 |  104     old_value = *ptr; | 
 |  105   } while (!__sync_bool_compare_and_swap(ptr, old_value, new_value)); | 
 |  106   return old_value; | 
|   90 } |  107 } | 
|   91  |  108  | 
|   92 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, |  109 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, | 
|   93                                           Atomic64 increment) { |  110                                           Atomic64 increment) { | 
|   94   return __sync_add_and_fetch(ptr, increment); |  111   return Barrier_AtomicIncrement(ptr, increment); | 
|   95 } |  112 } | 
|   96  |  113  | 
|   97 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, |  114 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, | 
|   98                                         Atomic64 increment) { |  115                                         Atomic64 increment) { | 
|   99   return __sync_add_and_fetch(ptr, increment); |  116   for (;;) { | 
 |  117     Atomic64 old_value = *ptr; | 
 |  118     Atomic64 new_value = old_value + increment; | 
 |  119     if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) { | 
 |  120       // The exchange took place as expected. | 
 |  121       return new_value; | 
 |  122     } | 
 |  123     // Otherwise, *ptr changed mid-loop and we need to retry. | 
 |  124   } | 
|  100 } |  125 } | 
|  101  |  126  | 
|  102 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |  127 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, | 
|  103                                        Atomic64 old_value, Atomic64 new_value) { |  128                                        Atomic64 old_value, Atomic64 new_value) { | 
|  104   return __sync_val_compare_and_swap(ptr, old_value, new_value); |  129   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 
|  105 } |  130 } | 
|  106  |  131  | 
|  107 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, |  132 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | 
|  108                                        Atomic64 old_value, Atomic64 new_value) { |  133                                        Atomic64 old_value, Atomic64 new_value) { | 
|  109   return __sync_val_compare_and_swap(ptr, old_value, new_value); |  134   return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 
|  110 } |  135 } | 
|  111  |  136  | 
|  112 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |  137 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { | 
|  113   __sync_lock_test_and_set(ptr, value); |  138   *ptr = value; | 
|  114 } |  139 } | 
|  115  |  140  | 
|  116 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { |  141 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { | 
|  117   __sync_lock_test_and_set(ptr, value); |  142   *ptr = value; | 
 |  143   MemoryBarrier(); | 
|  118 } |  144 } | 
|  119  |  145  | 
|  120 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { |  146 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { | 
|  121   __sync_lock_test_and_set(ptr, value); |  147   MemoryBarrier(); | 
 |  148   *ptr = value; | 
|  122 } |  149 } | 
|  123  |  150  | 
|  124 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { |  151 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; } | 
|  125   return __sync_add_and_fetch(ptr, 0); |  | 
|  126 } |  | 
|  127  |  152  | 
|  128 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |  153 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { | 
|  129   return __sync_add_and_fetch(ptr, 0); |  154   Atomic64 value = *ptr; | 
 |  155   MemoryBarrier(); | 
 |  156   return value; | 
|  130 } |  157 } | 
|  131  |  158  | 
|  132 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |  159 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 
|  133   return __sync_add_and_fetch(ptr, 0); |  160   MemoryBarrier(); | 
 |  161   return *ptr; | 
|  134 } |  162 } | 
 |  163  | 
 |  164 #endif  // V8_TARGET_ARCH_PPC64 | 
|  135 } |  165 } | 
|  136 }  // namespace v8::base |  166 }  // namespace v8::base | 
|  137  |  167  | 
|  138 #endif  // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |  168 #endif  // V8_BASE_ATOMICOPS_INTERNALS_PPC_H_ | 
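
For context on the barrier placement in the new file: the patch implements acquire/release semantics by pairing a plain load or store with a full PPC "sync", and implements exchange/increment as compare-and-swap retry loops. Below is a minimal, self-contained usage sketch (not part of the patch) showing how the Release_Store/Acquire_Load pair publishes data between threads and how Barrier_AtomicIncrement serves as a shared counter. The stand-in definitions mirror the patched code but substitute the portable __sync_synchronize() for the inline "sync" so the sketch compiles on any GCC-compatible target; Atomic32 is assumed to be int32_t, matching the typedef in atomicops.h.

// Usage sketch only -- not part of the patch. Stand-ins mirror the PPC
// implementations above, with __sync_synchronize() in place of "sync".
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef int32_t Atomic32;  // assumed; matches the v8::base typedef

static inline void MemoryBarrier() { __sync_synchronize(); }  // "sync" on PPC

static inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();  // earlier writes become visible before the flag write
  *ptr = value;
}

static inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();  // the flag read completes before any later reads
  return value;
}

static inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                               Atomic32 increment) {
  for (;;) {  // CAS retry loop, as in the patch
    Atomic32 old_value = *ptr;
    Atomic32 new_value = old_value + increment;
    if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) {
      return new_value;
    }
  }
}

static int payload = 0;               // plain data published via the flag
static volatile Atomic32 ready = 0;   // the flag
static volatile Atomic32 visits = 0;  // shared counter

static void* producer(void* unused) {
  payload = 42;
  Release_Store(&ready, 1);  // barrier before the store: payload visible first
  Barrier_AtomicIncrement(&visits, 1);
  return NULL;
}

static void* consumer(void* unused) {
  while (Acquire_Load(&ready) == 0) {}  // barrier after the load
  printf("payload = %d\n", payload);    // prints 42, never a stale 0
  Barrier_AtomicIncrement(&visits, 1);
  return NULL;
}

int main() {
  pthread_t p, c;
  pthread_create(&p, NULL, producer, NULL);
  pthread_create(&c, NULL, consumer, NULL);
  pthread_join(p, NULL);
  pthread_join(c, NULL);
  printf("visits = %d\n", (int)visits);  // 2: the increments are atomic
  return 0;
}

Note the asymmetry: Release_Store issues its barrier before the store, while Acquire_Load issues its barrier after the load. That ordering is what makes the publish pattern safe, and it is also why the Acquire_Store and Release_Load variants in the patch place their barrier on the opposite side of the memory access.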