| OLD | NEW |
| 1 /* Copyright (c) 2006, Google Inc. | 1 /* Copyright (c) 2006, Google Inc. |
| 2 * All rights reserved. | 2 * All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 26 matching lines...) Expand all Loading... |
| 37 // of a compare-and-swap which is expensive). | 37 // of a compare-and-swap which is expensive). |
| 38 | 38 |
| 39 // SpinLock is async signal safe. | 39 // SpinLock is async signal safe. |
| 40 // If used within a signal handler, all lock holders | 40 // If used within a signal handler, all lock holders |
| 41 // should block the signal even outside the signal handler. | 41 // should block the signal even outside the signal handler. |
| 42 | 42 |
| 43 #ifndef BASE_SPINLOCK_H_ | 43 #ifndef BASE_SPINLOCK_H_ |
| 44 #define BASE_SPINLOCK_H_ | 44 #define BASE_SPINLOCK_H_ |
| 45 | 45 |
| 46 #include <config.h> | 46 #include <config.h> |
| 47 #include "base/atomicops.h" |
| 47 #include "base/basictypes.h" | 48 #include "base/basictypes.h" |
| 48 #include "base/atomicops.h" | |
| 49 #include "base/dynamic_annotations.h" | 49 #include "base/dynamic_annotations.h" |
| 50 #include "base/thread_annotations.h" | 50 #include "base/thread_annotations.h" |
| 51 | 51 |
| 52 class LOCKABLE SpinLock { | 52 class LOCKABLE SpinLock { |
| 53 public: | 53 public: |
| 54 SpinLock() : lockword_(0) { } | 54 SpinLock() : lockword_(kSpinLockFree) { } |
| 55 | 55 |
| 56 // Special constructor for use with static SpinLock objects. E.g., | 56 // Special constructor for use with static SpinLock objects. E.g., |
| 57 // | 57 // |
| 58 // static SpinLock lock(base::LINKER_INITIALIZED); | 58 // static SpinLock lock(base::LINKER_INITIALIZED); |
| 59 // | 59 // |
| 60 // When initialized using this constructor, we depend on the fact | 60 // When initialized using this constructor, we depend on the fact |
| 61 // that the linker has already initialized the memory appropriately. | 61 // that the linker has already initialized the memory appropriately. |
| 62 // A SpinLock constructed like this can be freely used from global | 62 // A SpinLock constructed like this can be freely used from global |
| 63 // initializers without worrying about the order in which global | 63 // initializers without worrying about the order in which global |
| 64 // initializers run. | 64 // initializers run. |
| 65 explicit SpinLock(base::LinkerInitialized /*x*/) { | 65 explicit SpinLock(base::LinkerInitialized /*x*/) { |
| 66 // Does nothing; lockword_ is already initialized | 66 // Does nothing; lockword_ is already initialized |
| 67 } | 67 } |
| 68 | 68 |
| 69 // Acquire this SpinLock. | 69 // Acquire this SpinLock. |
| 70 // TODO(csilvers): uncomment the annotation when we figure out how to | 70 // TODO(csilvers): uncomment the annotation when we figure out how to |
| 71 // support this macro with 0 args (see thread_annotations.h) | 71 // support this macro with 0 args (see thread_annotations.h) |
| 72 inline void Lock() /*EXCLUSIVE_LOCK_FUNCTION()*/ { | 72 inline void Lock() /*EXCLUSIVE_LOCK_FUNCTION()*/ { |
| 73 if (Acquire_CompareAndSwap(&lockword_, 0, 1) != 0) { | 73 if (base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree, |
| 74 kSpinLockHeld) != kSpinLockFree) { |
| 74 SlowLock(); | 75 SlowLock(); |
| 75 } | 76 } |
| 76 ANNOTATE_RWLOCK_ACQUIRED(this, 1); | 77 ANNOTATE_RWLOCK_ACQUIRED(this, 1); |
| 77 } | 78 } |
| 78 | 79 |
| 79 // Acquire this SpinLock and return true if the acquisition can be | 80 // Try to acquire this SpinLock without blocking and return true if the |
| 80 // done without blocking, else return false. If this SpinLock is | 81 // acquisition was successful. If the lock was not acquired, false is |
| 81 // free at the time of the call, TryLock will return true with high | 82 // returned. If this SpinLock is free at the time of the call, TryLock |
| 82 // probability. | 83 // will return true with high probability. |
| 83 inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) { | 84 inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) { |
| 84 bool res = (Acquire_CompareAndSwap(&lockword_, 0, 1) == 0); | 85 bool res = |
| 86 (base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree, |
| 87 kSpinLockHeld) == kSpinLockFree); |
| 85 if (res) { | 88 if (res) { |
| 86 ANNOTATE_RWLOCK_ACQUIRED(this, 1); | 89 ANNOTATE_RWLOCK_ACQUIRED(this, 1); |
| 87 } | 90 } |
| 88 return res; | 91 return res; |
| 89 } | 92 } |
| 90 | 93 |
| 91 // Release this SpinLock, which must be held by the calling thread. | 94 // Release this SpinLock, which must be held by the calling thread. |
| 92 // TODO(csilvers): uncomment the annotation when we figure out how to | 95 // TODO(csilvers): uncomment the annotation when we figure out how to |
| 93 // support this macro with 0 args (see thread_annotations.h) | 96 // support this macro with 0 args (see thread_annotations.h) |
| 94 inline void Unlock() /*UNLOCK_FUNCTION()*/ { | 97 inline void Unlock() /*UNLOCK_FUNCTION()*/ { |
| 95 // This is defined in mutex.cc. | 98 uint64 wait_cycles = |
| 96 extern void SubmitSpinLockProfileData(const void *, int64); | 99 static_cast<uint64>(base::subtle::NoBarrier_Load(&lockword_)); |
| 97 | |
| 98 int64 wait_timestamp = static_cast<uint32>(lockword_); | |
| 99 ANNOTATE_RWLOCK_RELEASED(this, 1); | 100 ANNOTATE_RWLOCK_RELEASED(this, 1); |
| 100 Release_Store(&lockword_, 0); | 101 base::subtle::Release_Store(&lockword_, kSpinLockFree); |
| 101 if (wait_timestamp != 1) { | 102 if (wait_cycles != kSpinLockHeld) { |
| 102 // Collect contentionz profile info, and speed the wakeup of any waiter. | 103 // Collect contentionz profile info, and speed the wakeup of any waiter. |
| 103 // The lockword_ value indicates when the waiter started waiting. | 104 // The wait_cycles value indicates how long this thread spent waiting |
| 104 SlowUnlock(wait_timestamp); | 105 // for the lock. |
| 106 SlowUnlock(wait_cycles); |
| 105 } | 107 } |
| 106 } | 108 } |
| 107 | 109 |
| 108 // Report if we think the lock can be held by this thread. | 110 // Determine if the lock is held. When the lock is held by the invoking |
| 109 // When the lock is truly held by the invoking thread | 111 // thread, true will always be returned. Intended to be used as |
| 110 // we will always return true. | 112 // CHECK(lock.IsHeld()). |
| 111 // Intended to be used as CHECK(lock.IsHeld()); | |
| 112 inline bool IsHeld() const { | 113 inline bool IsHeld() const { |
| 113 return lockword_ != 0; | 114 return base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree; |
| 114 } | 115 } |
| 115 | 116 |
| 116 // The timestamp for contention lock profiling must fit into 31 bits. | |
| 117 // as lockword_ is 32 bits and we lose an additional low-order bit due | |
| 118 // to the statement "now |= 1" in SlowLock(). | |
| 119 // To select 31 bits from the 64-bit cycle counter, we shift right by | |
| 120 // PROFILE_TIMESTAMP_SHIFT = 7. | |
| 121 // Using these 31 bits, we reduce granularity of time measurement to | |
| 122 // 256 cycles, and will lose track of wait time for waits greater than | |
| 123 // 109 seconds on a 5 GHz machine, longer for faster clock cycles. | |
| 124 // Waits this long should be very rare. | |
| 125 enum { PROFILE_TIMESTAMP_SHIFT = 7 }; | |
| 126 | |
| 127 static const base::LinkerInitialized LINKER_INITIALIZED; // backwards compat | 117 static const base::LinkerInitialized LINKER_INITIALIZED; // backwards compat |
| 128 private: | 118 private: |
| 129 // Lock-state: 0 means unlocked; 1 means locked with no waiters; values | 119 enum { kSpinLockFree = 0 }; |
| 130 // greater than 1 indicate locked with waiters, where the value is the time | 120 enum { kSpinLockHeld = 1 }; |
| 131 // the first waiter started waiting and is used for contention profiling. | 121 enum { kSpinLockSleeper = 2 }; |
| 122 |
| 132 volatile Atomic32 lockword_; | 123 volatile Atomic32 lockword_; |
| 133 | 124 |
| 134 void SlowLock(); | 125 void SlowLock(); |
| 135 void SlowUnlock(int64 wait_timestamp); | 126 void SlowUnlock(uint64 wait_cycles); |
| 127 Atomic32 SpinLoop(int64 initial_wait_timestamp, Atomic32* wait_cycles); |
| 128 inline int32 CalculateWaitCycles(int64 wait_start_time); |
| 136 | 129 |
| 137 DISALLOW_COPY_AND_ASSIGN(SpinLock); | 130 DISALLOW_COPY_AND_ASSIGN(SpinLock); |
| 138 }; | 131 }; |
| 139 | 132 |
| 140 // Corresponding locker object that arranges to acquire a spinlock for | 133 // Corresponding locker object that arranges to acquire a spinlock for |
| 141 // the duration of a C++ scope. | 134 // the duration of a C++ scope. |
| 142 class SCOPED_LOCKABLE SpinLockHolder { | 135 class SCOPED_LOCKABLE SpinLockHolder { |
| 143 private: | 136 private: |
| 144 SpinLock* lock_; | 137 SpinLock* lock_; |
| 145 public: | 138 public: |
| 146 inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l) | 139 inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l) |
| 147 : lock_(l) { | 140 : lock_(l) { |
| 148 l->Lock(); | 141 l->Lock(); |
| 149 } | 142 } |
| 150 // TODO(csilvers): uncomment the annotation when we figure out how to | 143 // TODO(csilvers): uncomment the annotation when we figure out how to |
| 151 // support this macro with 0 args (see thread_annotations.h) | 144 // support this macro with 0 args (see thread_annotations.h) |
| 152 inline ~SpinLockHolder() /*UNLOCK_FUNCTION()*/ { lock_->Unlock(); } | 145 inline ~SpinLockHolder() /*UNLOCK_FUNCTION()*/ { lock_->Unlock(); } |
| 153 }; | 146 }; |
| 154 // Catch bug where variable name is omitted, e.g. SpinLockHolder (&lock); | 147 // Catch bug where variable name is omitted, e.g. SpinLockHolder (&lock); |
| 155 #define SpinLockHolder(x) COMPILE_ASSERT(0, spin_lock_decl_missing_var_name) | 148 #define SpinLockHolder(x) COMPILE_ASSERT(0, spin_lock_decl_missing_var_name) |
| 156 | 149 |
| 157 | 150 |
| 158 #endif // BASE_SPINLOCK_H_ | 151 #endif // BASE_SPINLOCK_H_ |
| OLD | NEW |