Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(168)

Side by Side Diff: third_party/tcmalloc/chromium/src/base/spinlock.cc

Issue 7050034: Merge google-perftools r109 (the current contents of third_party/tcmalloc/vendor) (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: '' Created 9 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* Copyright (c) 2006, Google Inc. 1 /* Copyright (c) 2006, Google Inc.
2 * All rights reserved. 2 * All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 14 matching lines...) Expand all
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 * 29 *
30 * --- 30 * ---
31 * Author: Sanjay Ghemawat 31 * Author: Sanjay Ghemawat
32 */ 32 */
33 33
34 #include <config.h> 34 #include <config.h>
35 #include <time.h> /* For nanosleep() */
36 #ifdef HAVE_SCHED_H
37 #include <sched.h> /* For sched_yield() */
38 #endif
39 #ifdef HAVE_UNISTD_H
40 #include <unistd.h> /* For read() */
41 #endif
42 #include <fcntl.h> /* for open(), O_RDONLY */
43 #include <string.h> /* for strncmp */
44 #include <errno.h>
45 #include "base/spinlock.h" 35 #include "base/spinlock.h"
36 #include "base/synchronization_profiling.h"
37 #include "base/spinlock_internal.h"
46 #include "base/cycleclock.h" 38 #include "base/cycleclock.h"
47 #include "base/sysinfo.h" /* for NumCPUs() */ 39 #include "base/sysinfo.h" /* for NumCPUs() */
48 40
// NOTE on the Lock-state values:
//
// kSpinLockFree represents the unlocked state
// kSpinLockHeld represents the locked state with no waiters
//
// Values greater than kSpinLockHeld represent the locked state with waiters,
// where the value is the time the current lock holder had to
// wait before obtaining the lock.  The kSpinLockSleeper state is a special
// "locked with waiters" state that indicates that a sleeper needs to
// be woken, but the thread that just released the lock didn't wait.
// Number of busy-wait iterations SpinLoop() performs before giving up and
// letting SlowLock() fall back to an OS-level wait.  Starts at zero (no
// adaptive spinning) and is raised to 1000 on multi-CPU machines by the
// SpinLock_InitHelper global constructor elsewhere in this file.
static int adaptive_spin_count = 0;

const base::LinkerInitialized SpinLock::LINKER_INITIALIZED =
    base::LINKER_INITIALIZED;
59 56
60 // The OS-specific header included below must provide two calls:
61 // Wait until *w becomes zero, atomically set it to 1 and return.
62 // static void SpinLockWait(volatile Atomic32 *w);
63 //
64 // Hint that a thread waiting in SpinLockWait() could now make progress. May
65 // do nothing. This call may not read or write *w; it must use only the
66 // address.
67 // static void SpinLockWake(volatile Atomic32 *w);
68 #if defined(_WIN32)
69 #include "base/spinlock_win32-inl.h"
70 #elif defined(__linux__)
71 #include "base/spinlock_linux-inl.h"
72 #else
73 #include "base/spinlock_posix-inl.h"
74 #endif
75
76 namespace { 57 namespace {
77 struct SpinLock_InitHelper { 58 struct SpinLock_InitHelper {
78 SpinLock_InitHelper() { 59 SpinLock_InitHelper() {
79 // On multi-cpu machines, spin for longer before yielding 60 // On multi-cpu machines, spin for longer before yielding
80 // the processor or sleeping. Reduces idle time significantly. 61 // the processor or sleeping. Reduces idle time significantly.
81 if (NumCPUs() > 1) { 62 if (NumCPUs() > 1) {
82 adaptive_spin_count = 1000; 63 adaptive_spin_count = 1000;
83 } 64 }
84 } 65 }
85 }; 66 };
86 67
87 // Hook into global constructor execution: 68 // Hook into global constructor execution:
88 // We do not do adaptive spinning before that, 69 // We do not do adaptive spinning before that,
89 // but nothing lock-intensive should be going on at that time. 70 // but nothing lock-intensive should be going on at that time.
90 static SpinLock_InitHelper init_helper; 71 static SpinLock_InitHelper init_helper;
91 72
92 } // unnamed namespace 73 } // unnamed namespace
93 74
// Monitor the lock to see if its value changes within some time period
// (adaptive_spin_count loop iterations).  A timestamp indicating
// when the thread initially started waiting for the lock is passed in via
// the initial_wait_timestamp value.  The total wait time in cycles for the
// lock is returned in the wait_cycles parameter.  The last value read
// from the lock is returned from the method.
Atomic32 SpinLock::SpinLoop(int64 initial_wait_timestamp,
                            Atomic32* wait_cycles) {
  // Busy-wait until the lockword looks free or the adaptive spin budget
  // runs out.  A plain (no-barrier) load is sufficient here because the
  // acquire semantics are supplied by the CAS below.
  int c = adaptive_spin_count;
  while (base::subtle::NoBarrier_Load(&lockword_) != kSpinLockFree && --c > 0) {
  }
  // Value to store into the lockword on a successful acquisition: the
  // scaled wait time, which CalculateWaitCycles() guarantees is never
  // kSpinLockFree or kSpinLockHeld.
  Atomic32 spin_loop_wait_cycles = CalculateWaitCycles(initial_wait_timestamp);
  // One acquisition attempt; on failure the observed lockword value is
  // returned to the caller so it can decide how to wait.
  Atomic32 lock_value =
      base::subtle::Acquire_CompareAndSwap(&lockword_, kSpinLockFree,
                                           spin_loop_wait_cycles);
  *wait_cycles = spin_loop_wait_cycles;
  return lock_value;
}
94 93
// Slow path of Lock(): called when the initial fast-path acquisition
// attempt failed.  Alternates between spinning (SpinLoop) and an
// OS-specific delay (SpinLockDelay) until the lock is obtained.
void SpinLock::SlowLock() {
  // The lock was not obtained initially, so this thread needs to wait for
  // it.  Record the current timestamp in the local variable wait_start_time
  // so the total wait time can be stored in the lockword once this thread
  // obtains the lock.
  int64 wait_start_time = CycleClock::Now();
  Atomic32 wait_cycles;
  Atomic32 lock_value = SpinLoop(wait_start_time, &wait_cycles);

  // Counts calls to SpinLockDelay so the delay routine can adapt its
  // behavior over successive waits.
  int lock_wait_call_count = 0;
  while (lock_value != kSpinLockFree) {
    // If the lock is currently held, but not marked as having a sleeper, mark
    // it as having a sleeper.
    if (lock_value == kSpinLockHeld) {
      // Here, just "mark" that the thread is going to sleep.  Don't store the
      // lock wait time in the lock as that will cause the current lock
      // owner to think it experienced contention.
      lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
                                                        kSpinLockHeld,
                                                        kSpinLockSleeper);
      if (lock_value == kSpinLockHeld) {
        // Successfully transitioned to kSpinLockSleeper.  Pass
        // kSpinLockSleeper to the SpinLockWait routine to properly indicate
        // the last lock_value observed.
        lock_value = kSpinLockSleeper;
      } else if (lock_value == kSpinLockFree) {
        // Lock is free again, so try and acquire it before sleeping.  The
        // new lock state will be the number of cycles this thread waited if
        // this thread obtains the lock.
        lock_value = base::subtle::Acquire_CompareAndSwap(&lockword_,
                                                          kSpinLockFree,
                                                          wait_cycles);
        continue;  // skip the delay at the end of the loop
      }
    }

    // Wait for an OS specific delay.
    base::internal::SpinLockDelay(&lockword_, lock_value,
                                  ++lock_wait_call_count);
    // Spin again after returning from the wait routine to give this thread
    // some chance of obtaining the lock.
    lock_value = SpinLoop(wait_start_time, &wait_cycles);
  }
}
119 138
// The wait time for contentionz lock profiling must fit into 32 bits.
// However, the lower 32-bits of the cycle counter wrap around too quickly
// with high frequency processors, so a right-shift by 7 is performed to
// quickly divide the cycles by 128.  Using these 32 bits, reduces the
// granularity of time measurement to 128 cycles, and loses track
// of wait time for waits greater than 109 seconds on a 5 GHz machine
// [(2^32 cycles/5 Ghz)*128 = 109.95 seconds].  Waits this long should be
// very rare and the reduced granularity should not be an issue given
// processors in the Google fleet operate at a minimum of one billion
// cycles/sec.
enum { PROFILE_TIMESTAMP_SHIFT = 7 };

// Slow path of Unlock(): called when the lockword held a "waiters" value,
// i.e. some other thread may be sleeping on this lock.  wait_cycles is the
// (shifted) wait time that was stored in the lockword by the acquirer.
void SpinLock::SlowUnlock(uint64 wait_cycles) {
  base::internal::SpinLockWake(&lockword_, false);  // wake waiter if necessary

  // Collect contentionz profile info, expanding the wait_cycles back out to
  // the full value.  If wait_cycles is <= kSpinLockSleeper, then no wait
  // was actually performed, so don't record the wait time.  Note, that the
  // CalculateWaitCycles method adds in kSpinLockSleeper cycles
  // unconditionally to guarantee the wait time is not kSpinLockFree or
  // kSpinLockHeld.  The adding in of these small number of cycles may
  // overestimate the contention by a slight amount 50% of the time.  However,
  // if this code tried to correct for that addition by subtracting out the
  // kSpinLockSleeper amount that would underestimate the contention slightly
  // 50% of the time.  Both ways get the wrong answer, so the code
  // overestimates to be more conservative.  Overestimating also makes the code
  // a little simpler.
  //
  if (wait_cycles > kSpinLockSleeper) {
    base::SubmitSpinLockProfileData(this,
                                    wait_cycles << PROFILE_TIMESTAMP_SHIFT);
  }
}
172
// Converts the elapsed time since wait_start_time into the scaled 32-bit
// wait-cycle value that is stored in the lockword (and later reported to
// the contention profiler by SlowUnlock).
inline int32 SpinLock::CalculateWaitCycles(int64 wait_start_time) {
  // Right-shift compresses the cycle count so it fits in 32 bits; see the
  // PROFILE_TIMESTAMP_SHIFT comment for the granularity trade-off.
  int32 wait_cycles = ((CycleClock::Now() - wait_start_time) >>
                       PROFILE_TIMESTAMP_SHIFT);
  // The number of cycles waiting for the lock is used as both the
  // wait_cycles and lock value, so it can't be kSpinLockFree or
  // kSpinLockHeld.  Make sure the value returned is at least
  // kSpinLockSleeper.
  wait_cycles |= kSpinLockSleeper;
  return wait_cycles;
}
OLDNEW
« no previous file with comments | « third_party/tcmalloc/chromium/src/base/spinlock.h ('k') | third_party/tcmalloc/chromium/src/base/spinlock_internal.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698