OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "platform/heap/SafePoint.h" | 5 #include "platform/heap/SafePoint.h" |
6 | 6 |
7 #include "platform/heap/Heap.h" | 7 #include "platform/heap/Heap.h" |
8 #include "wtf/Atomics.h" | 8 #include "wtf/Atomics.h" |
9 #include "wtf/CurrentTime.h" | 9 #include "wtf/CurrentTime.h" |
10 | 10 |
11 namespace blink { | 11 namespace blink { |
12 | 12 |
13 using PushAllRegistersCallback = void (*)(SafePointBarrier*, ThreadState*, intpt
r_t*); | 13 using PushAllRegistersCallback = void (*)(SafePointBarrier*, ThreadState*, intpt
r_t*); |
14 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegiste
rsCallback); | 14 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegiste
rsCallback); |
15 | 15 |
// Upper bound on how long parkOthers() waits for all other threads to
// reach their safepoints before abandoning the GC.
static double lockingTimeout()
{
    // At most 100 ms, expressed in seconds.
    const double maxParkWaitSeconds = 0.100;
    return maxParkWaitSeconds;
}
21 | 21 |
// Both members start in the "no GC pending" state: no park request is
// outstanding and the unparked-thread counter is at its baseline of zero.
// (The counter goes negative as threads sit at safepoints; see
// doEnterSafePoint().)
SafePointBarrier::SafePointBarrier()
    : m_unparkedThreadCount(0)
    , m_parkingRequested(0)
{
}
27 | 27 |
28 SafePointBarrier::~SafePointBarrier() | 28 SafePointBarrier::~SafePointBarrier() |
29 { | 29 { |
30 } | 30 } |
31 | 31 |
// Parks every attached thread other than the caller at a safepoint so a GC
// can proceed. Returns true once all other threads have parked; returns
// false if any thread fails to park within lockingTimeout(), in which case
// the already-parked threads are resumed and the caller should abandon the
// GC. On success the thread-attach mutex is left held until resumeOthers().
bool SafePointBarrier::parkOthers()
{
    ASSERT(ThreadState::current()->isAtSafePoint());

    ThreadState* current = ThreadState::current();
    // Lock threadAttachMutex() to prevent threads from attaching.
    current->lockThreadAttachMutex();
    const ThreadStateSet& threads = current->heap().threads();

    MutexLocker locker(m_mutex);
    // The counter is -N while N threads (including this one) sit at
    // safepoints, so adding threads.size() yields the number of threads
    // still running; each decrements it as it parks.
    atomicAdd(&m_unparkedThreadCount, threads.size());
    // Publish the park request after the counter update so parking threads
    // observe a consistent count.
    releaseStore(&m_parkingRequested, 1);

    for (ThreadState* state : threads) {
        if (state == current)
            continue;

        // Nudge the thread toward its next safepoint check.
        for (auto& interruptor : state->interruptors())
            interruptor->requestInterrupt();
    }

    // m_parked is signaled by the last thread to park (doPark() /
    // doEnterSafePoint()). Note the timeout restarts on each (spurious)
    // wakeup, so the total wait can slightly exceed lockingTimeout().
    while (acquireLoad(&m_unparkedThreadCount) > 0) {
        double expirationTime = currentTime() + lockingTimeout();
        if (!m_parked.timedWait(m_mutex, expirationTime)) {
            // One of the other threads did not return to a safepoint within the maximum
            // time we allow for threads to be parked. Abandon the GC and resume the
            // currently parked threads.
            resumeOthers(true);
            return false;
        }
    }
    return true;
}
65 | 65 |
// Undoes parkOthers(): clears the park request, wakes all parked threads,
// and releases the thread-attach mutex taken in parkOthers().
// |barrierLocked| is true only on the timeout path of parkOthers(), where
// the caller's MutexLocker already holds m_mutex.
void SafePointBarrier::resumeOthers(bool barrierLocked)
{
    ThreadState* current = ThreadState::current();
    const ThreadStateSet& threads = current->heap().threads();
    // Reverse the atomicAdd from parkOthers(), returning the counter to
    // its negative "threads at safepoints" baseline.
    atomicSubtract(&m_unparkedThreadCount, threads.size());
    releaseStore(&m_parkingRequested, 0);

    if (UNLIKELY(barrierLocked)) {
        m_resume.broadcast();
    } else {
        // FIXME: Resumed threads will all contend for m_mutex just
        // to unlock it later which is a waste of resources.
        MutexLocker locker(m_mutex);
        m_resume.broadcast();
    }

    current->unlockThreadAttachMutex();
    ASSERT(ThreadState::current()->isAtSafePoint());
}
85 | 85 |
86 void SafePointBarrier::checkAndPark(ThreadState* state, SafePointAwareMutexLocke
r* locker) | 86 void SafePointBarrier::checkAndPark(ThreadState* state, SafePointAwareMutexLocke
r* locker) |
87 { | 87 { |
88 ASSERT(!state->sweepForbidden()); | 88 ASSERT(!state->sweepForbidden()); |
89 if (!acquireLoad(&m_canResume)) { | 89 if (acquireLoad(&m_parkingRequested)) { |
90 // If we are leaving the safepoint from a SafePointAwareMutexLocker | 90 // If we are leaving the safepoint from a SafePointAwareMutexLocker |
91 // call out to release the lock before going to sleep. This enables the | 91 // call out to release the lock before going to sleep. This enables the |
92 // lock to be acquired in the sweep phase, e.g. during weak processing | 92 // lock to be acquired in the sweep phase, e.g. during weak processing |
93 // or finalization. The SafePointAwareLocker will reenter the safepoint | 93 // or finalization. The SafePointAwareLocker will reenter the safepoint |
94 // and reacquire the lock after leaving this safepoint. | 94 // and reacquire the lock after leaving this safepoint. |
95 if (locker) | 95 if (locker) |
96 locker->reset(); | 96 locker->reset(); |
97 pushAllRegisters(this, state, parkAfterPushRegisters); | 97 pushAllRegisters(this, state, parkAfterPushRegisters); |
98 } | 98 } |
99 } | 99 } |
100 | 100 |
// Enters a safepoint for |state|. pushAllRegisters (the assembly routine
// declared above) spills registers to the stack before invoking the
// callback, so that pointer values held only in registers are within the
// stack range recorded by doEnterSafePoint().
void SafePointBarrier::enterSafePoint(ThreadState* state)
{
    ASSERT(!state->sweepForbidden());
    pushAllRegisters(this, state, enterSafePointAfterPushRegisters);
}
106 | 106 |
// Leaves the safepoint for |state|. The increment re-registers this thread
// as unparked; a resulting positive count means a GC has requested parking
// (parkOthers() added threads.size()), so immediately park again instead of
// resuming execution.
void SafePointBarrier::leaveSafePoint(ThreadState* state, SafePointAwareMutexLocker* locker)
{
    if (atomicIncrement(&m_unparkedThreadCount) > 0)
        checkAndPark(state, locker);
}
112 | 112 |
// Callback run with registers pushed: blocks this thread at the safepoint
// until the GC thread clears m_parkingRequested in resumeOthers().
void SafePointBarrier::doPark(ThreadState* state, intptr_t* stackEnd)
{
    // Record the stack extent so the conservative scan covers the pushed
    // registers.
    state->recordStackEnd(stackEnd);
    MutexLocker locker(m_mutex);
    // The last thread to park drops the count to zero and wakes the GC
    // thread waiting in parkOthers().
    if (!atomicDecrement(&m_unparkedThreadCount))
        m_parked.signal();
    // Loop guards against spurious wakeups.
    while (acquireLoad(&m_parkingRequested))
        m_resume.wait(m_mutex);
    // Count ourselves as unparked again before returning to execution.
    atomicIncrement(&m_unparkedThreadCount);
}
123 | 123 |
// Callback run with registers pushed when a thread enters a safepoint
// (without blocking, unlike doPark()).
void SafePointBarrier::doEnterSafePoint(ThreadState* state, intptr_t* stackEnd)
{
    state->recordStackEnd(stackEnd);
    state->copyStackUntilSafePointScope();
    // m_unparkedThreadCount is positive only while parkOthers() is waiting
    // for threads to park; the thread that brings it to zero notifies the
    // GC thread. With no GC pending the counter is simply negative: -N when
    // N threads are at safepoints.
    if (!atomicDecrement(&m_unparkedThreadCount)) {
        MutexLocker locker(m_mutex);
        m_parked.signal(); // Safe point reached.
    }
}
141 | 133 |
142 } // namespace blink | 134 } // namespace blink |
OLD | NEW |