Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "platform/heap/SafePoint.h" | 5 #include "platform/heap/SafePoint.h" |
| 6 | 6 |
| 7 #include "wtf/Atomics.h" | 7 #include "wtf/Atomics.h" |
| 8 | 8 |
| 9 namespace blink { | 9 namespace blink { |
| 10 | 10 |
| 11 using PushAllRegistersCallback = void (*)(SafePointBarrier*, ThreadState*, intptr_t*); | 11 using PushAllRegistersCallback = void (*)(SafePointBarrier*, ThreadState*, intptr_t*); |
| 12 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback); | 12 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback); |
| 13 | 13 |
| 14 static double lockingTimeout() | 14 static double lockingTimeout() |
| 15 { | 15 { |
| 16 // Wait time for parking all threads is at most 100 ms. | 16 // Wait time for parking all threads is at most 100 ms. |
| 17 return 0.100; | 17 return 0.100; |
| 18 } | 18 } |
| 19 | 19 |
| 20 SafePointBarrier::SafePointBarrier() | 20 SafePointBarrier::SafePointBarrier(MultiThreadGCGroup* gcGroup) |
| 21 : m_canResume(1) | 21 : m_canResume(1) |
| 22 , m_unparkedThreadCount(0) | 22 , m_unparkedThreadCount(0) |
| 23 , m_gcGroup(gcGroup) | |
| 23 { | 24 { |
| 24 } | 25 } |
| 25 | 26 |
| 26 SafePointBarrier::~SafePointBarrier() | 27 SafePointBarrier::~SafePointBarrier() |
| 27 { | 28 { |
| 28 } | 29 } |
| 29 | 30 |
| 30 bool SafePointBarrier::parkOthers() | 31 bool SafePointBarrier::parkOthers() |
| 31 { | 32 { |
| 32 ASSERT(ThreadState::current()->isAtSafePoint()); | 33 ASSERT(ThreadState::current()->isAtSafePoint()); |
| 33 | 34 |
| 34 ThreadState* current = ThreadState::current(); | |
| 35 // Lock threadAttachMutex() to prevent threads from attaching. | 35 // Lock threadAttachMutex() to prevent threads from attaching. |
| 36 ThreadState::lockThreadAttachMutex(); | 36 m_gcGroup->lockThreadAttachMutex(); |
| 37 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 37 const HashSet<ThreadState*>& threads = m_gcGroup->threads(); |
| 38 | 38 |
| 39 MutexLocker locker(m_mutex); | 39 MutexLocker locker(m_mutex); |
| 40 atomicAdd(&m_unparkedThreadCount, threads.size()); | 40 atomicAdd(&m_unparkedThreadCount, threads.size()); |
| 41 releaseStore(&m_canResume, 0); | 41 releaseStore(&m_canResume, 0); |
| 42 | 42 |
| 43 for (ThreadState* state : threads) { | 43 for (ThreadState* state : threads) { |
| 44 if (state == current) | 44 if (state->isAtSafePoint()) // MEMO: original code: if (state == current) |
|
haraken
2016/01/28 15:52:50
Why do you need to make this change?
This change
keishi
2016/02/29 06:02:33
Done. Just thought it made more sense.
| |
| 45 continue; | 45 continue; |
| 46 | 46 |
| 47 for (auto& interruptor : state->interruptors()) | 47 for (auto& interruptor : state->interruptors()) |
| 48 interruptor->requestInterrupt(); | 48 interruptor->requestInterrupt(); |
| 49 } | 49 } |
| 50 | 50 |
| 51 while (acquireLoad(&m_unparkedThreadCount) > 0) { | 51 while (acquireLoad(&m_unparkedThreadCount) > 0) { |
| 52 double expirationTime = currentTime() + lockingTimeout(); | 52 double expirationTime = currentTime() + lockingTimeout(); |
| 53 if (!m_parked.timedWait(m_mutex, expirationTime)) { | 53 if (!m_parked.timedWait(m_mutex, expirationTime)) { |
| 54 // One of the other threads did not return to a safepoint within the maximum | 54 // One of the other threads did not return to a safepoint within the maximum |
| 55 // time we allow for threads to be parked. Abandon the GC and resume the | 55 // time we allow for threads to be parked. Abandon the GC and resume the |
| 56 // currently parked threads. | 56 // currently parked threads. |
| 57 resumeOthers(true); | 57 resumeOthers(true); |
| 58 return false; | 58 return false; |
| 59 } | 59 } |
| 60 } | 60 } |
| 61 return true; | 61 return true; |
| 62 } | 62 } |
| 63 | 63 |
| 64 void SafePointBarrier::resumeOthers(bool barrierLocked) | 64 void SafePointBarrier::resumeOthers(bool barrierLocked) |
| 65 { | 65 { |
| 66 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 66 const HashSet<ThreadState*>& threads = m_gcGroup->threads(); |
| 67 atomicSubtract(&m_unparkedThreadCount, threads.size()); | 67 atomicSubtract(&m_unparkedThreadCount, threads.size()); |
| 68 releaseStore(&m_canResume, 1); | 68 releaseStore(&m_canResume, 1); |
| 69 | 69 |
| 70 if (UNLIKELY(barrierLocked)) { | 70 if (UNLIKELY(barrierLocked)) { |
| 71 m_resume.broadcast(); | 71 m_resume.broadcast(); |
| 72 } else { | 72 } else { |
| 73 // FIXME: Resumed threads will all contend for m_mutex just | 73 // FIXME: Resumed threads will all contend for m_mutex just |
| 74 // to unlock it later which is a waste of resources. | 74 // to unlock it later which is a waste of resources. |
| 75 MutexLocker locker(m_mutex); | 75 MutexLocker locker(m_mutex); |
| 76 m_resume.broadcast(); | 76 m_resume.broadcast(); |
| 77 } | 77 } |
| 78 | 78 |
| 79 ThreadState::unlockThreadAttachMutex(); | 79 m_gcGroup->unlockThreadAttachMutex(); |
| 80 ASSERT(ThreadState::current()->isAtSafePoint()); | 80 ASSERT(ThreadState::current()->isAtSafePoint()); |
| 81 } | 81 } |
| 82 | 82 |
| 83 void SafePointBarrier::checkAndPark(ThreadState* state, SafePointAwareMutexLocker* locker) | 83 void SafePointBarrier::checkAndPark(ThreadState* state, SafePointAwareMutexLocker* locker) |
| 84 { | 84 { |
| 85 ASSERT(!state->sweepForbidden()); | 85 ASSERT(!state->sweepForbidden()); |
| 86 if (!acquireLoad(&m_canResume)) { | 86 if (!acquireLoad(&m_canResume)) { |
| 87 // If we are leaving the safepoint from a SafePointAwareMutexLocker | 87 // If we are leaving the safepoint from a SafePointAwareMutexLocker |
| 88 // call out to release the lock before going to sleep. This enables the | 88 // call out to release the lock before going to sleep. This enables the |
| 89 // lock to be acquired in the sweep phase, e.g. during weak processing | 89 // lock to be acquired in the sweep phase, e.g. during weak processing |
| (...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 130 // If no other thread is waiting for other threads to park then | 130 // If no other thread is waiting for other threads to park then |
| 131 // this counter can be negative: if N threads are at safe-points | 131 // this counter can be negative: if N threads are at safe-points |
| 132 // the counter will be -N. | 132 // the counter will be -N. |
| 133 if (!atomicDecrement(&m_unparkedThreadCount)) { | 133 if (!atomicDecrement(&m_unparkedThreadCount)) { |
| 134 MutexLocker locker(m_mutex); | 134 MutexLocker locker(m_mutex); |
| 135 m_parked.signal(); // Safe point reached. | 135 m_parked.signal(); // Safe point reached. |
| 136 } | 136 } |
| 137 } | 137 } |
| 138 | 138 |
| 139 } // namespace blink | 139 } // namespace blink |
| OLD | NEW |