| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 108 { | 108 { |
| 109 ASSERT(ThreadState::current()->isAtSafePoint()); | 109 ASSERT(ThreadState::current()->isAtSafePoint()); |
| 110 | 110 |
| 111 // Lock threadAttachMutex() to prevent threads from attaching. | 111 // Lock threadAttachMutex() to prevent threads from attaching. |
| 112 threadAttachMutex().lock(); | 112 threadAttachMutex().lock(); |
| 113 | 113 |
| 114 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThre
ads(); | 114 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThre
ads(); |
| 115 | 115 |
| 116 MutexLocker locker(m_mutex); | 116 MutexLocker locker(m_mutex); |
| 117 atomicAdd(&m_unparkedThreadCount, threads.size()); | 117 atomicAdd(&m_unparkedThreadCount, threads.size()); |
| 118 atomicSetOneToZero(&m_canResume); | 118 releaseStore(&m_canResume, 0); |
| 119 | 119 |
| 120 ThreadState* current = ThreadState::current(); | 120 ThreadState* current = ThreadState::current(); |
| 121 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(),
end = threads.end(); it != end; ++it) { | 121 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(),
end = threads.end(); it != end; ++it) { |
| 122 if (*it == current) | 122 if (*it == current) |
| 123 continue; | 123 continue; |
| 124 | 124 |
| 125 const Vector<ThreadState::Interruptor*>& interruptors = (*it)->inter
ruptors(); | 125 const Vector<ThreadState::Interruptor*>& interruptors = (*it)->inter
ruptors(); |
| 126 for (size_t i = 0; i < interruptors.size(); i++) | 126 for (size_t i = 0; i < interruptors.size(); i++) |
| 127 interruptors[i]->requestInterrupt(); | 127 interruptors[i]->requestInterrupt(); |
| 128 } | 128 } |
| 129 | 129 |
| 130 while (m_unparkedThreadCount > 0) | 130 while (acquireLoad(&m_unparkedThreadCount) > 0) |
| 131 m_parked.wait(m_mutex); | 131 m_parked.wait(m_mutex); |
| 132 } | 132 } |
| 133 | 133 |
    // Resumes all threads previously parked by the stop request: re-arms
    // m_canResume, wakes every thread blocked in doPark(), clears the
    // per-thread interrupt requests, and finally releases the thread-attach
    // mutex acquired by the paired stop method so new threads may attach again.
    // Must only be called by the thread that issued the stop request.
    void resumeOthers()
    {
        ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
        // Undo the bulk increment done when stopping; parked threads each
        // re-increment as they leave doPark().
        atomicSubtract(&m_unparkedThreadCount, threads.size());
        // Publish the resume flag with release semantics before waking the
        // waiters, so doPark()'s acquireLoad observes it.
        releaseStore(&m_canResume, 1);
        {
            // FIXME: Resumed threads will all contend for
            // m_mutex just to unlock it later which is a waste of
            // resources.
            MutexLocker locker(m_mutex);
            m_resume.broadcast();
        }

        ThreadState* current = ThreadState::current();
        for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
            // The calling thread never interrupted itself, so skip it.
            if (*it == current)
                continue;

            // Clear the interrupt requested on every other thread when the
            // stop was initiated.
            const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
            for (size_t i = 0; i < interruptors.size(); i++)
                interruptors[i]->clearInterrupt();
        }

        // Matches the lock() taken when parking others; allows thread
        // attach/detach to proceed again.
        threadAttachMutex().unlock();
        ASSERT(ThreadState::current()->isAtSafePoint());
    }
| 160 | 160 |
    // Parks the calling thread at a safe point until the stopping thread
    // broadcasts m_resume. stackEnd is recorded on the ThreadState first so
    // the collector can bound this thread's stack scan.
    // NOTE(review): the thread that decrements m_unparkedThreadCount to zero
    // signals m_parked — presumably waking the stopping thread, which waits
    // for that counter to drain; confirm against the stop method above.
    void doPark(ThreadState* state, intptr_t* stackEnd)
    {
        state->recordStackEnd(stackEnd);
        MutexLocker locker(m_mutex);
        // Last thread to park wakes the waiter blocked on m_parked.
        if (!atomicDecrement(&m_unparkedThreadCount))
            m_parked.signal();
        // Standard condition-variable pattern: re-check the predicate after
        // every wakeup to tolerate spurious wakeups. acquireLoad pairs with
        // the releaseStore of m_canResume performed on resume.
        while (!acquireLoad(&m_canResume))
            m_resume.wait(m_mutex);
        // Count ourselves as unparked again.
        atomicIncrement(&m_unparkedThreadCount);
    }
| 171 | 171 |
    // Fast-path safe-point check: if a stop has been requested (m_canResume
    // cleared), push all callee-saved registers onto the stack so the
    // collector can scan them, then park via parkAfterPushRegisters.
    // Pending sweep work is performed after the thread is resumed.
    // The lock-free acquireLoad keeps the common no-stop case cheap.
    void checkAndPark(ThreadState* state)
    {
        ASSERT(!state->isSweepInProgress());
        if (!acquireLoad(&m_canResume)) {
            pushAllRegisters(this, state, parkAfterPushRegisters);
            state->performPendingSweep();
        }
    }
| 180 | 180 |
| 181 void doEnterSafePoint(ThreadState* state, intptr_t* stackEnd) | 181 void doEnterSafePoint(ThreadState* state, intptr_t* stackEnd) |
| 182 { | 182 { |
| 183 state->recordStackEnd(stackEnd); | 183 state->recordStackEnd(stackEnd); |
| 184 // m_unparkedThreadCount tracks amount of unparked threads. It is | 184 // m_unparkedThreadCount tracks amount of unparked threads. It is |
| 185 // positive if and only if we have requested other threads to park | 185 // positive if and only if we have requested other threads to park |
| (...skipping 528 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 714 state->safePoint(HeapPointersOnStack); | 714 state->safePoint(HeapPointersOnStack); |
| 715 } | 715 } |
| 716 | 716 |
// Returns the process-wide set of attached ThreadStates.
// DEFINE_STATIC_LOCAL creates the set lazily on first use and intentionally
// never destroys it, avoiding destruction-order problems at process exit.
// NOTE(review): callers appear to guard access with threadAttachMutex();
// this accessor itself performs no locking.
ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
{
    DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ());
    return threads;
}
| 722 | 722 |
| 723 } | 723 } |
| OLD | NEW |