Chromium Code Reviews

Unified Diff: src/optimizing-compiler-thread.cc

Issue 24237009: Less aggressive polling when concurrently compiling for OSR. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: platform ports (created 7 years, 3 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 11 matching lines...)
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 #include "optimizing-compiler-thread.h"

 #include "v8.h"

+#include "full-codegen.h"
 #include "hydrogen.h"
 #include "isolate.h"
 #include "v8threads.h"

 namespace v8 {
 namespace internal {


 void OptimizingCompilerThread::Run() {
 #ifdef DEBUG
(...skipping 58 matching lines...)
   Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));

   // The function may have already been optimized by OSR. Simply continue.
   OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
   USE(status);  // Prevent an unused-variable error in release mode.
   ASSERT(status != OptimizingCompiler::FAILED);

   // The function may have already been optimized by OSR. Simply continue.
   // Use a mutex to make sure that functions marked for install
   // are always also queued.
-  if (!optimizing_compiler->info()->osr_ast_id().IsNone()) {
-    ASSERT(FLAG_concurrent_osr);
-    LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
-    osr_candidates_.RemoveElement(optimizing_compiler);
-    ready_for_osr_.Add(optimizing_compiler);
-  } else {
-    LockGuard<Mutex> access_queue(&queue_mutex_);
-    output_queue_.Enqueue(optimizing_compiler);
-    isolate_->stack_guard()->RequestInstallCode();
-  }
+  LockGuard<Mutex> access_queue(&queue_mutex_);
+  output_queue_.Enqueue(optimizing_compiler);
+  isolate_->stack_guard()->RequestInstallCode();
 }


+static void DisposeOptimizingCompiler(OptimizingCompiler* compiler,
+                                      bool restore_function_code) {
+  CompilationInfo* info = compiler->info();
+  if (restore_function_code) {
+    Handle<JSFunction> function = info->closure();
+    function->ReplaceCode(function->shared()->code());
+  }
+  delete info;
+}
+
+
 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
   OptimizingCompiler* optimizing_compiler;
   // The optimizing compiler is allocated in the CompilationInfo's zone.
   while (input_queue_.Dequeue(&optimizing_compiler)) {
     // This should not block, since we have one signal on the input queue
     // semaphore corresponding to each element in the input queue.
     input_queue_semaphore_.Wait();
-    CompilationInfo* info = optimizing_compiler->info();
-    if (restore_function_code) {
-      Handle<JSFunction> function = info->closure();
-      function->ReplaceCode(function->shared()->code());
-    }
-    delete info;
+    DisposeOptimizingCompiler(optimizing_compiler, restore_function_code);
   }
   Release_Store(&queue_length_, static_cast<AtomicWord>(0));

-  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
   osr_candidates_.Clear();
 }


 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
   OptimizingCompiler* optimizing_compiler;
   // The optimizing compiler is allocated in the CompilationInfo's zone.
   while (true) {
     { LockGuard<Mutex> access_queue(&queue_mutex_);
       if (!output_queue_.Dequeue(&optimizing_compiler)) break;
     }
-    CompilationInfo* info = optimizing_compiler->info();
-    if (restore_function_code) {
-      Handle<JSFunction> function = info->closure();
-      function->ReplaceCode(function->shared()->code());
-    }
-    delete info;
+    DisposeOptimizingCompiler(optimizing_compiler, restore_function_code);
   }

-  RemoveStaleOSRCandidates(0);
+  for (int i = 0; i < kOsrBufferSize; i++) {
+    optimizing_compiler = osr_buffer_[i];
+    if (optimizing_compiler != NULL) {
+      DisposeOptimizingCompiler(optimizing_compiler, restore_function_code);
+    }
+  }
+  osr_cursor_ = 0;
 }


 void OptimizingCompilerThread::Flush() {
   ASSERT(!IsOptimizerThread());
   Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
   input_queue_semaphore_.Signal();
   stop_semaphore_.Wait();
   FlushOutputQueue(true);
 }


 void OptimizingCompilerThread::Stop() {
   ASSERT(!IsOptimizerThread());
   Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
   input_queue_semaphore_.Signal();
   stop_semaphore_.Wait();

   if (FLAG_concurrent_recompilation_delay != 0) {
     // Barrier when loading queue length is not necessary since the write
     // happens in CompileNext on the same thread.
     // This is used only for testing.
     while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
     InstallOptimizedFunctions();
   } else {
     FlushInputQueue(false);
     FlushOutputQueue(false);
titzer 2013/09/24 09:42:01 Shouldn't you also empty the osr buffer here too?
Yang 2013/09/24 10:07:11 This is already done in FlushOutputQueue.
   }

   if (FLAG_trace_concurrent_recompilation) {
     double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
     PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
   }

-  if (FLAG_trace_osr && FLAG_concurrent_osr) {
+  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
+      FLAG_concurrent_osr) {
     PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
   }

   Join();
 }


 void OptimizingCompilerThread::InstallOptimizedFunctions() {
   ASSERT(!IsOptimizerThread());
   HandleScope handle_scope(isolate_);

   OptimizingCompiler* compiler;
   while (true) {
     { LockGuard<Mutex> access_queue(&queue_mutex_);
       if (!output_queue_.Dequeue(&compiler)) break;
     }
-    Compiler::InstallOptimizedCode(compiler);
+    CompilationInfo* info = compiler->info();
+    if (info->osr_ast_id().IsNone()) {
+      Compiler::InstallOptimizedCode(compiler);
+    } else {
+      if (FLAG_trace_osr) {
+        PrintF("[COSR - ");
+        info->closure()->PrintName();
+        PrintF(" is ready for install and entry at AST id %d]\n",
+               info->osr_ast_id().ToInt());
+      }
+      osr_candidates_.RemoveElement(compiler);
+      AddToOsrBuffer(compiler);
+      BackEdgeTable::RemoveStackCheck(info);
+    }
   }
-
-  // Remove the oldest OSR candidates that are ready so that we
-  // only have limited number of them waiting.
-  if (FLAG_concurrent_osr) RemoveStaleOSRCandidates();
 }


 void OptimizingCompilerThread::QueueForOptimization(
     OptimizingCompiler* optimizing_compiler) {
   ASSERT(IsQueueAvailable());
   ASSERT(!IsOptimizerThread());
   Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
-  if (optimizing_compiler->info()->osr_ast_id().IsNone()) {
-    optimizing_compiler->info()->closure()->MarkInRecompileQueue();
+  CompilationInfo* info = optimizing_compiler->info();
+  if (info->osr_ast_id().IsNone()) {
+    info->closure()->MarkInRecompileQueue();
   } else {
-    LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
+    if (FLAG_trace_concurrent_recompilation) {
+      PrintF("  ** Queueing ");
+      info->closure()->PrintName();
+      PrintF(" for concurrent on-stack replacement.\n");
+    }
     osr_candidates_.Add(optimizing_compiler);
     osr_attempts_++;
+    BackEdgeTable::AddStackCheck(info);
   }
   input_queue_.Enqueue(optimizing_compiler);
   input_queue_semaphore_.Signal();
 }


 OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate(
     Handle<JSFunction> function, uint32_t osr_pc_offset) {
   ASSERT(!IsOptimizerThread());
   OptimizingCompiler* result = NULL;
-  { LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
-    for (int i = 0; i < ready_for_osr_.length(); i++) {
-      if (ready_for_osr_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
-        osr_hits_++;
-        result = ready_for_osr_.Remove(i);
-        break;
-      }
-    }
-  }
-  RemoveStaleOSRCandidates();
-  return result;
+  for (int i = 0; i < kOsrBufferSize; i++) {
+    result = osr_buffer_[i];
+    if (result == NULL) continue;
+    if (result->info()->HasSameOsrEntry(function, osr_pc_offset)) {
+      osr_hits_++;
+      osr_buffer_[i] = NULL;
+      return result;
+    }
+  }
+  return NULL;
 }


 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
                                               uint32_t osr_pc_offset) {
   ASSERT(!IsOptimizerThread());
-  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
   for (int i = 0; i < osr_candidates_.length(); i++) {
     if (osr_candidates_[i]->info()->HasSameOsrEntry(function, osr_pc_offset)) {
       return true;
     }
   }
   return false;
 }


 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
   ASSERT(!IsOptimizerThread());
-  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
   for (int i = 0; i < osr_candidates_.length(); i++) {
     if (*osr_candidates_[i]->info()->closure() == function) {
       return true;
     }
   }
   return false;
 }


-void OptimizingCompilerThread::RemoveStaleOSRCandidates(int limit) {
+void OptimizingCompilerThread::AddToOsrBuffer(OptimizingCompiler* compiler) {
   ASSERT(!IsOptimizerThread());
-  LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
-  while (ready_for_osr_.length() > limit) {
-    OptimizingCompiler* compiler = ready_for_osr_.Remove(0);
-    CompilationInfo* throw_away = compiler->info();
+  OptimizingCompiler* stale = osr_buffer_[osr_cursor_];
+  if (stale != NULL) {
+    CompilationInfo* info = stale->info();
     if (FLAG_trace_osr) {
       PrintF("[COSR - Discarded ");
-      throw_away->closure()->PrintName();
-      PrintF(", AST id %d]\n",
-             throw_away->osr_ast_id().ToInt());
+      info->closure()->PrintName();
+      PrintF(", AST id %d]\n", info->osr_ast_id().ToInt());
     }
-    delete throw_away;
+    BackEdgeTable::RemoveStackCheck(info);
+    delete info;
titzer 2013/09/24 09:42:01 Maybe you want DisposeOptimizingCompiler(stale, false) here?
Yang 2013/09/24 10:07:11 Done.
   }
+
+  osr_buffer_[osr_cursor_] = compiler;
+  osr_cursor_ = (osr_cursor_ + 1) % kOsrBufferSize;
 }


 #ifdef DEBUG
 bool OptimizingCompilerThread::IsOptimizerThread() {
   if (!FLAG_concurrent_recompilation) return false;
   LockGuard<Mutex> lock_guard(&thread_id_mutex_);
   return ThreadId::Current().ToInteger() == thread_id_;
 }
 #endif


 } }  // namespace v8::internal
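
The core of this patch replaces the unbounded ready_for_osr_ list, which had to be polled and pruned on every install, with a fixed-size circular buffer (osr_buffer_ / osr_cursor_) that simply evicts the oldest ready OSR candidate when a slot is reused. Below is a minimal standalone sketch of that buffer pattern for readers following the review; the names OsrBuffer, Candidate, and kSize are illustrative stand-ins rather than V8 identifiers, and the sketch leaves out V8's handles, locking, and tracing.

// Standalone sketch of the bounded OSR-candidate buffer pattern, not V8 code.
#include <array>
#include <cstddef>
#include <cstdio>

struct Candidate {
  int function_id;  // stands in for the (function, osr_pc_offset) identity
};

class OsrBuffer {
 public:
  ~OsrBuffer() {
    for (std::size_t i = 0; i < kSize; i++) delete slots_[i];
  }

  // Insert a candidate; once the fixed-size ring wraps around, the oldest
  // slot is discarded, mirroring AddToOsrBuffer in the patch.
  void Add(Candidate* candidate) {
    Candidate* stale = slots_[cursor_];
    if (stale != NULL) {
      std::printf("[discarded candidate %d]\n", stale->function_id);
      delete stale;
    }
    slots_[cursor_] = candidate;
    cursor_ = (cursor_ + 1) % kSize;
  }

  // Linear scan for a matching candidate, clearing the slot on a hit,
  // mirroring FindReadyOSRCandidate in the patch.
  Candidate* Find(int function_id) {
    for (std::size_t i = 0; i < kSize; i++) {
      Candidate* c = slots_[i];
      if (c != NULL && c->function_id == function_id) {
        slots_[i] = NULL;
        return c;
      }
    }
    return NULL;
  }

 private:
  static const std::size_t kSize = 4;
  std::array<Candidate*, kSize> slots_ = {};
  std::size_t cursor_ = 0;
};

int main() {
  OsrBuffer buffer;
  // Adding six candidates to a four-slot ring evicts the two oldest ones.
  for (int id = 0; id < 6; id++) buffer.Add(new Candidate{id});
  Candidate* hit = buffer.Find(4);
  std::printf("candidate 4 %s\n", hit != NULL ? "found" : "missing");
  delete hit;
  return 0;
}

Because the buffer is bounded, stale candidates age out automatically as new ones arrive, which is what lets the patch drop the RemoveStaleOSRCandidates polling pass.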