| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 88 matching lines...) |
| 99 | 99 |
| 100 CompileNext(); | 100 CompileNext(); |
| 101 | 101 |
| 102 if (FLAG_trace_concurrent_recompilation) { | 102 if (FLAG_trace_concurrent_recompilation) { |
| 103 time_spent_compiling_ += compiling_timer.Elapsed(); | 103 time_spent_compiling_ += compiling_timer.Elapsed(); |
| 104 } | 104 } |
| 105 } | 105 } |
| 106 } | 106 } |
| 107 | 107 |
| 108 | 108 |
| 109 RecompileJob* OptimizingCompilerThread::NextInput() { | 109 OptimizedCompileJob* OptimizingCompilerThread::NextInput() { |
| 110 LockGuard<Mutex> access_input_queue_(&input_queue_mutex_); | 110 LockGuard<Mutex> access_input_queue_(&input_queue_mutex_); |
| 111 if (input_queue_length_ == 0) return NULL; | 111 if (input_queue_length_ == 0) return NULL; |
| 112 RecompileJob* job = input_queue_[InputQueueIndex(0)]; | 112 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; |
| 113 ASSERT_NE(NULL, job); | 113 ASSERT_NE(NULL, job); |
| 114 input_queue_shift_ = InputQueueIndex(1); | 114 input_queue_shift_ = InputQueueIndex(1); |
| 115 input_queue_length_--; | 115 input_queue_length_--; |
| 116 return job; | 116 return job; |
| 117 } | 117 } |
| 118 | 118 |
| 119 | 119 |
| 120 void OptimizingCompilerThread::CompileNext() { | 120 void OptimizingCompilerThread::CompileNext() { |
| 121 RecompileJob* job = NextInput(); | 121 OptimizedCompileJob* job = NextInput(); |
| 122 ASSERT_NE(NULL, job); | 122 ASSERT_NE(NULL, job); |
| 123 | 123 |
| 124 // The function may have already been optimized by OSR. Simply continue. | 124 // The function may have already been optimized by OSR. Simply continue. |
| 125 RecompileJob::Status status = job->OptimizeGraph(); | 125 OptimizedCompileJob::Status status = job->OptimizeGraph(); |
| 126 USE(status); // Prevent an unused-variable error in release mode. | 126 USE(status); // Prevent an unused-variable error in release mode. |
| 127 ASSERT(status != RecompileJob::FAILED); | 127 ASSERT(status != OptimizedCompileJob::FAILED); |
| 128 | 128 |
| 129 // The function may have already been optimized by OSR. Simply continue. | 129 // The function may have already been optimized by OSR. Simply continue. |
| 130 // Use a mutex to make sure that functions marked for install | 130 // Use a mutex to make sure that functions marked for install |
| 131 // are always also queued. | 131 // are always also queued. |
| 132 output_queue_.Enqueue(job); | 132 output_queue_.Enqueue(job); |
| 133 isolate_->stack_guard()->RequestInstallCode(); | 133 isolate_->stack_guard()->RequestInstallCode(); |
| 134 } | 134 } |
| 135 | 135 |
| 136 | 136 |
| 137 static void DisposeRecompileJob(RecompileJob* job, | 137 static void DisposeOptimizedCompileJob(OptimizedCompileJob* job, |
| 138 bool restore_function_code) { | 138 bool restore_function_code) { |
| 139 // The recompile job is allocated in the CompilationInfo's zone. | 139 // The recompile job is allocated in the CompilationInfo's zone. |
| 140 CompilationInfo* info = job->info(); | 140 CompilationInfo* info = job->info(); |
| 141 if (restore_function_code) { | 141 if (restore_function_code) { |
| 142 if (info->is_osr()) { | 142 if (info->is_osr()) { |
| 143 if (!job->IsWaitingForInstall()) BackEdgeTable::RemoveStackCheck(info); | 143 if (!job->IsWaitingForInstall()) { |
| 144 // Remove stack check that guards OSR entry on original code. |
| 145 Handle<Code> code = info->unoptimized_code(); |
| 146 uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id()); |
| 147 BackEdgeTable::RemoveStackCheck(code, offset); |
| 148 } |
| 144 } else { | 149 } else { |
| 145 Handle<JSFunction> function = info->closure(); | 150 Handle<JSFunction> function = info->closure(); |
| 146 function->ReplaceCode(function->shared()->code()); | 151 function->ReplaceCode(function->shared()->code()); |
| 147 } | 152 } |
| 148 } | 153 } |
| 149 delete info; | 154 delete info; |
| 150 } | 155 } |
| 151 | 156 |
| 152 | 157 |
| 153 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { | 158 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { |
| 154 RecompileJob* job; | 159 OptimizedCompileJob* job; |
| 155 while ((job = NextInput())) { | 160 while ((job = NextInput())) { |
| 156 // This should not block, since we have one signal on the input queue | 161 // This should not block, since we have one signal on the input queue |
| 157 // semaphore corresponding to each element in the input queue. | 162 // semaphore corresponding to each element in the input queue. |
| 158 input_queue_semaphore_.Wait(); | 163 input_queue_semaphore_.Wait(); |
| 159 // OSR jobs are dealt with separately. | 164 // OSR jobs are dealt with separately. |
| 160 if (!job->info()->is_osr()) { | 165 if (!job->info()->is_osr()) { |
| 161 DisposeRecompileJob(job, restore_function_code); | 166 DisposeOptimizedCompileJob(job, restore_function_code); |
| 162 } | 167 } |
| 163 } | 168 } |
| 164 } | 169 } |
| 165 | 170 |
| 166 | 171 |
| 167 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) { | 172 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) { |
| 168 RecompileJob* job; | 173 OptimizedCompileJob* job; |
| 169 while (output_queue_.Dequeue(&job)) { | 174 while (output_queue_.Dequeue(&job)) { |
| 170 // OSR jobs are dealt with separately. | 175 // OSR jobs are dealt with separately. |
| 171 if (!job->info()->is_osr()) { | 176 if (!job->info()->is_osr()) { |
| 172 DisposeRecompileJob(job, restore_function_code); | 177 DisposeOptimizedCompileJob(job, restore_function_code); |
| 173 } | 178 } |
| 174 } | 179 } |
| 175 } | 180 } |
| 176 | 181 |
| 177 | 182 |
| 178 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) { | 183 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) { |
| 179 for (int i = 0; i < osr_buffer_capacity_; i++) { | 184 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 180 if (osr_buffer_[i] != NULL) { | 185 if (osr_buffer_[i] != NULL) { |
| 181 DisposeRecompileJob(osr_buffer_[i], restore_function_code); | 186 DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code); |
| 182 osr_buffer_[i] = NULL; | 187 osr_buffer_[i] = NULL; |
| 183 } | 188 } |
| 184 } | 189 } |
| 185 } | 190 } |
| 186 | 191 |
| 187 | 192 |
| 188 void OptimizingCompilerThread::Flush() { | 193 void OptimizingCompilerThread::Flush() { |
| 189 ASSERT(!IsOptimizerThread()); | 194 ASSERT(!IsOptimizerThread()); |
| 190 Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH)); | 195 Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH)); |
| 191 if (FLAG_block_concurrent_recompilation) Unblock(); | 196 if (FLAG_block_concurrent_recompilation) Unblock(); |
| (...skipping 37 matching lines...) |
| 229 } | 234 } |
| 230 | 235 |
| 231 Join(); | 236 Join(); |
| 232 } | 237 } |
| 233 | 238 |
| 234 | 239 |
| 235 void OptimizingCompilerThread::InstallOptimizedFunctions() { | 240 void OptimizingCompilerThread::InstallOptimizedFunctions() { |
| 236 ASSERT(!IsOptimizerThread()); | 241 ASSERT(!IsOptimizerThread()); |
| 237 HandleScope handle_scope(isolate_); | 242 HandleScope handle_scope(isolate_); |
| 238 | 243 |
| 239 RecompileJob* job; | 244 OptimizedCompileJob* job; |
| 240 while (output_queue_.Dequeue(&job)) { | 245 while (output_queue_.Dequeue(&job)) { |
| 241 CompilationInfo* info = job->info(); | 246 CompilationInfo* info = job->info(); |
| 247 Handle<JSFunction> function(*info->closure()); |
| 242 if (info->is_osr()) { | 248 if (info->is_osr()) { |
| 243 if (FLAG_trace_osr) { | 249 if (FLAG_trace_osr) { |
| 244 PrintF("[COSR - "); | 250 PrintF("[COSR - "); |
| 245 info->closure()->PrintName(); | 251 info->closure()->PrintName(); |
| 246 PrintF(" is ready for install and entry at AST id %d]\n", | 252 PrintF(" is ready for install and entry at AST id %d]\n", |
| 247 info->osr_ast_id().ToInt()); | 253 info->osr_ast_id().ToInt()); |
| 248 } | 254 } |
| 249 job->WaitForInstall(); | 255 job->WaitForInstall(); |
| 250 BackEdgeTable::RemoveStackCheck(info); | 256 // Remove stack check that guards OSR entry on original code. |
| 257 Handle<Code> code = info->unoptimized_code(); |
| 258 uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id()); |
| 259 BackEdgeTable::RemoveStackCheck(code, offset); |
| 251 } else { | 260 } else { |
| 252 Compiler::InstallOptimizedCode(job); | 261 Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job); |
| 262 function->ReplaceCode( |
| 263 code.is_null() ? function->shared()->code() : *code); |
| 253 } | 264 } |
| 254 } | 265 } |
| 255 } | 266 } |
| 256 | 267 |
| 257 | 268 |
| 258 void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) { | 269 void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) { |
| 259 ASSERT(IsQueueAvailable()); | 270 ASSERT(IsQueueAvailable()); |
| 260 ASSERT(!IsOptimizerThread()); | 271 ASSERT(!IsOptimizerThread()); |
| 261 CompilationInfo* info = job->info(); | 272 CompilationInfo* info = job->info(); |
| 262 if (info->is_osr()) { | 273 if (info->is_osr()) { |
| 263 if (FLAG_trace_concurrent_recompilation) { | |
| 264 PrintF(" ** Queueing "); | |
| 265 info->closure()->PrintName(); | |
| 266 PrintF(" for concurrent on-stack replacement.\n"); | |
| 267 } | |
| 268 osr_attempts_++; | 274 osr_attempts_++; |
| 269 BackEdgeTable::AddStackCheck(info); | |
| 270 AddToOsrBuffer(job); | 275 AddToOsrBuffer(job); |
| 271 // Add job to the front of the input queue. | 276 // Add job to the front of the input queue. |
| 272 LockGuard<Mutex> access_input_queue(&input_queue_mutex_); | 277 LockGuard<Mutex> access_input_queue(&input_queue_mutex_); |
| 273 ASSERT_LT(input_queue_length_, input_queue_capacity_); | 278 ASSERT_LT(input_queue_length_, input_queue_capacity_); |
| 274 // Move shift_ back by one. | 279 // Move shift_ back by one. |
| 275 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); | 280 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); |
| 276 input_queue_[InputQueueIndex(0)] = job; | 281 input_queue_[InputQueueIndex(0)] = job; |
| 277 input_queue_length_++; | 282 input_queue_length_++; |
| 278 } else { | 283 } else { |
| 279 info->closure()->MarkInRecompileQueue(); | |
| 280 // Add job to the back of the input queue. | 284 // Add job to the back of the input queue. |
| 281 LockGuard<Mutex> access_input_queue(&input_queue_mutex_); | 285 LockGuard<Mutex> access_input_queue(&input_queue_mutex_); |
| 282 ASSERT_LT(input_queue_length_, input_queue_capacity_); | 286 ASSERT_LT(input_queue_length_, input_queue_capacity_); |
| 283 input_queue_[InputQueueIndex(input_queue_length_)] = job; | 287 input_queue_[InputQueueIndex(input_queue_length_)] = job; |
| 284 input_queue_length_++; | 288 input_queue_length_++; |
| 285 } | 289 } |
| 286 if (FLAG_block_concurrent_recompilation) { | 290 if (FLAG_block_concurrent_recompilation) { |
| 287 blocked_jobs_++; | 291 blocked_jobs_++; |
| 288 } else { | 292 } else { |
| 289 input_queue_semaphore_.Signal(); | 293 input_queue_semaphore_.Signal(); |
| 290 } | 294 } |
| 291 } | 295 } |
| 292 | 296 |
| 293 | 297 |
| 294 void OptimizingCompilerThread::Unblock() { | 298 void OptimizingCompilerThread::Unblock() { |
| 295 ASSERT(!IsOptimizerThread()); | 299 ASSERT(!IsOptimizerThread()); |
| 296 while (blocked_jobs_ > 0) { | 300 while (blocked_jobs_ > 0) { |
| 297 input_queue_semaphore_.Signal(); | 301 input_queue_semaphore_.Signal(); |
| 298 blocked_jobs_--; | 302 blocked_jobs_--; |
| 299 } | 303 } |
| 300 } | 304 } |
| 301 | 305 |
| 302 | 306 |
| 303 RecompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( | 307 OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( |
| 304 Handle<JSFunction> function, uint32_t osr_pc_offset) { | 308 Handle<JSFunction> function, BailoutId osr_ast_id) { |
| 305 ASSERT(!IsOptimizerThread()); | 309 ASSERT(!IsOptimizerThread()); |
| 306 for (int i = 0; i < osr_buffer_capacity_; i++) { | 310 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 307 RecompileJob* current = osr_buffer_[i]; | 311 OptimizedCompileJob* current = osr_buffer_[i]; |
| 308 if (current != NULL && | 312 if (current != NULL && |
| 309 current->IsWaitingForInstall() && | 313 current->IsWaitingForInstall() && |
| 310 current->info()->HasSameOsrEntry(function, osr_pc_offset)) { | 314 current->info()->HasSameOsrEntry(function, osr_ast_id)) { |
| 311 osr_hits_++; | 315 osr_hits_++; |
| 312 osr_buffer_[i] = NULL; | 316 osr_buffer_[i] = NULL; |
| 313 return current; | 317 return current; |
| 314 } | 318 } |
| 315 } | 319 } |
| 316 return NULL; | 320 return NULL; |
| 317 } | 321 } |
| 318 | 322 |
| 319 | 323 |
| 320 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function, | 324 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function, |
| 321 uint32_t osr_pc_offset) { | 325 BailoutId osr_ast_id) { |
| 322 ASSERT(!IsOptimizerThread()); | 326 ASSERT(!IsOptimizerThread()); |
| 323 for (int i = 0; i < osr_buffer_capacity_; i++) { | 327 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 324 RecompileJob* current = osr_buffer_[i]; | 328 OptimizedCompileJob* current = osr_buffer_[i]; |
| 325 if (current != NULL && | 329 if (current != NULL && |
| 326 current->info()->HasSameOsrEntry(function, osr_pc_offset)) { | 330 current->info()->HasSameOsrEntry(function, osr_ast_id)) { |
| 327 return !current->IsWaitingForInstall(); | 331 return !current->IsWaitingForInstall(); |
| 328 } | 332 } |
| 329 } | 333 } |
| 330 return false; | 334 return false; |
| 331 } | 335 } |
| 332 | 336 |
| 333 | 337 |
| 334 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) { | 338 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) { |
| 335 ASSERT(!IsOptimizerThread()); | 339 ASSERT(!IsOptimizerThread()); |
| 336 for (int i = 0; i < osr_buffer_capacity_; i++) { | 340 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 337 RecompileJob* current = osr_buffer_[i]; | 341 OptimizedCompileJob* current = osr_buffer_[i]; |
| 338 if (current != NULL && *current->info()->closure() == function) { | 342 if (current != NULL && *current->info()->closure() == function) { |
| 339 return !current->IsWaitingForInstall(); | 343 return !current->IsWaitingForInstall(); |
| 340 } | 344 } |
| 341 } | 345 } |
| 342 return false; | 346 return false; |
| 343 } | 347 } |
| 344 | 348 |
| 345 | 349 |
| 346 void OptimizingCompilerThread::AddToOsrBuffer(RecompileJob* job) { | 350 void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) { |
| 347 ASSERT(!IsOptimizerThread()); | 351 ASSERT(!IsOptimizerThread()); |
| 348 // Find the next slot that is empty or has a stale job. | 352 // Find the next slot that is empty or has a stale job. |
| 349 RecompileJob* stale = NULL; | 353 OptimizedCompileJob* stale = NULL; |
| 350 while (true) { | 354 while (true) { |
| 351 stale = osr_buffer_[osr_buffer_cursor_]; | 355 stale = osr_buffer_[osr_buffer_cursor_]; |
| 352 if (stale == NULL || stale->IsWaitingForInstall()) break; | 356 if (stale == NULL || stale->IsWaitingForInstall()) break; |
| 353 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; | 357 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; |
| 354 } | 358 } |
| 355 | 359 |
| 356 // Add to found slot and dispose the evicted job. | 360 // Add to found slot and dispose the evicted job. |
| 357 if (stale != NULL) { | 361 if (stale != NULL) { |
| 358 ASSERT(stale->IsWaitingForInstall()); | 362 ASSERT(stale->IsWaitingForInstall()); |
| 359 CompilationInfo* info = stale->info(); | 363 CompilationInfo* info = stale->info(); |
| 360 if (FLAG_trace_osr) { | 364 if (FLAG_trace_osr) { |
| 361 PrintF("[COSR - Discarded "); | 365 PrintF("[COSR - Discarded "); |
| 362 info->closure()->PrintName(); | 366 info->closure()->PrintName(); |
| 363 PrintF(", AST id %d]\n", info->osr_ast_id().ToInt()); | 367 PrintF(", AST id %d]\n", info->osr_ast_id().ToInt()); |
| 364 } | 368 } |
| 365 DisposeRecompileJob(stale, false); | 369 DisposeOptimizedCompileJob(stale, false); |
| 366 } | 370 } |
| 367 osr_buffer_[osr_buffer_cursor_] = job; | 371 osr_buffer_[osr_buffer_cursor_] = job; |
| 368 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; | 372 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; |
| 369 } | 373 } |
| 370 | 374 |
| 371 | 375 |
| 372 #ifdef DEBUG | 376 #ifdef DEBUG |
| 373 bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) { | 377 bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) { |
| 374 return isolate->concurrent_recompilation_enabled() && | 378 return isolate->concurrent_recompilation_enabled() && |
| 375 isolate->optimizing_compiler_thread()->IsOptimizerThread(); | 379 isolate->optimizing_compiler_thread()->IsOptimizerThread(); |
| 376 } | 380 } |
| 377 | 381 |
| 378 | 382 |
| 379 bool OptimizingCompilerThread::IsOptimizerThread() { | 383 bool OptimizingCompilerThread::IsOptimizerThread() { |
| 380 LockGuard<Mutex> lock_guard(&thread_id_mutex_); | 384 LockGuard<Mutex> lock_guard(&thread_id_mutex_); |
| 381 return ThreadId::Current().ToInteger() == thread_id_; | 385 return ThreadId::Current().ToInteger() == thread_id_; |
| 382 } | 386 } |
| 383 #endif | 387 #endif |
| 384 | 388 |
| 385 | 389 |
| 386 } } // namespace v8::internal | 390 } } // namespace v8::internal |
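For readers following the queue manipulation in NextInput() and QueueForOptimization() above, the sketch below models the input queue's circular indexing as a small standalone class. This is illustrative only and not part of this change; CircularJobQueue and Job are hypothetical names, not V8 types. The idea it mirrors: the buffer has a fixed capacity, a shift marks the physical slot of the logical front, OSR jobs are prepended by moving the shift back one slot, and other jobs are appended at logical position length.

    #include <cassert>
    #include <cstdio>
    #include <vector>

    struct Job { int id; };

    // Hypothetical stand-in for the fixed-capacity input queue; not a V8 type.
    class CircularJobQueue {
     public:
      explicit CircularJobQueue(int capacity)
          : buffer_(capacity, nullptr), shift_(0), length_(0) {}

      // Counterpart of InputQueueIndex(): map a logical position (0 == front)
      // to a physical slot, wrapping around the fixed-size buffer.
      int Index(int logical) const {
        return (logical + shift_) % static_cast<int>(buffer_.size());
      }

      // Non-OSR path of QueueForOptimization(): append at the logical back.
      void PushBack(Job* job) {
        assert(length_ < static_cast<int>(buffer_.size()));
        buffer_[Index(length_)] = job;
        length_++;
      }

      // OSR path: move the logical front back by one slot and store the job
      // there, so it is picked up before already-queued work.
      void PushFront(Job* job) {
        assert(length_ < static_cast<int>(buffer_.size()));
        shift_ = Index(static_cast<int>(buffer_.size()) - 1);
        buffer_[Index(0)] = job;
        length_++;
      }

      // Counterpart of NextInput(): take the job at the logical front, if any.
      Job* PopFront() {
        if (length_ == 0) return nullptr;
        Job* job = buffer_[Index(0)];
        shift_ = Index(1);
        length_--;
        return job;
      }

     private:
      std::vector<Job*> buffer_;
      int shift_;   // Physical slot of the logical front element.
      int length_;
    };

    int main() {
      CircularJobQueue queue(4);
      Job a{1}, b{2}, osr{3};
      queue.PushBack(&a);     // Regular jobs are appended...
      queue.PushBack(&b);
      queue.PushFront(&osr);  // ...while an OSR job jumps to the front.
      std::printf("%d\n", queue.PopFront()->id);  // 3
      std::printf("%d\n", queue.PopFront()->id);  // 1
      std::printf("%d\n", queue.PopFront()->id);  // 2
      return 0;
    }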