| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 67 | 67 |
| 68 while (true) { | 68 while (true) { |
| 69 input_queue_semaphore_.Wait(); | 69 input_queue_semaphore_.Wait(); |
| 70 Logger::TimerEventScope timer( | 70 Logger::TimerEventScope timer( |
| 71 isolate_, Logger::TimerEventScope::v8_recompile_concurrent); | 71 isolate_, Logger::TimerEventScope::v8_recompile_concurrent); |
| 72 | 72 |
| 73 if (FLAG_concurrent_recompilation_delay != 0) { | 73 if (FLAG_concurrent_recompilation_delay != 0) { |
| 74 OS::Sleep(FLAG_concurrent_recompilation_delay); | 74 OS::Sleep(FLAG_concurrent_recompilation_delay); |
| 75 } | 75 } |
| 76 | 76 |
| 77 switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) { | 77 switch (static_cast<LoopSwitch>(Acquire_Load(&loop_switch_))) { |
| 78 case CONTINUE: | 78 case CONTINUE: |
| 79 break; | 79 break; |
| 80 case STOP: | 80 case STOP: |
| 81 if (FLAG_trace_concurrent_recompilation) { | 81 if (FLAG_trace_concurrent_recompilation) { |
| 82 time_spent_total_ = total_timer.Elapsed(); | 82 time_spent_total_ = total_timer.Elapsed(); |
| 83 } | 83 } |
| 84 stop_semaphore_.Signal(); | 84 stop_semaphore_.Signal(); |
| 85 return; | 85 return; |
| 86 case FLUSH: | 86 case FLUSH: |
| 87 // The main thread is blocked, waiting for the stop semaphore. | 87 // The main thread is blocked, waiting for the stop semaphore. |
| 88 { AllowHandleDereference allow_handle_dereference; | 88 { AllowHandleDereference allow_handle_dereference; |
| 89 FlushInputQueue(true); | 89 FlushInputQueue(RESTORE_FUNCTION_CODE); |
| 90 } | 90 } |
| 91 Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); | 91 Release_Store(&loop_switch_, static_cast<AtomicWord>(CONTINUE)); |
| 92 stop_semaphore_.Signal(); | 92 stop_semaphore_.Signal(); |
| 93 // Return to start of consumer loop. | 93 // Return to start of consumer loop. |
| 94 continue; | 94 continue; |
| 95 } | 95 } |
| 96 | 96 |
| 97 ElapsedTimer compiling_timer; | 97 ElapsedTimer compiling_timer; |
| 98 if (FLAG_trace_concurrent_recompilation) compiling_timer.Start(); | 98 if (FLAG_trace_concurrent_recompilation) compiling_timer.Start(); |
| 99 | 99 |
| 100 CompileNext(); | 100 CompileNext(); |
| 101 | 101 |
| 102 if (FLAG_trace_concurrent_recompilation) { | 102 if (FLAG_trace_concurrent_recompilation) { |
| 103 time_spent_compiling_ += compiling_timer.Elapsed(); | 103 time_spent_compiling_ += compiling_timer.Elapsed(); |
| 104 } | 104 } |
| 105 } | 105 } |
| 106 } | 106 } |
| 107 | 107 |
| 108 | 108 |
| 109 OptimizedCompileJob* OptimizingCompilerThread::NextInput() { | 109 OptimizedCompileJob* OptimizingCompilerThread::NextInput() { |
| 110 LockGuard<Mutex> access_input_queue_(&input_queue_mutex_); | 110 LockGuard<Mutex> access_input_queue_(&mutex_); |
| 111 if (input_queue_length_ == 0) return NULL; | 111 if (input_queue_length_ == 0) return NULL; |
| 112 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; | 112 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; |
| 113 ASSERT_NE(NULL, job); | 113 ASSERT_NE(NULL, job); |
| 114 input_queue_shift_ = InputQueueIndex(1); | 114 input_queue_shift_ = InputQueueIndex(1); |
| 115 input_queue_length_--; | 115 input_queue_length_--; |
| 116 return job; | 116 return job; |
| 117 } | 117 } |
| 118 | 118 |
| 119 | 119 |
| 120 void OptimizingCompilerThread::CompileNext() { | 120 void OptimizingCompilerThread::CompileNext() { |
| 121 OptimizedCompileJob* job = NextInput(); | 121 OptimizedCompileJob* job = NextInput(); |
| 122 ASSERT_NE(NULL, job); | 122 ASSERT_NE(NULL, job); |
| 123 | 123 |
| 124 // The function may have already been optimized by OSR. Simply continue. | 124 // The function may have already been optimized by OSR. Simply continue. |
| 125 OptimizedCompileJob::Status status = job->OptimizeGraph(); | 125 OptimizedCompileJob::Status status = job->OptimizeGraph(); |
| 126 USE(status); // Prevent an unused-variable error in release mode. | 126 USE(status); // Prevent an unused-variable error in release mode. |
| 127 ASSERT(status != OptimizedCompileJob::FAILED); | 127 ASSERT(status != OptimizedCompileJob::FAILED); |
| 128 | 128 |
| 129 // The function may have already been optimized by OSR. Simply continue. | 129 // The function may have already been optimized by OSR. Simply continue. |
| 130 // Use a mutex to make sure that functions marked for install | 130 // Use a mutex to make sure that functions marked for install |
| 131 // are always also queued. | 131 // are always also queued. |
| 132 output_queue_.Enqueue(job); | 132 output_queue_.Enqueue(job); |
| 133 isolate_->stack_guard()->RequestInstallCode(); | 133 isolate_->stack_guard()->RequestInstallCode(); |
| 134 } | 134 } |
| 135 | 135 |
| 136 | 136 |
// Frees a compile job that will never be installed.  With
// RESTORE_FUNCTION_CODE the affected function is first reset so it can keep
// running its unoptimized code; with DO_NOT_RESTORE_FUNCTION_CODE the job is
// simply discarded (see Stop(), where the isolate is going away anyway).
void OptimizingCompilerThread::DisposeOptimizedCompileJob(
    OptimizedCompileJob* job, FlushMode mode) {
  // The recompile job is allocated in the CompilationInfo's zone.
  CompilationInfo* info = job->info();
  if (mode == RESTORE_FUNCTION_CODE) {
    if (info->is_osr()) {
      if (!job->IsWaitingForInstall()) {
        // Remove stack check that guards OSR entry on original code.
        Handle<Code> code = info->unoptimized_code();
        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
        BackEdgeTable::RemoveStackCheck(code, offset);
      }
    } else {
      // Non-OSR: point the closure back at its shared, unoptimized code.
      Handle<JSFunction> function = info->closure();
      function->ReplaceCode(function->shared()->code());
    }
  }
  // Deleting the CompilationInfo also releases the zone that owns the job.
  delete info;
}
| 156 | 156 |
| 157 | 157 |
| 158 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { | 158 void OptimizingCompilerThread::FlushInputQueue(FlushMode mode) { |
| 159 OptimizedCompileJob* job; | 159 OptimizedCompileJob* job; |
| 160 while ((job = NextInput())) { | 160 while ((job = NextInput())) { |
| 161 // This should not block, since we have one signal on the input queue | 161 // This should not block, since we have one signal on the input queue |
| 162 // semaphore corresponding to each element in the input queue. | 162 // semaphore corresponding to each element in the input queue. |
| 163 input_queue_semaphore_.Wait(); | 163 input_queue_semaphore_.Wait(); |
| 164 // OSR jobs are dealt with separately. | 164 // OSR jobs are dealt with separately. |
| 165 if (!job->info()->is_osr()) { | 165 if (!job->info()->is_osr()) { |
| 166 DisposeOptimizedCompileJob(job, restore_function_code); | 166 DisposeOptimizedCompileJob(job, mode); |
| 167 } | 167 } |
| 168 } | 168 } |
| 169 } | 169 } |
| 170 | 170 |
| 171 | 171 |
| 172 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) { | 172 void OptimizingCompilerThread::FlushOutputQueue(FlushMode mode) { |
| 173 OptimizedCompileJob* job; | 173 OptimizedCompileJob* job; |
| 174 while (output_queue_.Dequeue(&job)) { | 174 while (output_queue_.Dequeue(&job)) { |
| 175 // OSR jobs are dealt with separately. | 175 // OSR jobs are dealt with separately. |
| 176 if (!job->info()->is_osr()) { | 176 if (!job->info()->is_osr()) { |
| 177 DisposeOptimizedCompileJob(job, restore_function_code); | 177 DisposeOptimizedCompileJob(job, mode); |
| 178 } | 178 } |
| 179 } | 179 } |
| 180 } | 180 } |
| 181 | 181 |
| 182 | 182 |
| 183 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) { | 183 void OptimizingCompilerThread::FlushOsrBuffer(FlushMode mode) { |
| 184 for (int i = 0; i < osr_buffer_capacity_; i++) { | 184 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 185 if (osr_buffer_[i] != NULL) { | 185 if (osr_buffer_[i] != NULL) { |
| 186 DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code); | 186 DisposeOptimizedCompileJob(osr_buffer_[i], mode); |
| 187 osr_buffer_[i] = NULL; | 187 osr_buffer_[i] = NULL; |
| 188 } | 188 } |
| 189 } | 189 } |
| 190 } | 190 } |
| 191 | 191 |
| 192 | 192 |
// Main thread only: transitions the compiler thread's loop switch (FLUSH or
// STOP).  If the compiler thread has already requested a COMPILER_SYNC
// interrupt (i.e. it is parked in PauseMainThread), clears that request and
// yields so the compiler thread can wake up and observe the new switch.
void OptimizingCompilerThread::SetSwitchAndInterceptInterrupt(
    LoopSwitch loop_switch) {
  // The compiler thread may be waiting for the main (this) thread to handle
  // a stack check interrupt when entering a SynchronizedScope.
  // Use a mutex when changing the loop switch and checking the stack guard
  // state to avoid race with PauseMainThread.
  { LockGuard<Mutex> set_switch_and_check_interrupt(&mutex_);
    Release_Store(&loop_switch_, static_cast<AtomicWord>(loop_switch));
    // No sync request pending: the compiler thread is not waiting on us.
    if (!isolate_->stack_guard()->IsCompilerSyncRequest()) return;
  }
  // Cancel the pending interrupt and unblock the compiler thread; it will
  // re-read loop_switch_ (now FLUSH/STOP) on its next loop iteration.
  isolate_->stack_guard()->Continue(COMPILER_SYNC);
  YieldToCompilerThread();
}
| 206 |
| 207 |
// Main thread only: interrupts the compiler thread's consumer loop with
// |loop_switch| (FLUSH or STOP) and blocks until the thread acknowledges
// the request by signalling stop_semaphore_ (see the switch in Run()).
void OptimizingCompilerThread::PrepareInterruption(LoopSwitch loop_switch) {
  ASSERT(!IsOptimizerThread());
  // With --block-concurrent-recompilation, queued jobs have not yet signaled
  // the input queue semaphore; release them so the loop can make progress.
  if (FLAG_block_concurrent_recompilation) Unblock();
  SetSwitchAndInterceptInterrupt(loop_switch);
  // Wake the consumer loop even if the input queue happens to be empty...
  input_queue_semaphore_.Signal();
  // ...and wait until it has handled the FLUSH/STOP request.
  stop_semaphore_.Wait();
}
| 200 if (FLAG_concurrent_osr) FlushOsrBuffer(true); | 215 |
| 216 |
// Main thread only: discards all pending recompilation work, restoring each
// affected function's original (unoptimized) code.
void OptimizingCompilerThread::Flush() {
  PrepareInterruption(FLUSH);
  // The compiler thread has acknowledged FLUSH and already drained the input
  // queue itself (see the FLUSH case in Run()); now dispose anything it had
  // finished but that was never installed.
  FlushOutputQueue(RESTORE_FUNCTION_CODE);
  if (FLAG_concurrent_osr) FlushOsrBuffer(RESTORE_FUNCTION_CODE);
  if (FLAG_trace_concurrent_recompilation) {
    PrintF(" ** Flushed concurrent recompilation queues.\n");
  }
}
| 205 | 225 |
| 206 | 226 |
| 207 void OptimizingCompilerThread::Stop() { | 227 void OptimizingCompilerThread::Stop() { |
| 208 ASSERT(!IsOptimizerThread()); | 228 PrepareInterruption(STOP); |
| 209 Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP)); | |
| 210 if (FLAG_block_concurrent_recompilation) Unblock(); | |
| 211 input_queue_semaphore_.Signal(); | |
| 212 stop_semaphore_.Wait(); | |
| 213 | |
| 214 if (FLAG_concurrent_recompilation_delay != 0) { | 229 if (FLAG_concurrent_recompilation_delay != 0) { |
| 215 // At this point the optimizing compiler thread's event loop has stopped. | 230 // At this point the optimizing compiler thread's event loop has stopped. |
| 216 // There is no need for a mutex when reading input_queue_length_. | 231 // There is no need for a mutex when reading input_queue_length_. |
| 217 while (input_queue_length_ > 0) CompileNext(); | 232 while (input_queue_length_ > 0) CompileNext(); |
| 218 InstallOptimizedFunctions(); | 233 InstallOptimizedFunctions(); |
| 219 } else { | 234 } else { |
| 220 FlushInputQueue(false); | 235 FlushInputQueue(DO_NOT_RESTORE_FUNCTION_CODE); |
| 221 FlushOutputQueue(false); | 236 FlushOutputQueue(DO_NOT_RESTORE_FUNCTION_CODE); |
| 222 } | 237 } |
| 223 | 238 |
| 224 if (FLAG_concurrent_osr) FlushOsrBuffer(false); | 239 if (FLAG_concurrent_osr) FlushOsrBuffer(DO_NOT_RESTORE_FUNCTION_CODE); |
| 225 | 240 |
| 226 if (FLAG_trace_concurrent_recompilation) { | 241 if (FLAG_trace_concurrent_recompilation) { |
| 227 double percentage = time_spent_compiling_.PercentOf(time_spent_total_); | 242 double percentage = time_spent_compiling_.PercentOf(time_spent_total_); |
| 228 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); | 243 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); |
| 229 } | 244 } |
| 230 | 245 |
| 231 if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) && | 246 if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) && |
| 232 FLAG_concurrent_osr) { | 247 FLAG_concurrent_osr) { |
| 233 PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_); | 248 PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_); |
| 234 } | 249 } |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 267 | 282 |
| 268 | 283 |
// Main thread only: enqueues |job| for the compiler thread.  OSR jobs are
// placed at the front of the queue (presumably to keep on-stack-replacement
// latency low) and are additionally tracked in the OSR buffer.
void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
  ASSERT(IsQueueAvailable());
  ASSERT(!IsOptimizerThread());
  CompilationInfo* info = job->info();
  if (info->is_osr()) {
    osr_attempts_++;
    // Note: AddToOsrBuffer is deliberately called before taking mutex_.
    AddToOsrBuffer(job);
    // Add job to the front of the input queue.
    LockGuard<Mutex> access_input_queue(&mutex_);
    ASSERT_LT(input_queue_length_, input_queue_capacity_);
    // Move shift_ back by one.
    input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
    input_queue_[InputQueueIndex(0)] = job;
    input_queue_length_++;
  } else {
    // Add job to the back of the input queue.
    LockGuard<Mutex> access_input_queue(&mutex_);
    ASSERT_LT(input_queue_length_, input_queue_capacity_);
    input_queue_[InputQueueIndex(input_queue_length_)] = job;
    input_queue_length_++;
  }
  if (FLAG_block_concurrent_recompilation) {
    // Defer the semaphore signal; Unblock() will emit it later.
    blocked_jobs_++;
  } else {
    input_queue_semaphore_.Signal();
  }
}
| (...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 359 | 374 |
| 360 // Add to found slot and dispose the evicted job. | 375 // Add to found slot and dispose the evicted job. |
| 361 if (stale != NULL) { | 376 if (stale != NULL) { |
| 362 ASSERT(stale->IsWaitingForInstall()); | 377 ASSERT(stale->IsWaitingForInstall()); |
| 363 CompilationInfo* info = stale->info(); | 378 CompilationInfo* info = stale->info(); |
| 364 if (FLAG_trace_osr) { | 379 if (FLAG_trace_osr) { |
| 365 PrintF("[COSR - Discarded "); | 380 PrintF("[COSR - Discarded "); |
| 366 info->closure()->PrintName(); | 381 info->closure()->PrintName(); |
| 367 PrintF(", AST id %d]\n", info->osr_ast_id().ToInt()); | 382 PrintF(", AST id %d]\n", info->osr_ast_id().ToInt()); |
| 368 } | 383 } |
| 369 DisposeOptimizedCompileJob(stale, false); | 384 DisposeOptimizedCompileJob(stale, DO_NOT_RESTORE_FUNCTION_CODE); |
| 370 } | 385 } |
| 371 osr_buffer_[osr_buffer_cursor_] = job; | 386 osr_buffer_[osr_buffer_cursor_] = job; |
| 372 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; | 387 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; |
| 373 } | 388 } |
| 374 | 389 |
| 375 | 390 |
// Compiler thread only: brings the main thread to a safe point for the
// duration of a SynchronizedScope.  Skipped while flushing/stopping, since
// the main thread is then already blocked on stop_semaphore_ (see Run()).
void OptimizingCompilerThread::PauseMainThread() {
  ASSERT(IsOptimizerThread());
  // Request a stack check interrupt if we are not flushing or stopping.
  // Make sure the state does not change after the check using a mutex.
  { LockGuard<Mutex> check_switch_and_set_interrupt(&mutex_);
    if (static_cast<LoopSwitch>(Acquire_Load(&loop_switch_)) != CONTINUE) {
      return;
    }
    isolate_->stack_guard()->RequestCompilerSync();
  }
  // Wait until the main thread services the interrupt and yields
  // (see YieldToCompilerThread).
  compiler_thread_semaphore_.Wait();
}
| 403 |
| 404 |
// Compiler thread only: counterpart to PauseMainThread, lets the paused
// main thread resume.
// NOTE(review): this signals unconditionally, even when PauseMainThread
// returned early without pausing anyone -- presumably that leaves a stray
// count on main_thread_semaphore_; confirm against SynchronizedScope usage.
void OptimizingCompilerThread::ContinueMainThread() {
  ASSERT(IsOptimizerThread());
  main_thread_semaphore_.Signal();
}
| 409 |
| 410 |
// Main thread only: wakes the compiler thread that is blocked in
// PauseMainThread, then blocks until ContinueMainThread signals back.
// The signal/wait order here pairs with the wait/signal order on the
// compiler thread side.
void OptimizingCompilerThread::YieldToCompilerThread() {
  ASSERT(!IsOptimizerThread());
  // Attribute the blocked time to the "recompile synchronized" timer event.
  Logger::TimerEventScope scope(
      isolate_, Logger::TimerEventScope::v8_recompile_synchronized);
  compiler_thread_semaphore_.Signal();
  main_thread_semaphore_.Wait();
}
| 418 |
| 419 |
| 420 OptimizingCompilerThread::SynchronizedScope::SynchronizedScope( |
| 421 CompilationInfo* info) : info_(info) { |
| 422 Isolate* isolate = info_->isolate(); |
| 423 if (isolate->concurrent_recompilation_enabled() && info_->is_concurrent()) { |
| 424 isolate->optimizing_compiler_thread()->PauseMainThread(); |
| 425 } |
| 426 } |
| 427 |
| 428 |
| 429 OptimizingCompilerThread::SynchronizedScope::~SynchronizedScope() { |
| 430 Isolate* isolate = info_->isolate(); |
| 431 if (isolate->concurrent_recompilation_enabled() && info_->is_concurrent()) { |
| 432 isolate->optimizing_compiler_thread()->ContinueMainThread(); |
| 433 } |
| 434 } |
| 435 |
| 436 |
| 376 #ifdef DEBUG | 437 #ifdef DEBUG |
| 377 bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) { | 438 bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) { |
| 378 return isolate->concurrent_recompilation_enabled() && | 439 return isolate->concurrent_recompilation_enabled() && |
| 379 isolate->optimizing_compiler_thread()->IsOptimizerThread(); | 440 isolate->optimizing_compiler_thread()->IsOptimizerThread(); |
| 380 } | 441 } |
| 381 | 442 |
| 382 | 443 |
// Debug-only: true iff the calling thread is this compiler thread.
bool OptimizingCompilerThread::IsOptimizerThread() {
  // thread_id_ is presumably written when the compiler thread starts up
  // (not visible in this chunk); the mutex guards that cross-thread read.
  LockGuard<Mutex> lock_guard(&thread_id_mutex_);
  return ThreadId::Current().ToInteger() == thread_id_;
}
| 387 #endif | 448 #endif |
| 388 | 449 |
| 389 | 450 |
| 390 } } // namespace v8::internal | 451 } } // namespace v8::internal |
| OLD | NEW |