Chromium Code Reviews| Index: src/optimizing-compiler-thread.cc |
| diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc |
| index d21507084c5f9d0de5d3eba72831f0f464665329..ff2995469bd83853c7f80e73e58f43c6128b0d37 100644 |
| --- a/src/optimizing-compiler-thread.cc |
| +++ b/src/optimizing-compiler-thread.cc |
| @@ -74,7 +74,7 @@ void OptimizingCompilerThread::Run() { |
| OS::Sleep(FLAG_concurrent_recompilation_delay); |
| } |
| - switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) { |
| + switch (static_cast<LoopSwitch>(Acquire_Load(&loop_switch_))) { |
| case CONTINUE: |
| break; |
| case STOP: |
| @@ -86,9 +86,9 @@ void OptimizingCompilerThread::Run() { |
| case FLUSH: |
| // The main thread is blocked, waiting for the stop semaphore. |
| { AllowHandleDereference allow_handle_dereference; |
| - FlushInputQueue(true); |
| + FlushInputQueue(RESTORE_FUNCTION_CODE); |
| } |
| - Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); |
| + Release_Store(&loop_switch_, static_cast<AtomicWord>(CONTINUE)); |
| stop_semaphore_.Signal(); |
| // Return to start of consumer loop. |
| continue; |
| @@ -107,7 +107,7 @@ void OptimizingCompilerThread::Run() { |
| OptimizedCompileJob* OptimizingCompilerThread::NextInput() { |
| - LockGuard<Mutex> access_input_queue_(&input_queue_mutex_); |
| + LockGuard<Mutex> access_input_queue_(&mutex_); |
| if (input_queue_length_ == 0) return NULL; |
| OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; |
| ASSERT_NE(NULL, job); |
| @@ -134,11 +134,11 @@ void OptimizingCompilerThread::CompileNext() { |
| } |
| -static void DisposeOptimizedCompileJob(OptimizedCompileJob* job, |
| - bool restore_function_code) { |
| +void OptimizingCompilerThread::DisposeOptimizedCompileJob( |
| + OptimizedCompileJob* job, FlushMode mode) { |
| // The recompile job is allocated in the CompilationInfo's zone. |
| CompilationInfo* info = job->info(); |
| - if (restore_function_code) { |
| + if (mode == RESTORE_FUNCTION_CODE) { |
| if (info->is_osr()) { |
| if (!job->IsWaitingForInstall()) { |
| // Remove stack check that guards OSR entry on original code. |
| @@ -155,7 +155,7 @@ static void DisposeOptimizedCompileJob(OptimizedCompileJob* job, |
| } |
| -void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { |
| +void OptimizingCompilerThread::FlushInputQueue(FlushMode mode) { |
| OptimizedCompileJob* job; |
| while ((job = NextInput())) { |
| // This should not block, since we have one signal on the input queue |
| @@ -163,41 +163,61 @@ void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { |
| input_queue_semaphore_.Wait(); |
| // OSR jobs are dealt with separately. |
| if (!job->info()->is_osr()) { |
| - DisposeOptimizedCompileJob(job, restore_function_code); |
| + DisposeOptimizedCompileJob(job, mode); |
| } |
| } |
| } |
| -void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) { |
| +void OptimizingCompilerThread::FlushOutputQueue(FlushMode mode) { |
| OptimizedCompileJob* job; |
| while (output_queue_.Dequeue(&job)) { |
| // OSR jobs are dealt with separately. |
| if (!job->info()->is_osr()) { |
| - DisposeOptimizedCompileJob(job, restore_function_code); |
| + DisposeOptimizedCompileJob(job, mode); |
| } |
| } |
| } |
| -void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) { |
| +void OptimizingCompilerThread::FlushOsrBuffer(FlushMode mode) { |
| for (int i = 0; i < osr_buffer_capacity_; i++) { |
| if (osr_buffer_[i] != NULL) { |
| - DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code); |
| + DisposeOptimizedCompileJob(osr_buffer_[i], mode); |
| osr_buffer_[i] = NULL; |
| } |
| } |
| } |
| -void OptimizingCompilerThread::Flush() { |
| +void OptimizingCompilerThread::SetSwitchAndInterceptInterrupt( |
| + LoopSwitch loop_switch) { |
| + // The compiler thread may be waiting for the main (this) thread to handle |
| + // a stack check interrupt when entering a SynchronizedScope. |
| + // Use a mutex when changing the loop switch and checking the stack guard |
| + // state to avoid race with PauseMainThread. |
| + { LockGuard<Mutex> set_switch_and_check_interrupt(&mutex_); |
| + NoBarrier_Store(&loop_switch_, static_cast<AtomicWord>(loop_switch)); |
|
Hannes Payer (out of office)
2014/03/21 12:44:31
loop_switch_ is accessed with NoBarrier and Acquire_Load [comment truncated in review export]
Yang
2014/03/21 13:01:06
For the record, I was thinking the mutex scope would [comment truncated in review export]
|
| + if (!isolate_->stack_guard()->IsCompilerSyncRequest()) return; |
| + } |
| + isolate_->stack_guard()->Continue(COMPILER_SYNC); |
| + YieldToCompilerThread(); |
| +} |
| + |
| + |
| +void OptimizingCompilerThread::PrepareInterruption(LoopSwitch loop_switch) { |
| ASSERT(!IsOptimizerThread()); |
| - Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH)); |
| if (FLAG_block_concurrent_recompilation) Unblock(); |
| + SetSwitchAndInterceptInterrupt(loop_switch); |
| input_queue_semaphore_.Signal(); |
| stop_semaphore_.Wait(); |
| - FlushOutputQueue(true); |
| - if (FLAG_concurrent_osr) FlushOsrBuffer(true); |
| +} |
| + |
| + |
| +void OptimizingCompilerThread::Flush() { |
| + PrepareInterruption(FLUSH); |
| + FlushOutputQueue(RESTORE_FUNCTION_CODE); |
| + if (FLAG_concurrent_osr) FlushOsrBuffer(RESTORE_FUNCTION_CODE); |
| if (FLAG_trace_concurrent_recompilation) { |
| PrintF(" ** Flushed concurrent recompilation queues.\n"); |
| } |
| @@ -205,23 +225,18 @@ void OptimizingCompilerThread::Flush() { |
| void OptimizingCompilerThread::Stop() { |
| - ASSERT(!IsOptimizerThread()); |
| - Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP)); |
| - if (FLAG_block_concurrent_recompilation) Unblock(); |
| - input_queue_semaphore_.Signal(); |
| - stop_semaphore_.Wait(); |
| - |
| + PrepareInterruption(STOP); |
| if (FLAG_concurrent_recompilation_delay != 0) { |
| // At this point the optimizing compiler thread's event loop has stopped. |
| // There is no need for a mutex when reading input_queue_length_. |
| while (input_queue_length_ > 0) CompileNext(); |
| InstallOptimizedFunctions(); |
| } else { |
| - FlushInputQueue(false); |
| - FlushOutputQueue(false); |
| + FlushInputQueue(DO_NOT_RESTORE_FUNCTION_CODE); |
| + FlushOutputQueue(DO_NOT_RESTORE_FUNCTION_CODE); |
| } |
| - if (FLAG_concurrent_osr) FlushOsrBuffer(false); |
| + if (FLAG_concurrent_osr) FlushOsrBuffer(DO_NOT_RESTORE_FUNCTION_CODE); |
| if (FLAG_trace_concurrent_recompilation) { |
| double percentage = time_spent_compiling_.PercentOf(time_spent_total_); |
| @@ -274,7 +289,7 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) { |
| osr_attempts_++; |
| AddToOsrBuffer(job); |
| // Add job to the front of the input queue. |
| - LockGuard<Mutex> access_input_queue(&input_queue_mutex_); |
| + LockGuard<Mutex> access_input_queue(&mutex_); |
| ASSERT_LT(input_queue_length_, input_queue_capacity_); |
| // Move shift_ back by one. |
| input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); |
| @@ -282,7 +297,7 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) { |
| input_queue_length_++; |
| } else { |
| // Add job to the back of the input queue. |
| - LockGuard<Mutex> access_input_queue(&input_queue_mutex_); |
| + LockGuard<Mutex> access_input_queue(&mutex_); |
| ASSERT_LT(input_queue_length_, input_queue_capacity_); |
| input_queue_[InputQueueIndex(input_queue_length_)] = job; |
| input_queue_length_++; |
| @@ -366,13 +381,59 @@ void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) { |
| info->closure()->PrintName(); |
| PrintF(", AST id %d]\n", info->osr_ast_id().ToInt()); |
| } |
| - DisposeOptimizedCompileJob(stale, false); |
| + DisposeOptimizedCompileJob(stale, DO_NOT_RESTORE_FUNCTION_CODE); |
| } |
| osr_buffer_[osr_buffer_cursor_] = job; |
| osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; |
| } |
| +void OptimizingCompilerThread::PauseMainThread() { |
| + ASSERT(IsOptimizerThread()); |
| + // Request a stack check interrupt if we are not flushing or stopping. |
| + // Make sure the state does not change after the check using a mutex. |
| + { LockGuard<Mutex> check_switch_and_set_interrupt(&mutex_); |
| + if (static_cast<LoopSwitch>(NoBarrier_Load(&loop_switch_)) != CONTINUE) { |
| + return; |
| + } |
| + isolate_->stack_guard()->RequestCompilerSync(); |
| + } |
| + compiler_thread_semaphore_.Wait(); |
| +} |
| + |
| + |
| +void OptimizingCompilerThread::ContinueMainThread() { |
| + ASSERT(IsOptimizerThread()); |
| + main_thread_semaphore_.Signal(); |
| +} |
| + |
| + |
| +void OptimizingCompilerThread::YieldToCompilerThread() { |
| + ASSERT(!IsOptimizerThread()); |
| + Logger::TimerEventScope scope( |
| + isolate_, Logger::TimerEventScope::v8_recompile_synchronized); |
| + compiler_thread_semaphore_.Signal(); |
| + main_thread_semaphore_.Wait(); |
| +} |
| + |
| + |
| +OptimizingCompilerThread::SynchronizedScope::SynchronizedScope( |
| + CompilationInfo* info) : info_(info) { |
| + Isolate* isolate = info_->isolate(); |
| + if (isolate->concurrent_recompilation_enabled() && info_->is_concurrent()) { |
| + isolate->optimizing_compiler_thread()->PauseMainThread(); |
| + } |
| +} |
| + |
| + |
| +OptimizingCompilerThread::SynchronizedScope::~SynchronizedScope() { |
| + Isolate* isolate = info_->isolate(); |
| + if (isolate->concurrent_recompilation_enabled() && info_->is_concurrent()) { |
| + isolate->optimizing_compiler_thread()->ContinueMainThread(); |
| + } |
| +} |
| + |
| + |
| #ifdef DEBUG |
| bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) { |
| return isolate->concurrent_recompilation_enabled() && |