Index: src/optimizing-compiler-thread.cc
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index 21ef237107194d06a897110444c01710eae31bd0..337ddd4b2d2d01e1682aaed65cb62276655beaf0 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compiler-thread.cc
@@ -60,12 +60,25 @@ void OptimizingCompilerThread::Run() {
       OS::Sleep(FLAG_parallel_recompilation_delay);
     }
 
-    if (Acquire_Load(&stop_thread_)) {
-      stop_semaphore_->Signal();
-      if (FLAG_trace_parallel_recompilation) {
-        time_spent_total_ = OS::Ticks() - epoch;
-      }
-      return;
+    switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
+      case CONTINUE:
+        break;
+      case STOP:
+        if (FLAG_trace_parallel_recompilation) {
+          time_spent_total_ = OS::Ticks() - epoch;
+        }
+        stop_semaphore_->Signal();
+        return;
+      case FLUSH:
+        // The main thread is blocked, waiting for the stop semaphore.
+        { AllowHandleDereference allow_handle_dereference;
+          FlushInputQueue(true);
+        }
+        Release_Store(&queue_length_, static_cast<AtomicWord>(0));
+        Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
+        stop_semaphore_->Signal();
+        // Return to start of consumer loop.
+        continue;
     }
 
     int64_t compiling_start = 0;
@@ -82,7 +95,9 @@ void OptimizingCompilerThread::Run() {
 
 void OptimizingCompilerThread::CompileNext() {
   OptimizingCompiler* optimizing_compiler = NULL;
-  input_queue_.Dequeue(&optimizing_compiler);
+  bool result = input_queue_.Dequeue(&optimizing_compiler);
+  USE(result);
+  ASSERT(result);
   Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
 
   // The function may have already been optimized by OSR. Simply continue.
@@ -102,26 +117,61 @@ void OptimizingCompilerThread::CompileNext() {
 }
 
 
+void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
+  OptimizingCompiler* optimizing_compiler;
+  // The optimizing compiler is allocated in the CompilationInfo's zone.
+  while (input_queue_.Dequeue(&optimizing_compiler)) {
+    // This should not block, since we have one signal on the input queue
+    // semaphore corresponding to each element in the input queue.
+    input_queue_semaphore_->Wait();
+    CompilationInfo* info = optimizing_compiler->info();
+    if (restore_function_code) {
+      Handle<JSFunction> function = info->closure();
+      function->ReplaceCode(function->shared()->code());
+    }
+    delete info;
+  }
+}
+
+
+void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
+  OptimizingCompiler* optimizing_compiler;
+  // The optimizing compiler is allocated in the CompilationInfo's zone.
+  while (output_queue_.Dequeue(&optimizing_compiler)) {
+    CompilationInfo* info = optimizing_compiler->info();
+    if (restore_function_code) {
+      Handle<JSFunction> function = info->closure();
+      function->ReplaceCode(function->shared()->code());
+    }
+    delete info;
+  }
+}
+
+
+void OptimizingCompilerThread::Flush() {
+  ASSERT(!IsOptimizerThread());
+  Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
+  input_queue_semaphore_->Signal();
+  stop_semaphore_->Wait();
+  FlushOutputQueue(true);
+}
+
+
 void OptimizingCompilerThread::Stop() {
   ASSERT(!IsOptimizerThread());
-  Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+  Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
   input_queue_semaphore_->Signal();
   stop_semaphore_->Wait();
 
   if (FLAG_parallel_recompilation_delay != 0) {
     // Barrier when loading queue length is not necessary since the write
     // happens in CompileNext on the same thread.
+    // This is used only for testing.
     while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
     InstallOptimizedFunctions();
   } else {
-    OptimizingCompiler* optimizing_compiler;
-    // The optimizing compiler is allocated in the CompilationInfo's zone.
-    while (input_queue_.Dequeue(&optimizing_compiler)) {
-      delete optimizing_compiler->info();
-    }
-    while (output_queue_.Dequeue(&optimizing_compiler)) {
-      delete optimizing_compiler->info();
-    }
+    FlushInputQueue(false);
+    FlushOutputQueue(false);
  }
 
   if (FLAG_trace_parallel_recompilation) {