Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 42 matching lines...) | |
| 53 | 53 |
| 54 while (true) { | 54 while (true) { |
| 55 input_queue_semaphore_->Wait(); | 55 input_queue_semaphore_->Wait(); |
| 56 Logger::TimerEventScope timer( | 56 Logger::TimerEventScope timer( |
| 57 isolate_, Logger::TimerEventScope::v8_recompile_parallel); | 57 isolate_, Logger::TimerEventScope::v8_recompile_parallel); |
| 58 | 58 |
| 59 if (FLAG_parallel_recompilation_delay != 0) { | 59 if (FLAG_parallel_recompilation_delay != 0) { |
| 60 OS::Sleep(FLAG_parallel_recompilation_delay); | 60 OS::Sleep(FLAG_parallel_recompilation_delay); |
| 61 } | 61 } |
| 62 | 62 |
| 63 if (Acquire_Load(&stop_thread_)) { | 63 switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) { |
| 64 stop_semaphore_->Signal(); | 64 case CONTINUE: |
| 65 if (FLAG_trace_parallel_recompilation) { | 65 break; |
| 66 time_spent_total_ = OS::Ticks() - epoch; | 66 case STOP: |
| 67 } | 67 if (FLAG_trace_parallel_recompilation) { |
| 68 return; | 68 time_spent_total_ = OS::Ticks() - epoch; |
| 69 } | |
| 70 stop_semaphore_->Signal(); | |
| 71 return; | |
| 72 case FLUSH: | |
| 73 // The main thread is blocked, waiting for the stop semaphore. | |
| 74 { AllowHandleDereference allow_handle_dereference; | |
| 75 FlushInputQueue(true); | |
| 76 } | |
| 77 NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0)); | |

**Hannes Payer (out of office)** 2013/08/07 09:17:32:
I think we need a memory barrier for both stores o…
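The concern raised above is about the two plain NoBarrier_Store calls (NEW lines 77 and 78). Below is a minimal, self-contained model of the ordering question, written with C++11 std::atomic rather than V8's internal atomicops (all names are illustrative stand-ins, not from the CL): if the store that resets the flag is a release store, a thread that acquire-loads the reset flag is guaranteed to also observe the cleared queue length.

```cpp
#include <atomic>
#include <cassert>
#include <thread>

// Illustrative stand-ins for the two fields in the CL (assumed names).
std::atomic<int> queue_length{5};
std::atomic<int> stop_thread{2};  // 2 == FLUSH, 0 == CONTINUE

void worker_flush() {
  // Clear the queue length, then publish the flag reset with release
  // semantics: everything sequenced before the release store becomes
  // visible to any thread that acquire-loads the stored value.
  queue_length.store(0, std::memory_order_relaxed);
  stop_thread.store(0, std::memory_order_release);
}

void main_thread() {
  // The acquire load pairs with the release store above.
  while (stop_thread.load(std::memory_order_acquire) != 0) {
  }
  // Guaranteed by the release/acquire pairing; with two relaxed stores
  // this assertion could legitimately fail.
  assert(queue_length.load(std::memory_order_relaxed) == 0);
}

int main() {
  std::thread t(worker_flush);
  main_thread();
  t.join();
}
```

Whether the stop_semaphore_ Signal/Wait pair in the CL already provides equivalent ordering is exactly the kind of question the review thread is raising.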
| 78 NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); | |
| 79 stop_semaphore_->Signal(); | |
| 80 // Return to start of consumer loop. | |
| 81 continue; | |
| 69 } | 82 } |
| 70 | 83 |
| 71 int64_t compiling_start = 0; | 84 int64_t compiling_start = 0; |
| 72 if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks(); | 85 if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks(); |
| 73 | 86 |
| 74 CompileNext(); | 87 CompileNext(); |
| 75 | 88 |
| 76 if (FLAG_trace_parallel_recompilation) { | 89 if (FLAG_trace_parallel_recompilation) { |
| 77 time_spent_compiling_ += OS::Ticks() - compiling_start; | 90 time_spent_compiling_ += OS::Ticks() - compiling_start; |
| 78 } | 91 } |
| 79 } | 92 } |
| 80 } | 93 } |
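Taken as a whole, the NEW Run() is a single-consumer loop: the thread sleeps on a semaphore that holds one permit per queued job (plus one extra permit per control request), and a shared StopFlag selects between compiling, exiting, and flushing. The following is a self-contained C++20 model of that control flow; the names and types are illustrative, not V8's.

```cpp
#include <atomic>
#include <cstdio>
#include <mutex>
#include <queue>
#include <semaphore>
#include <thread>

enum StopFlag { CONTINUE, STOP, FLUSH };

std::atomic<StopFlag> stop_thread{CONTINUE};
std::counting_semaphore<> input_sem{0};  // one permit per queued job
std::binary_semaphore stop_sem{0};       // worker -> main-thread handshake
std::mutex queue_mutex;
std::queue<int> input_queue;             // compile jobs, modeled as ints

void Enqueue(int job) {
  {
    std::lock_guard<std::mutex> lock(queue_mutex);
    input_queue.push(job);
  }
  input_sem.release();  // pair every queued element with one permit
}

void Run() {
  while (true) {
    input_sem.acquire();  // sleep until a job or a control request arrives
    switch (stop_thread.load(std::memory_order_acquire)) {
      case CONTINUE:
        break;  // compile one job below
      case STOP:
        stop_sem.release();  // acknowledge, then exit the thread
        return;
      case FLUSH: {
        // Drop every pending job, consuming its paired permit. The main
        // thread is blocked on stop_sem, so the queue cannot grow here.
        std::lock_guard<std::mutex> lock(queue_mutex);
        while (!input_queue.empty()) {
          input_queue.pop();
          input_sem.acquire();  // cannot block: one permit exists per job
        }
        stop_thread.store(CONTINUE, std::memory_order_release);
        stop_sem.release();  // unblock the waiting main thread
        continue;            // back to the top of the consumer loop
      }
    }
    int job;
    {
      std::lock_guard<std::mutex> lock(queue_mutex);
      if (input_queue.empty()) continue;  // defensive; see note below
      job = input_queue.front();
      input_queue.pop();
    }
    std::printf("compiled job %d\n", job);
  }
}

int main() {
  std::thread worker(Run);
  Enqueue(1);
  Enqueue(2);
  stop_thread.store(STOP, std::memory_order_release);
  input_sem.release();  // extra permit so the worker wakes and sees STOP
  stop_sem.acquire();   // wait for the worker's acknowledgement
  worker.join();
}
```

The defensive empty-queue check in the CONTINUE path is not in the CL; it only guards this model against a permit arriving without a matching job.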
| 81 | 94 |
| 82 | 95 |
| 83 void OptimizingCompilerThread::CompileNext() { | 96 void OptimizingCompilerThread::CompileNext() { |
| 84 OptimizingCompiler* optimizing_compiler = NULL; | 97 OptimizingCompiler* optimizing_compiler = NULL; |
| 85 input_queue_.Dequeue(&optimizing_compiler); | 98 bool result = input_queue_.Dequeue(&optimizing_compiler); |
| 99 USE(result); | |
| 100 ASSERT(result); | |
| 86 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); | 101 Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); |
| 87 | 102 |
| 88 // The function may have already been optimized by OSR. Simply continue. | 103 // The function may have already been optimized by OSR. Simply continue. |
| 89 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); | 104 OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); |
| 90 USE(status); // Prevent an unused-variable error in release mode. | 105 USE(status); // Prevent an unused-variable error in release mode. |
| 91 ASSERT(status != OptimizingCompiler::FAILED); | 106 ASSERT(status != OptimizingCompiler::FAILED); |
| 92 | 107 |
| 93 // The function may have already been optimized by OSR. Simply continue. | 108 // The function may have already been optimized by OSR. Simply continue. |
| 94 // Use a mutex to make sure that functions marked for install | 109 // Use a mutex to make sure that functions marked for install |
| 95 // are always also queued. | 110 // are always also queued. |
| 96 ScopedLock mark_and_queue(install_mutex_); | 111 ScopedLock mark_and_queue(install_mutex_); |
| 97 { Heap::RelocationLock relocation_lock(isolate_->heap()); | 112 { Heap::RelocationLock relocation_lock(isolate_->heap()); |
| 98 AllowHandleDereference ahd; | 113 AllowHandleDereference ahd; |
| 99 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); | 114 optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); |
| 100 } | 115 } |
| 101 output_queue_.Enqueue(optimizing_compiler); | 116 output_queue_.Enqueue(optimizing_compiler); |
| 102 } | 117 } |
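A small idiom worth noting in the NEW CompileNext(): Dequeue's result is captured so it can be ASSERTed in debug builds, and USE(result) silences the unused-variable warning in release builds, where ASSERT compiles away. A generic equivalent outside V8 (hypothetical names) might look like this:

```cpp
#include <cassert>

// V8's USE(x) is essentially a cast-to-void; assert() compiles to
// nothing under NDEBUG, which would otherwise leave `ok` unused.
template <typename T>
void use(const T&) {}

bool dequeue(int* out) { *out = 42; return true; }  // stand-in

int main() {
  int job;
  bool ok = dequeue(&job);
  use(ok);     // avoids -Wunused-variable when NDEBUG disables assert
  assert(ok);  // checked only in debug builds
}
```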
| 103 | 118 |
| 104 | 119 |
| 120 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { | |
| 121 OptimizingCompiler* optimizing_compiler; | |
| 122 // The optimizing compiler is allocated in the CompilationInfo's zone. | |
| 123 while (input_queue_.Dequeue(&optimizing_compiler)) { | |
| 124 // This should not block, since we have one signal on the input queue | |
| 125 // semaphore corresponding to each element in the input queue. | |
| 126 input_queue_semaphore_->Wait(); | |
| 127 CompilationInfo* info = optimizing_compiler->info(); | |
| 128 if (restore_function_code) { | |
| 129 Handle<JSFunction> function = info->closure(); | |
| 130 function->ReplaceCode(function->shared()->code()); | |
| 131 } | |
| 132 delete info; | |
| 133 } | |
| 134 } | |
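The "should not block" comment in FlushInputQueue relies on an invariant maintained elsewhere in this file: every element enqueued on the input queue is paired with exactly one Signal on input_queue_semaphore_. A tiny single-threaded sketch (hypothetical names, not V8 code) of why the per-element Wait can then never block:

```cpp
#include <queue>
#include <semaphore>

std::counting_semaphore<> sem{0};
std::queue<int> q;

// Invariant: one semaphore permit per queued element.
void enqueue(int v) {
  q.push(v);
  sem.release();
}

void drain() {
  while (!q.empty()) {
    q.pop();
    sem.acquire();  // cannot block: a permit exists for this element
  }
}

int main() {
  for (int i = 0; i < 3; ++i) enqueue(i);
  drain();  // consumes exactly three permits without ever blocking
}
```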
| 135 | |
| 136 | |
| 137 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) { | |
| 138 OptimizingCompiler* optimizing_compiler; | |
| 139 // The optimizing compiler is allocated in the CompilationInfo's zone. | |
| 140 while (output_queue_.Dequeue(&optimizing_compiler)) { | |
| 141 CompilationInfo* info = optimizing_compiler->info(); | |
| 142 if (restore_function_code) { | |
| 143 AllowHandleDereference allow_handle_dereference; | |
| 144 Handle<JSFunction> function = info->closure(); | |
| 145 function->ReplaceCode(function->shared()->code()); | |
| 146 } | |
| 147 delete info; | |
| 148 } | |
| 149 } | |
| 150 | |
| 151 | |
| 152 void OptimizingCompilerThread::Flush() { | |
| 153 ASSERT(!IsOptimizerThread()); | |
| 154 Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH)); | |
| 155 input_queue_semaphore_->Signal(); | |
| 156 stop_semaphore_->Wait(); | |
| 157 FlushOutputQueue(true); | |
| 158 } | |
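Flush() is the main-thread half of the FLUSH arm in Run(): it publishes the flag with a release store, signals the input-queue semaphore so the worker wakes even when no job is queued, then blocks until the worker acknowledges. Reusing the declarations from the Run() sketch above, the producer side of that handshake could look like this (model only, not V8 code):

```cpp
// Main-thread side of the flush handshake.
void Flush() {
  stop_thread.store(FLUSH, std::memory_order_release);
  input_sem.release();  // extra permit: wake the worker even if idle
  stop_sem.acquire();   // block until the worker drains and resets the flag
  // At this point stop_thread is CONTINUE again and the input queue is
  // empty; the output queue can now be drained safely on this thread.
}
```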
| 159 | |
| 160 | |
| 105 void OptimizingCompilerThread::Stop() { | 161 void OptimizingCompilerThread::Stop() { |
| 106 ASSERT(!IsOptimizerThread()); | 162 ASSERT(!IsOptimizerThread()); |
| 107 Release_Store(&stop_thread_, static_cast<AtomicWord>(true)); | 163 Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP)); |
| 108 input_queue_semaphore_->Signal(); | 164 input_queue_semaphore_->Signal(); |
| 109 stop_semaphore_->Wait(); | 165 stop_semaphore_->Wait(); |
| 110 | 166 |
| 111 if (FLAG_parallel_recompilation_delay != 0) { | 167 if (FLAG_parallel_recompilation_delay != 0) { |
| 112 // Barrier when loading queue length is not necessary since the write | 168 // Barrier when loading queue length is not necessary since the write |
| 113 // happens in CompileNext on the same thread. | 169 // happens in CompileNext on the same thread. |
| 170 // This is used only for testing. | |
| 114 while (NoBarrier_Load(&queue_length_) > 0) CompileNext(); | 171 while (NoBarrier_Load(&queue_length_) > 0) CompileNext(); |
| 115 InstallOptimizedFunctions(); | 172 InstallOptimizedFunctions(); |
| 116 } else { | 173 } else { |
| 117 OptimizingCompiler* optimizing_compiler; | 174 FlushInputQueue(false); |
| 118 // The optimizing compiler is allocated in the CompilationInfo's zone. | 175 FlushOutputQueue(false); |
| 119 while (input_queue_.Dequeue(&optimizing_compiler)) { | |
| 120 delete optimizing_compiler->info(); | |
| 121 } | |
| 122 while (output_queue_.Dequeue(&optimizing_compiler)) { | |
| 123 delete optimizing_compiler->info(); | |
| 124 } | |
| 125 } | 176 } |
| 126 | 177 |
| 127 if (FLAG_trace_parallel_recompilation) { | 178 if (FLAG_trace_parallel_recompilation) { |
| 128 double compile_time = static_cast<double>(time_spent_compiling_); | 179 double compile_time = static_cast<double>(time_spent_compiling_); |
| 129 double total_time = static_cast<double>(time_spent_total_); | 180 double total_time = static_cast<double>(time_spent_total_); |
| 130 double percentage = (compile_time * 100) / total_time; | 181 double percentage = (compile_time * 100) / total_time; |
| 131 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); | 182 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); |
| 132 } | 183 } |
| 133 | 184 |
| 134 Join(); | 185 Join(); |
| (...skipping 28 matching lines...) | |
| 163 #ifdef DEBUG | 214 #ifdef DEBUG |
| 164 bool OptimizingCompilerThread::IsOptimizerThread() { | 215 bool OptimizingCompilerThread::IsOptimizerThread() { |
| 165 if (!FLAG_parallel_recompilation) return false; | 216 if (!FLAG_parallel_recompilation) return false; |
| 166 ScopedLock lock(thread_id_mutex_); | 217 ScopedLock lock(thread_id_mutex_); |
| 167 return ThreadId::Current().ToInteger() == thread_id_; | 218 return ThreadId::Current().ToInteger() == thread_id_; |
| 168 } | 219 } |
| 169 #endif | 220 #endif |
| 170 | 221 |
| 171 | 222 |
| 172 } } // namespace v8::internal | 223 } } // namespace v8::internal |