| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/optimizing-compiler-thread.h" | 5 #include "src/optimizing-compile-dispatcher.h" |
| 6 | 6 |
| 7 #include "src/v8.h" | 7 #include "src/v8.h" |
| 8 | 8 |
| 9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
| 10 #include "src/full-codegen.h" | 10 #include "src/full-codegen.h" |
| 11 #include "src/hydrogen.h" | 11 #include "src/hydrogen.h" |
| 12 #include "src/isolate.h" | 12 #include "src/isolate.h" |
| 13 #include "src/v8threads.h" | |
| 14 | 13 |
| 15 namespace v8 { | 14 namespace v8 { |
| 16 namespace internal { | 15 namespace internal { |
| 17 | 16 |
| 18 namespace { | 17 namespace { |
| 19 | 18 |
| 20 void DisposeOptimizedCompileJob(OptimizedCompileJob* job, | 19 void DisposeOptimizedCompileJob(OptimizedCompileJob* job, |
| 21 bool restore_function_code) { | 20 bool restore_function_code) { |
| 22 // The recompile job is allocated in the CompilationInfo's zone. | 21 // The recompile job is allocated in the CompilationInfo's zone. |
| 23 CompilationInfo* info = job->info(); | 22 CompilationInfo* info = job->info(); |
| 24 if (restore_function_code) { | 23 if (restore_function_code) { |
| 25 if (info->is_osr()) { | 24 if (info->is_osr()) { |
| 26 if (!job->IsWaitingForInstall()) { | 25 if (!job->IsWaitingForInstall()) { |
| 27 // Remove stack check that guards OSR entry on original code. | 26 // Remove stack check that guards OSR entry on original code. |
| 28 Handle<Code> code = info->unoptimized_code(); | 27 Handle<Code> code = info->unoptimized_code(); |
| 29 uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id()); | 28 uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id()); |
| 30 BackEdgeTable::RemoveStackCheck(code, offset); | 29 BackEdgeTable::RemoveStackCheck(code, offset); |
| 31 } | 30 } |
| 32 } else { | 31 } else { |
| 33 Handle<JSFunction> function = info->closure(); | 32 Handle<JSFunction> function = info->closure(); |
| 34 function->ReplaceCode(function->shared()->code()); | 33 function->ReplaceCode(function->shared()->code()); |
| 35 } | 34 } |
| 36 } | 35 } |
| 37 delete info; | 36 delete info; |
| 38 } | 37 } |
| 39 | 38 |
| 40 } // namespace | 39 } // namespace |
| 41 | 40 |
| 42 | 41 |
| 43 class OptimizingCompilerThread::CompileTask : public v8::Task { | 42 class OptimizingCompileDispatcher::CompileTask : public v8::Task { |
| 44 public: | 43 public: |
| 45 explicit CompileTask(Isolate* isolate) : isolate_(isolate) { | 44 explicit CompileTask(Isolate* isolate) : isolate_(isolate) { |
| 46 OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread(); | 45 OptimizingCompileDispatcher* dispatcher = |
| 47 base::LockGuard<base::Mutex> lock_guard(&thread->ref_count_mutex_); | 46 isolate_->optimizing_compile_dispatcher(); |
| 48 ++thread->ref_count_; | 47 base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_); |
| | 48 ++dispatcher->ref_count_; |
| 49 } | 49 } |
| 50 | 50 |
| 51 virtual ~CompileTask() {} | 51 virtual ~CompileTask() {} |
| 52 | 52 |
| 53 private: | 53 private: |
| 54 // v8::Task overrides. | 54 // v8::Task overrides. |
| 55 void Run() OVERRIDE { | 55 void Run() OVERRIDE { |
| 56 DisallowHeapAllocation no_allocation; | 56 DisallowHeapAllocation no_allocation; |
| 57 DisallowHandleAllocation no_handles; | 57 DisallowHandleAllocation no_handles; |
| 58 DisallowHandleDereference no_deref; | 58 DisallowHandleDereference no_deref; |
| 59 | 59 |
| 60 OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread(); | 60 OptimizingCompileDispatcher* dispatcher = |
| | 61 isolate_->optimizing_compile_dispatcher(); |
| 61 { | 62 { |
| 62 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); | 63 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); |
| 63 | 64 |
| 64 if (thread->recompilation_delay_ != 0) { | 65 if (dispatcher->recompilation_delay_ != 0) { |
| 65 base::OS::Sleep(thread->recompilation_delay_); | 66 base::OS::Sleep(dispatcher->recompilation_delay_); |
| 66 } | 67 } |
| 67 | 68 |
| 68 thread->CompileNext(thread->NextInput(true)); | 69 dispatcher->CompileNext(dispatcher->NextInput(true)); |
| 69 } | 70 } |
| 70 { | 71 { |
| 71 base::LockGuard<base::Mutex> lock_guard(&thread->ref_count_mutex_); | 72 base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_); |
| 72 if (--thread->ref_count_ == 0) { | 73 if (--dispatcher->ref_count_ == 0) { |
| 73 thread->ref_count_zero_.NotifyOne(); | 74 dispatcher->ref_count_zero_.NotifyOne(); |
| 74 } | 75 } |
| 75 } | 76 } |
| 76 } | 77 } |
| 77 | 78 |
| 78 Isolate* isolate_; | 79 Isolate* isolate_; |
| 79 | 80 |
| 80 DISALLOW_COPY_AND_ASSIGN(CompileTask); | 81 DISALLOW_COPY_AND_ASSIGN(CompileTask); |
| 81 }; | 82 }; |
| 82 | 83 |
| 83 | 84 |
| 84 OptimizingCompilerThread::~OptimizingCompilerThread() { | 85 OptimizingCompileDispatcher::~OptimizingCompileDispatcher() { |
| 85 #ifdef DEBUG | 86 #ifdef DEBUG |
| 86 { | 87 { |
| 87 base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_); | 88 base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_); |
| 88 DCHECK_EQ(0, ref_count_); | 89 DCHECK_EQ(0, ref_count_); |
| 89 } | 90 } |
| 90 #endif | 91 #endif |
| 91 DCHECK_EQ(0, input_queue_length_); | 92 DCHECK_EQ(0, input_queue_length_); |
| 92 DeleteArray(input_queue_); | 93 DeleteArray(input_queue_); |
| 93 if (FLAG_concurrent_osr) { | 94 if (FLAG_concurrent_osr) { |
| 94 #ifdef DEBUG | 95 #ifdef DEBUG |
| 95 for (int i = 0; i < osr_buffer_capacity_; i++) { | 96 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 96 CHECK_NULL(osr_buffer_[i]); | 97 CHECK_NULL(osr_buffer_[i]); |
| 97 } | 98 } |
| 98 #endif | 99 #endif |
| 99 DeleteArray(osr_buffer_); | 100 DeleteArray(osr_buffer_); |
| 100 } | 101 } |
| 101 } | 102 } |
| 102 | 103 |
| 103 | 104 |
| 104 void OptimizingCompilerThread::Run() { | 105 OptimizedCompileJob* OptimizingCompileDispatcher::NextInput( |
| 105 #ifdef DEBUG | |
| 106 { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); | |
| 107 thread_id_ = ThreadId::Current().ToInteger(); | |
| 108 } | |
| 109 #endif | |
| 110 DisallowHeapAllocation no_allocation; | |
| 111 DisallowHandleAllocation no_handles; | |
| 112 DisallowHandleDereference no_deref; | |
| 113 | |
| 114 if (job_based_recompilation_) { | |
| 115 return; | |
| 116 } | |
| 117 | |
| 118 base::ElapsedTimer total_timer; | |
| 119 if (tracing_enabled_) total_timer.Start(); | |
| 120 | |
| 121 while (true) { | |
| 122 input_queue_semaphore_.Wait(); | |
| 123 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); | |
| 124 | |
| 125 if (recompilation_delay_ != 0) { | |
| 126 base::OS::Sleep(recompilation_delay_); | |
| 127 } | |
| 128 | |
| 129 switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) { | |
| 130 case CONTINUE: | |
| 131 break; | |
| 132 case STOP: | |
| 133 if (tracing_enabled_) { | |
| 134 time_spent_total_ = total_timer.Elapsed(); | |
| 135 } | |
| 136 stop_semaphore_.Signal(); | |
| 137 return; | |
| 138 case FLUSH: | |
| 139 // The main thread is blocked, waiting for the stop semaphore. | |
| 140 { AllowHandleDereference allow_handle_dereference; | |
| 141 FlushInputQueue(true); | |
| 142 } | |
| 143 base::Release_Store(&stop_thread_, | |
| 144 static_cast<base::AtomicWord>(CONTINUE)); | |
| 145 stop_semaphore_.Signal(); | |
| 146 // Return to start of consumer loop. | |
| 147 continue; | |
| 148 } | |
| 149 | |
| 150 base::ElapsedTimer compiling_timer; | |
| 151 if (tracing_enabled_) compiling_timer.Start(); | |
| 152 | |
| 153 CompileNext(NextInput()); | |
| 154 | |
| 155 if (tracing_enabled_) { | |
| 156 time_spent_compiling_ += compiling_timer.Elapsed(); | |
| 157 } | |
| 158 } | |
| 159 } | |
| 160 | |
| 161 | |
| 162 OptimizedCompileJob* OptimizingCompilerThread::NextInput( | |
| 163 bool check_if_flushing) { | 106 bool check_if_flushing) { |
| 164 base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_); | 107 base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_); |
| 165 if (input_queue_length_ == 0) return NULL; | 108 if (input_queue_length_ == 0) return NULL; |
| 166 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; | 109 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; |
| 167 DCHECK_NOT_NULL(job); | 110 DCHECK_NOT_NULL(job); |
| 168 input_queue_shift_ = InputQueueIndex(1); | 111 input_queue_shift_ = InputQueueIndex(1); |
| 169 input_queue_length_--; | 112 input_queue_length_--; |
| 170 if (check_if_flushing) { | 113 if (check_if_flushing) { |
| 171 if (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_)) != CONTINUE) { | 114 if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) { |
| 172 if (!job->info()->is_osr()) { | 115 if (!job->info()->is_osr()) { |
| 173 AllowHandleDereference allow_handle_dereference; | 116 AllowHandleDereference allow_handle_dereference; |
| 174 DisposeOptimizedCompileJob(job, true); | 117 DisposeOptimizedCompileJob(job, true); |
| 175 } | 118 } |
| 176 return NULL; | 119 return NULL; |
| 177 } | 120 } |
| 178 } | 121 } |
| 179 return job; | 122 return job; |
| 180 } | 123 } |
| 181 | 124 |
| 182 | 125 |
| 183 void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) { | 126 void OptimizingCompileDispatcher::CompileNext(OptimizedCompileJob* job) { |
| 184 if (!job) return; | 127 if (!job) return; |
| 185 | 128 |
| 186 // The function may have already been optimized by OSR. Simply continue. | 129 // The function may have already been optimized by OSR. Simply continue. |
| 187 OptimizedCompileJob::Status status = job->OptimizeGraph(); | 130 OptimizedCompileJob::Status status = job->OptimizeGraph(); |
| 188 USE(status); // Prevent an unused-variable error in release mode. | 131 USE(status); // Prevent an unused-variable error in release mode. |
| 189 DCHECK(status != OptimizedCompileJob::FAILED); | 132 DCHECK(status != OptimizedCompileJob::FAILED); |
| 190 | 133 |
| 191 // The function may have already been optimized by OSR. Simply continue. | 134 // The function may have already been optimized by OSR. Simply continue. |
| 192 // Use a mutex to make sure that functions marked for install | 135 // Use a mutex to make sure that functions marked for install |
| 193 // are always also queued. | 136 // are always also queued. |
| 194 if (job_based_recompilation_) output_queue_mutex_.Lock(); | 137 base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_); |
| 195 output_queue_.Enqueue(job); | 138 output_queue_.push(job); |
| 196 if (job_based_recompilation_) output_queue_mutex_.Unlock(); | |
| 197 isolate_->stack_guard()->RequestInstallCode(); | 139 isolate_->stack_guard()->RequestInstallCode(); |
| 198 } | 140 } |
| 199 | 141 |
| 200 | 142 |
| 201 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) { | 143 void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) { |
| 202 OptimizedCompileJob* job; | 144 base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_); |
| 203 while ((job = NextInput())) { | 145 while (!output_queue_.empty()) { |
| 204 DCHECK(!job_based_recompilation_); | 146 OptimizedCompileJob* job = output_queue_.front(); |
| 205 // This should not block, since we have one signal on the input queue | 147 output_queue_.pop(); |
| 206 // semaphore corresponding to each element in the input queue. | 148 |
| 207 input_queue_semaphore_.Wait(); | |
| 208 // OSR jobs are dealt with separately. | 149 // OSR jobs are dealt with separately. |
| 209 if (!job->info()->is_osr()) { | 150 if (!job->info()->is_osr()) { |
| 210 DisposeOptimizedCompileJob(job, restore_function_code); | 151 DisposeOptimizedCompileJob(job, restore_function_code); |
| 211 } | |
| 212 } | |
| 213 } | |
| 214 | |
| 215 | |
| 216 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) { | |
| 217 OptimizedCompileJob* job; | |
| 218 while (output_queue_.Dequeue(&job)) { | |
| 219 // OSR jobs are dealt with separately. | |
| 220 if (!job->info()->is_osr()) { | |
| 221 DisposeOptimizedCompileJob(job, restore_function_code); | |
| 222 } | 152 } |
| 223 } | 153 } |
| 224 } | 154 } |
| 225 | 155 |
| 226 | 156 |
| 227 void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) { | 157 void OptimizingCompileDispatcher::FlushOsrBuffer(bool restore_function_code) { |
| 228 for (int i = 0; i < osr_buffer_capacity_; i++) { | 158 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 229 if (osr_buffer_[i] != NULL) { | 159 if (osr_buffer_[i] != NULL) { |
| 230 DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code); | 160 DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code); |
| 231 osr_buffer_[i] = NULL; | 161 osr_buffer_[i] = NULL; |
| 232 } | 162 } |
| 233 } | 163 } |
| 234 } | 164 } |
| 235 | 165 |
| 236 | 166 |
| 237 void OptimizingCompilerThread::Flush() { | 167 void OptimizingCompileDispatcher::Flush() { |
| 238 DCHECK(!IsOptimizerThread()); | 168 base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH)); |
| 239 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); | |
| 240 if (FLAG_block_concurrent_recompilation) Unblock(); | 169 if (FLAG_block_concurrent_recompilation) Unblock(); |
| 241 if (!job_based_recompilation_) { | 170 { |
| 242 input_queue_semaphore_.Signal(); | |
| 243 stop_semaphore_.Wait(); | |
| 244 } else { | |
| 245 base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_); | 171 base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_); |
| 246 while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_); | 172 while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_); |
| 247 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(CONTINUE)); | 173 base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE)); |
| 248 } | 174 } |
| 249 FlushOutputQueue(true); | 175 FlushOutputQueue(true); |
| 250 if (FLAG_concurrent_osr) FlushOsrBuffer(true); | 176 if (FLAG_concurrent_osr) FlushOsrBuffer(true); |
| 251 if (tracing_enabled_) { | 177 if (FLAG_trace_concurrent_recompilation) { |
| 252 PrintF(" ** Flushed concurrent recompilation queues.\n"); | 178 PrintF(" ** Flushed concurrent recompilation queues.\n"); |
| 253 } | 179 } |
| 254 } | 180 } |
| 255 | 181 |
| 256 | 182 |
| 257 void OptimizingCompilerThread::Stop() { | 183 void OptimizingCompileDispatcher::Stop() { |
| 258 DCHECK(!IsOptimizerThread()); | 184 base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH)); |
| 259 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP)); | |
| 260 if (FLAG_block_concurrent_recompilation) Unblock(); | 185 if (FLAG_block_concurrent_recompilation) Unblock(); |
| 261 if (!job_based_recompilation_) { | 186 { |
| 262 input_queue_semaphore_.Signal(); | |
| 263 stop_semaphore_.Wait(); | |
| 264 } else { | |
| 265 base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_); | 187 base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_); |
| 266 while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_); | 188 while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_); |
| 267 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(CONTINUE)); | 189 base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE)); |
| 268 } | 190 } |
| 269 | 191 |
| 270 if (recompilation_delay_ != 0) { | 192 if (recompilation_delay_ != 0) { |
| 271 // At this point the optimizing compiler thread's event loop has stopped. | 193 // At this point the optimizing compiler thread's event loop has stopped. |
| 272 // There is no need for a mutex when reading input_queue_length_. | 194 // There is no need for a mutex when reading input_queue_length_. |
| 273 while (input_queue_length_ > 0) CompileNext(NextInput()); | 195 while (input_queue_length_ > 0) CompileNext(NextInput()); |
| 274 InstallOptimizedFunctions(); | 196 InstallOptimizedFunctions(); |
| 275 } else { | 197 } else { |
| 276 FlushInputQueue(false); | |
| 277 FlushOutputQueue(false); | 198 FlushOutputQueue(false); |
| 278 } | 199 } |
| 279 | 200 |
| 280 if (FLAG_concurrent_osr) FlushOsrBuffer(false); | 201 if (FLAG_concurrent_osr) FlushOsrBuffer(false); |
| 281 | 202 |
| 282 if (tracing_enabled_) { | 203 if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) && |
| 283 double percentage = time_spent_compiling_.PercentOf(time_spent_total_); | 204 FLAG_concurrent_osr) { |
| 284 if (job_based_recompilation_) percentage = 100.0; | |
| 285 PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage); | |
| 286 } | |
| 287 | |
| 288 if ((FLAG_trace_osr || tracing_enabled_) && FLAG_concurrent_osr) { | |
| 289 PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_); | 205 PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_); |
| 290 } | 206 } |
| 291 | |
| 292 Join(); | |
| 293 } | 207 } |
| 294 | 208 |
| 295 | 209 |
| 296 void OptimizingCompilerThread::InstallOptimizedFunctions() { | 210 void OptimizingCompileDispatcher::InstallOptimizedFunctions() { |
| 297 DCHECK(!IsOptimizerThread()); | |
| 298 HandleScope handle_scope(isolate_); | 211 HandleScope handle_scope(isolate_); |
| 299 | 212 |
| 300 OptimizedCompileJob* job; | 213 base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_); |
| 301 while (output_queue_.Dequeue(&job)) { | 214 while (!output_queue_.empty()) { |
| | 215 OptimizedCompileJob* job = output_queue_.front(); |
| | 216 output_queue_.pop(); |
| 302 CompilationInfo* info = job->info(); | 217 CompilationInfo* info = job->info(); |
| 303 Handle<JSFunction> function(*info->closure()); | 218 Handle<JSFunction> function(*info->closure()); |
| 304 if (info->is_osr()) { | 219 if (info->is_osr()) { |
| 305 if (FLAG_trace_osr) { | 220 if (FLAG_trace_osr) { |
| 306 PrintF("[COSR - "); | 221 PrintF("[COSR - "); |
| 307 function->ShortPrint(); | 222 function->ShortPrint(); |
| 308 PrintF(" is ready for install and entry at AST id %d]\n", | 223 PrintF(" is ready for install and entry at AST id %d]\n", |
| 309 info->osr_ast_id().ToInt()); | 224 info->osr_ast_id().ToInt()); |
| 310 } | 225 } |
| 311 job->WaitForInstall(); | 226 job->WaitForInstall(); |
| 312 // Remove stack check that guards OSR entry on original code. | 227 // Remove stack check that guards OSR entry on original code. |
| 313 Handle<Code> code = info->unoptimized_code(); | 228 Handle<Code> code = info->unoptimized_code(); |
| 314 uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id()); | 229 uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id()); |
| 315 BackEdgeTable::RemoveStackCheck(code, offset); | 230 BackEdgeTable::RemoveStackCheck(code, offset); |
| 316 } else { | 231 } else { |
| 317 if (function->IsOptimized()) { | 232 if (function->IsOptimized()) { |
| 318 if (tracing_enabled_) { | 233 if (FLAG_trace_concurrent_recompilation) { |
| 319 PrintF(" ** Aborting compilation for "); | 234 PrintF(" ** Aborting compilation for "); |
| 320 function->ShortPrint(); | 235 function->ShortPrint(); |
| 321 PrintF(" as it has already been optimized.\n"); | 236 PrintF(" as it has already been optimized.\n"); |
| 322 } | 237 } |
| 323 DisposeOptimizedCompileJob(job, false); | 238 DisposeOptimizedCompileJob(job, false); |
| 324 } else { | 239 } else { |
| 325 Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job); | 240 Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job); |
| 326 function->ReplaceCode( | 241 function->ReplaceCode(code.is_null() ? function->shared()->code() |
| 327 code.is_null() ? function->shared()->code() : *code); | 242 : *code); |
| 328 } | 243 } |
| 329 } | 244 } |
| 330 } | 245 } |
| 331 } | 246 } |
| 332 | 247 |
| 333 | 248 |
| 334 void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) { | 249 void OptimizingCompileDispatcher::QueueForOptimization( |
| | 250 OptimizedCompileJob* job) { |
| 335 DCHECK(IsQueueAvailable()); | 251 DCHECK(IsQueueAvailable()); |
| 336 DCHECK(!IsOptimizerThread()); | |
| 337 CompilationInfo* info = job->info(); | 252 CompilationInfo* info = job->info(); |
| 338 if (info->is_osr()) { | 253 if (info->is_osr()) { |
| 339 osr_attempts_++; | 254 osr_attempts_++; |
| 340 AddToOsrBuffer(job); | 255 AddToOsrBuffer(job); |
| 341 // Add job to the front of the input queue. | 256 // Add job to the front of the input queue. |
| 342 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); | 257 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); |
| 343 DCHECK_LT(input_queue_length_, input_queue_capacity_); | 258 DCHECK_LT(input_queue_length_, input_queue_capacity_); |
| 344 // Move shift_ back by one. | 259 // Move shift_ back by one. |
| 345 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); | 260 input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1); |
| 346 input_queue_[InputQueueIndex(0)] = job; | 261 input_queue_[InputQueueIndex(0)] = job; |
| 347 input_queue_length_++; | 262 input_queue_length_++; |
| 348 } else { | 263 } else { |
| 349 // Add job to the back of the input queue. | 264 // Add job to the back of the input queue. |
| 350 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); | 265 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); |
| 351 DCHECK_LT(input_queue_length_, input_queue_capacity_); | 266 DCHECK_LT(input_queue_length_, input_queue_capacity_); |
| 352 input_queue_[InputQueueIndex(input_queue_length_)] = job; | 267 input_queue_[InputQueueIndex(input_queue_length_)] = job; |
| 353 input_queue_length_++; | 268 input_queue_length_++; |
| 354 } | 269 } |
| 355 if (FLAG_block_concurrent_recompilation) { | 270 if (FLAG_block_concurrent_recompilation) { |
| 356 blocked_jobs_++; | 271 blocked_jobs_++; |
| 357 } else if (job_based_recompilation_) { | 272 } else { |
| 358 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 273 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 359 new CompileTask(isolate_), v8::Platform::kShortRunningTask); | 274 new CompileTask(isolate_), v8::Platform::kShortRunningTask); |
| 360 } else { | |
| 361 input_queue_semaphore_.Signal(); | |
| 362 } | 275 } |
| 363 } | 276 } |
| 364 | 277 |
| 365 | 278 |
| 366 void OptimizingCompilerThread::Unblock() { | 279 void OptimizingCompileDispatcher::Unblock() { |
| 367 DCHECK(!IsOptimizerThread()); | |
| 368 while (blocked_jobs_ > 0) { | 280 while (blocked_jobs_ > 0) { |
| 369 if (job_based_recompilation_) { | 281 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 370 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 282 new CompileTask(isolate_), v8::Platform::kShortRunningTask); |
| 371 new CompileTask(isolate_), v8::Platform::kShortRunningTask); | |
| 372 } else { | |
| 373 input_queue_semaphore_.Signal(); | |
| 374 } | |
| 375 blocked_jobs_--; | 283 blocked_jobs_--; |
| 376 } | 284 } |
| 377 } | 285 } |
| 378 | 286 |
| 379 | 287 |
| 380 OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate( | 288 OptimizedCompileJob* OptimizingCompileDispatcher::FindReadyOSRCandidate( |
| 381 Handle<JSFunction> function, BailoutId osr_ast_id) { | 289 Handle<JSFunction> function, BailoutId osr_ast_id) { |
| 382 DCHECK(!IsOptimizerThread()); | |
| 383 for (int i = 0; i < osr_buffer_capacity_; i++) { | 290 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 384 OptimizedCompileJob* current = osr_buffer_[i]; | 291 OptimizedCompileJob* current = osr_buffer_[i]; |
| 385 if (current != NULL && | 292 if (current != NULL && current->IsWaitingForInstall() && |
| 386 current->IsWaitingForInstall() && | |
| 387 current->info()->HasSameOsrEntry(function, osr_ast_id)) { | 293 current->info()->HasSameOsrEntry(function, osr_ast_id)) { |
| 388 osr_hits_++; | 294 osr_hits_++; |
| 389 osr_buffer_[i] = NULL; | 295 osr_buffer_[i] = NULL; |
| 390 return current; | 296 return current; |
| 391 } | 297 } |
| 392 } | 298 } |
| 393 return NULL; | 299 return NULL; |
| 394 } | 300 } |
| 395 | 301 |
| 396 | 302 |
| 397 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function, | 303 bool OptimizingCompileDispatcher::IsQueuedForOSR(Handle<JSFunction> function, |
| 398 BailoutId osr_ast_id) { | 304 BailoutId osr_ast_id) { |
| 399 DCHECK(!IsOptimizerThread()); | |
| 400 for (int i = 0; i < osr_buffer_capacity_; i++) { | 305 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 401 OptimizedCompileJob* current = osr_buffer_[i]; | 306 OptimizedCompileJob* current = osr_buffer_[i]; |
| 402 if (current != NULL && | 307 if (current != NULL && |
| 403 current->info()->HasSameOsrEntry(function, osr_ast_id)) { | 308 current->info()->HasSameOsrEntry(function, osr_ast_id)) { |
| 404 return !current->IsWaitingForInstall(); | 309 return !current->IsWaitingForInstall(); |
| 405 } | 310 } |
| 406 } | 311 } |
| 407 return false; | 312 return false; |
| 408 } | 313 } |
| 409 | 314 |
| 410 | 315 |
| 411 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) { | 316 bool OptimizingCompileDispatcher::IsQueuedForOSR(JSFunction* function) { |
| 412 DCHECK(!IsOptimizerThread()); | |
| 413 for (int i = 0; i < osr_buffer_capacity_; i++) { | 317 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 414 OptimizedCompileJob* current = osr_buffer_[i]; | 318 OptimizedCompileJob* current = osr_buffer_[i]; |
| 415 if (current != NULL && *current->info()->closure() == function) { | 319 if (current != NULL && *current->info()->closure() == function) { |
| 416 return !current->IsWaitingForInstall(); | 320 return !current->IsWaitingForInstall(); |
| 417 } | 321 } |
| 418 } | 322 } |
| 419 return false; | 323 return false; |
| 420 } | 324 } |
| 421 | 325 |
| 422 | 326 |
| 423 void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) { | 327 void OptimizingCompileDispatcher::AddToOsrBuffer(OptimizedCompileJob* job) { |
| 424 DCHECK(!IsOptimizerThread()); | |
| 425 // Find the next slot that is empty or has a stale job. | 328 // Find the next slot that is empty or has a stale job. |
| 426 OptimizedCompileJob* stale = NULL; | 329 OptimizedCompileJob* stale = NULL; |
| 427 while (true) { | 330 while (true) { |
| 428 stale = osr_buffer_[osr_buffer_cursor_]; | 331 stale = osr_buffer_[osr_buffer_cursor_]; |
| 429 if (stale == NULL || stale->IsWaitingForInstall()) break; | 332 if (stale == NULL || stale->IsWaitingForInstall()) break; |
| 430 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; | 333 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; |
| 431 } | 334 } |
| 432 | 335 |
| 433 // Add to found slot and dispose the evicted job. | 336 // Add to found slot and dispose the evicted job. |
| 434 if (stale != NULL) { | 337 if (stale != NULL) { |
| 435 DCHECK(stale->IsWaitingForInstall()); | 338 DCHECK(stale->IsWaitingForInstall()); |
| 436 CompilationInfo* info = stale->info(); | 339 CompilationInfo* info = stale->info(); |
| 437 if (FLAG_trace_osr) { | 340 if (FLAG_trace_osr) { |
| 438 PrintF("[COSR - Discarded "); | 341 PrintF("[COSR - Discarded "); |
| 439 info->closure()->PrintName(); | 342 info->closure()->PrintName(); |
| 440 PrintF(", AST id %d]\n", info->osr_ast_id().ToInt()); | 343 PrintF(", AST id %d]\n", info->osr_ast_id().ToInt()); |
| 441 } | 344 } |
| 442 DisposeOptimizedCompileJob(stale, false); | 345 DisposeOptimizedCompileJob(stale, false); |
| 443 } | 346 } |
| 444 osr_buffer_[osr_buffer_cursor_] = job; | 347 osr_buffer_[osr_buffer_cursor_] = job; |
| 445 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; | 348 osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_; |
| 446 } | 349 } |
| 447 | |
| 448 | |
| 449 #ifdef DEBUG | |
| 450 bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) { | |
| 451 return isolate->concurrent_recompilation_enabled() && | |
| 452 isolate->optimizing_compiler_thread()->IsOptimizerThread(); | |
| 453 } | 350 } |
| 454 | 351 } // namespace v8::internal |
| 455 | |
| 456 bool OptimizingCompilerThread::IsOptimizerThread() { | |
| 457 base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); | |
| 458 return ThreadId::Current().ToInteger() == thread_id_; | |
| 459 } | |
| 460 #endif | |
| 461 | |
| 462 | |
| 463 } } // namespace v8::internal | |
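
A few notes on the patterns in this diff, each with a small standalone sketch. The sketches use standard-library primitives and hypothetical names rather than V8's base:: wrappers, so they illustrate the mechanisms rather than reproduce the actual implementation.

First, the CompileTask hunks replace the dedicated compiler thread with tasks posted to the platform's background pool. The task constructor increments ref_count_ under ref_count_mutex_ on the posting thread, and Run() decrements it, notifying ref_count_zero_ when it reaches zero; that is what lets Flush() and Stop() block until every posted task has drained, including tasks that have been posted but not yet scheduled. A minimal sketch of that handshake, assuming illustrative names:

```cpp
#include <condition_variable>
#include <mutex>
#include <thread>

// Illustrative analogue of the ref_count_ / ref_count_zero_ protocol:
// each task bumps the count when it is posted and drops it when done;
// a flushing thread blocks until the count reaches zero.
class TaskTracker {
 public:
  void TaskPosted() {  // mirrors the increment in CompileTask's constructor
    std::lock_guard<std::mutex> guard(mutex_);
    ++ref_count_;
  }
  void TaskFinished() {  // mirrors the decrement at the end of Run()
    std::lock_guard<std::mutex> guard(mutex_);
    if (--ref_count_ == 0) ref_count_zero_.notify_one();
  }
  // Blocks the caller until every posted task has finished.
  void WaitUntilIdle() {
    std::unique_lock<std::mutex> lock(mutex_);
    ref_count_zero_.wait(lock, [this] { return ref_count_ == 0; });
  }

 private:
  std::mutex mutex_;
  std::condition_variable ref_count_zero_;
  int ref_count_ = 0;
};

int main() {
  TaskTracker tracker;
  tracker.TaskPosted();  // counted before the task ever runs
  std::thread worker([&] { tracker.TaskFinished(); });
  tracker.WaitUntilIdle();  // returns once the worker is done
  worker.join();
  return 0;
}
```

Counting in the constructor rather than in Run() closes the window where a task exists in the pool's queue but would otherwise be invisible to Flush().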
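
Second, QueueForOptimization and NextInput treat input_queue_ as a fixed-capacity ring buffer: InputQueueIndex(i) (defined in the header) maps logical position i to slot (i + input_queue_shift_) % input_queue_capacity_, OSR jobs jump the line by stepping the shift back one slot, and dequeuing steps it forward. A sketch of the same index arithmetic, with illustrative names:

```cpp
#include <cassert>
#include <cstddef>

// Ring buffer matching the input_queue_ arithmetic: logical position i
// lives at physical slot (i + shift) % capacity.
template <typename T, size_t kCapacity>
class RingQueue {
 public:
  size_t Index(size_t i) const { return (i + shift_) % kCapacity; }

  void PushBack(T value) {  // ordinary jobs go to the back
    assert(length_ < kCapacity);
    slots_[Index(length_)] = value;
    ++length_;
  }
  void PushFront(T value) {  // OSR jobs are prepended
    assert(length_ < kCapacity);
    shift_ = Index(kCapacity - 1);  // step shift back by one, modulo capacity
    slots_[Index(0)] = value;
    ++length_;
  }
  T PopFront() {  // mirrors NextInput(): read slot 0, advance the shift
    assert(length_ > 0);
    T value = slots_[Index(0)];
    shift_ = Index(1);
    --length_;
    return value;
  }
  size_t length() const { return length_; }

 private:
  T slots_[kCapacity] = {};
  size_t shift_ = 0;
  size_t length_ = 0;
};

int main() {
  RingQueue<int, 8> queue;
  queue.PushBack(1);   // ordinary job
  queue.PushFront(2);  // urgent job jumps the line
  return queue.PopFront() == 2 ? 0 : 1;  // urgent job comes out first
}
```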
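
Finally, Flush() and Stop() publish the FLUSH mode with a release store, and NextInput(true) observes it with an acquire load on the task side, disposing of dequeued jobs instead of compiling them until the main thread restores COMPILE. The same release/acquire pairing in std::atomic terms, as a compilable sketch with illustrative names:

```cpp
#include <atomic>

enum ModeFlag { COMPILE, FLUSH };

// Analogue of mode_: the main thread publishes FLUSH with a release
// store; dequeuing workers read it with an acquire load and discard
// jobs while it is set.
std::atomic<ModeFlag> mode{COMPILE};

bool WorkerShouldDiscard() {
  return mode.load(std::memory_order_acquire) == FLUSH;
}

void MainThreadFlush() {
  mode.store(FLUSH, std::memory_order_release);
  // ... wait for the in-flight task count to reach zero, as in
  // Flush() above, then resume normal operation ...
  mode.store(COMPILE, std::memory_order_release);
}

int main() {
  MainThreadFlush();
  return WorkerShouldDiscard() ? 1 : 0;
}
```

The release store pairs with the acquire load so that a worker which sees FLUSH also sees everything the main thread wrote before setting it.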