Chromium Code Reviews
| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/optimizing-compiler-thread.h" | 5 #include "src/optimizing-compiler-thread.h" |
| 6 | 6 |
| 7 #include "src/v8.h" | 7 #include "src/v8.h" |
| 8 | 8 |
| 9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
| 10 #include "src/full-codegen.h" | 10 #include "src/full-codegen.h" |
| (...skipping 24 matching lines...) Expand all Loading... | |
| 35 } | 35 } |
| 36 } | 36 } |
| 37 delete info; | 37 delete info; |
| 38 } | 38 } |
| 39 | 39 |
| 40 } // namespace | 40 } // namespace |
| 41 | 41 |
| 42 | 42 |
| 43 class OptimizingCompilerThread::CompileTask : public v8::Task { | 43 class OptimizingCompilerThread::CompileTask : public v8::Task { |
| 44 public: | 44 public: |
| 45 explicit CompileTask(Isolate* isolate) : isolate_(isolate) {} | 45 explicit CompileTask(Isolate* isolate) : isolate_(isolate) { |
| 46 base::NoBarrier_AtomicIncrement( | |
| 47 &isolate_->optimizing_compiler_thread()->ref_count_, 1); | |
| 48 } | |
| 46 | 49 |
| 47 virtual ~CompileTask() {} | 50 virtual ~CompileTask() {} |
| 48 | 51 |
| 49 private: | 52 private: |
| 50 // v8::Task overrides. | 53 // v8::Task overrides. |
| 51 void Run() OVERRIDE { | 54 void Run() OVERRIDE { |
| 52 DisallowHeapAllocation no_allocation; | 55 DisallowHeapAllocation no_allocation; |
| 53 DisallowHandleAllocation no_handles; | 56 DisallowHandleAllocation no_handles; |
| 54 DisallowHandleDereference no_deref; | 57 DisallowHandleDereference no_deref; |
| 55 | 58 |
| 56 OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread(); | 59 OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread(); |
| 60 StopFlag flag; | |
| 57 | 61 |
| 58 { | 62 { |
| 59 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); | 63 TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_); |
| 60 | 64 |
| 61 if (thread->recompilation_delay_ != 0) { | 65 if (thread->recompilation_delay_ != 0) { |
| 62 base::OS::Sleep(thread->recompilation_delay_); | 66 base::OS::Sleep(thread->recompilation_delay_); |
| 63 } | 67 } |
| 64 | 68 |
| 65 StopFlag flag; | |
| 66 OptimizedCompileJob* job = thread->NextInput(&flag); | 69 OptimizedCompileJob* job = thread->NextInput(&flag); |
| 67 | 70 |
| 68 if (flag == CONTINUE) { | 71 switch (flag) { |
| 69 thread->CompileNext(job); | 72 case CONTINUE: |
| 70 } else { | 73 thread->CompileNext(job); |
| 71 AllowHandleDereference allow_handle_dereference; | 74 break; |
| 72 if (!job->info()->is_osr()) { | 75 |
| 73 DisposeOptimizedCompileJob(job, true); | 76 case STOP: |
| 77 case FLUSH: { | |
| 78 AllowHandleDereference allow_handle_dereference; | |
| 79 if (!job->info()->is_osr()) { | |
| 80 DisposeOptimizedCompileJob(job, true); | |
| 81 } | |
| 82 break; | |
| 74 } | 83 } |
| 75 } | 84 } |
| 76 } | 85 } |
| 86 if (flag == STOP) { | |
| 87 base::Release_Store(&thread->stop_thread_, | |
| 88 static_cast<base::AtomicWord>(CONTINUE)); | |
| 89 thread->stop_semaphore_.Signal(); | |
| 90 } | |
| 77 | 91 |
| 78 bool signal = false; | 92 if (base::NoBarrier_AtomicIncrement(&thread->ref_count_, -1) == 0) { |
| 79 { | 93 thread->stop_semaphore_.Signal(); |
| 80 base::LockGuard<base::RecursiveMutex> lock(&thread->task_count_mutex_); | |
| 81 if (--thread->task_count_ == 0) { | |
| 82 if (static_cast<StopFlag>(base::Acquire_Load(&thread->stop_thread_)) == | |
| 83 FLUSH) { | |
| 84 base::Release_Store(&thread->stop_thread_, | |
| 85 static_cast<base::AtomicWord>(CONTINUE)); | |
| 86 signal = true; | |
| 87 } | |
| 88 } | |
| 89 } | 94 } |
| 90 if (signal) thread->stop_semaphore_.Signal(); | |
| 91 } | 95 } |
| 92 | 96 |
| 93 Isolate* isolate_; | 97 Isolate* isolate_; |
| 94 | 98 |
| 95 DISALLOW_COPY_AND_ASSIGN(CompileTask); | 99 DISALLOW_COPY_AND_ASSIGN(CompileTask); |
| 96 }; | 100 }; |
| 97 | 101 |
| 98 | 102 |
| 99 OptimizingCompilerThread::~OptimizingCompilerThread() { | 103 OptimizingCompilerThread::~OptimizingCompilerThread() { |
| 104 if (base::NoBarrier_AtomicIncrement(&ref_count_, -1) > 0) { | |
| 105 stop_semaphore_.Wait(); | |
| 106 } | |
| 100 DCHECK_EQ(0, input_queue_length_); | 107 DCHECK_EQ(0, input_queue_length_); |
| 101 DeleteArray(input_queue_); | 108 DeleteArray(input_queue_); |
| 102 if (FLAG_concurrent_osr) { | 109 if (FLAG_concurrent_osr) { |
| 103 #ifdef DEBUG | 110 #ifdef DEBUG |
| 104 for (int i = 0; i < osr_buffer_capacity_; i++) { | 111 for (int i = 0; i < osr_buffer_capacity_; i++) { |
| 105 CHECK_NULL(osr_buffer_[i]); | 112 CHECK_NULL(osr_buffer_[i]); |
| 106 } | 113 } |
| 107 #endif | 114 #endif |
| 108 DeleteArray(osr_buffer_); | 115 DeleteArray(osr_buffer_); |
| 109 } | 116 } |
| (...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 175 UNREACHABLE(); | 182 UNREACHABLE(); |
| 176 *flag = CONTINUE; | 183 *flag = CONTINUE; |
| 177 } | 184 } |
| 178 return NULL; | 185 return NULL; |
| 179 } | 186 } |
| 180 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; | 187 OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)]; |
| 181 DCHECK_NOT_NULL(job); | 188 DCHECK_NOT_NULL(job); |
| 182 input_queue_shift_ = InputQueueIndex(1); | 189 input_queue_shift_ = InputQueueIndex(1); |
| 183 input_queue_length_--; | 190 input_queue_length_--; |
| 184 if (flag) { | 191 if (flag) { |
| 185 *flag = static_cast<StopFlag>(base::Acquire_Load(&stop_thread_)); | 192 switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) { |
| 193 case CONTINUE: | |
| 194 *flag = CONTINUE; | |
| 195 break; | |
| 196 | |
| 197 case FLUSH: | |
| 198 if (input_queue_length_ == 0) | |
| 199 *flag = STOP; | |
|
Yang
2015/02/24 09:57:13
I'd prefer having brackets around if-else blocks.
| |
| 200 else | |
| 201 *flag = FLUSH; | |
| 202 break; | |
| 203 | |
| 204 case STOP: | |
| 205 UNREACHABLE(); | |
| 206 *flag = CONTINUE; | |
| 207 break; | |
| 208 } | |
| 186 } | 209 } |
| 187 return job; | 210 return job; |
| 188 } | 211 } |
| 189 | 212 |
| 190 | 213 |
| 191 void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) { | 214 void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) { |
| 192 DCHECK_NOT_NULL(job); | 215 DCHECK_NOT_NULL(job); |
| 193 | 216 |
| 194 // The function may have already been optimized by OSR. Simply continue. | 217 // The function may have already been optimized by OSR. Simply continue. |
| 195 OptimizedCompileJob::Status status = job->OptimizeGraph(); | 218 OptimizedCompileJob::Status status = job->OptimizeGraph(); |
| (...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 240 osr_buffer_[i] = NULL; | 263 osr_buffer_[i] = NULL; |
| 241 } | 264 } |
| 242 } | 265 } |
| 243 } | 266 } |
| 244 | 267 |
| 245 | 268 |
| 246 void OptimizingCompilerThread::Flush() { | 269 void OptimizingCompilerThread::Flush() { |
| 247 DCHECK(!IsOptimizerThread()); | 270 DCHECK(!IsOptimizerThread()); |
| 248 bool block = true; | 271 bool block = true; |
| 249 if (job_based_recompilation_) { | 272 if (job_based_recompilation_) { |
| 250 base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_); | 273 if (FLAG_block_concurrent_recompilation) Unblock(); |
| 251 block = task_count_ > 0 || blocked_jobs_ > 0; | 274 { |
| 252 if (block) { | 275 base::LockGuard<base::Mutex> lock(&input_queue_mutex_); |
| 253 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); | 276 block = input_queue_length_ > 0; |
| 277 if (block) { | |
| 278 base::Release_Store(&stop_thread_, | |
| 279 static_cast<base::AtomicWord>(FLUSH)); | |
| 280 } | |
| 254 } | 281 } |
| 255 if (FLAG_block_concurrent_recompilation) Unblock(); | |
| 256 } else { | 282 } else { |
| 257 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); | 283 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); |
| 258 if (FLAG_block_concurrent_recompilation) Unblock(); | 284 if (FLAG_block_concurrent_recompilation) Unblock(); |
| 259 } | 285 } |
| 260 if (!job_based_recompilation_) input_queue_semaphore_.Signal(); | 286 if (!job_based_recompilation_) input_queue_semaphore_.Signal(); |
| 261 if (block) stop_semaphore_.Wait(); | 287 if (block) stop_semaphore_.Wait(); |
| 262 FlushOutputQueue(true); | 288 FlushOutputQueue(true); |
| 263 if (FLAG_concurrent_osr) FlushOsrBuffer(true); | 289 if (FLAG_concurrent_osr) FlushOsrBuffer(true); |
| 264 if (tracing_enabled_) { | 290 if (tracing_enabled_) { |
| 265 PrintF(" ** Flushed concurrent recompilation queues.\n"); | 291 PrintF(" ** Flushed concurrent recompilation queues.\n"); |
| 266 } | 292 } |
| 267 } | 293 } |
| 268 | 294 |
| 269 | 295 |
| 270 void OptimizingCompilerThread::Stop() { | 296 void OptimizingCompilerThread::Stop() { |
| 271 DCHECK(!IsOptimizerThread()); | 297 DCHECK(!IsOptimizerThread()); |
| 272 bool block = true; | 298 bool block = true; |
| 273 if (job_based_recompilation_) { | 299 if (job_based_recompilation_) { |
| 274 base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_); | 300 if (FLAG_block_concurrent_recompilation) Unblock(); |
| 275 block = task_count_ > 0 || blocked_jobs_ > 0; | 301 { |
| 276 if (block) { | 302 base::LockGuard<base::Mutex> lock(&input_queue_mutex_); |
| 277 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH)); | 303 block = input_queue_length_ > 0; |
| 304 if (block) { | |
| 305 base::Release_Store(&stop_thread_, | |
| 306 static_cast<base::AtomicWord>(FLUSH)); | |
| 307 } | |
| 278 } | 308 } |
| 279 if (FLAG_block_concurrent_recompilation) Unblock(); | |
| 280 } else { | 309 } else { |
| 281 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP)); | 310 base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP)); |
| 282 if (FLAG_block_concurrent_recompilation) Unblock(); | 311 if (FLAG_block_concurrent_recompilation) Unblock(); |
| 283 } | 312 } |
| 284 if (!job_based_recompilation_) input_queue_semaphore_.Signal(); | 313 if (!job_based_recompilation_) input_queue_semaphore_.Signal(); |
| 285 if (block) stop_semaphore_.Wait(); | 314 if (block) stop_semaphore_.Wait(); |
| 286 | 315 |
| 287 if (recompilation_delay_ != 0) { | 316 if (recompilation_delay_ != 0) { |
| 288 // At this point the optimizing compiler thread's event loop has stopped. | 317 // At this point the optimizing compiler thread's event loop has stopped. |
| 289 // There is no need for a mutex when reading input_queue_length_. | 318 // There is no need for a mutex when reading input_queue_length_. |
| (...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 365 } else { | 394 } else { |
| 366 // Add job to the back of the input queue. | 395 // Add job to the back of the input queue. |
| 367 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); | 396 base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_); |
| 368 DCHECK_LT(input_queue_length_, input_queue_capacity_); | 397 DCHECK_LT(input_queue_length_, input_queue_capacity_); |
| 369 input_queue_[InputQueueIndex(input_queue_length_)] = job; | 398 input_queue_[InputQueueIndex(input_queue_length_)] = job; |
| 370 input_queue_length_++; | 399 input_queue_length_++; |
| 371 } | 400 } |
| 372 if (FLAG_block_concurrent_recompilation) { | 401 if (FLAG_block_concurrent_recompilation) { |
| 373 blocked_jobs_++; | 402 blocked_jobs_++; |
| 374 } else if (job_based_recompilation_) { | 403 } else if (job_based_recompilation_) { |
| 375 base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_); | |
| 376 ++task_count_; | |
| 377 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 404 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 378 new CompileTask(isolate_), v8::Platform::kShortRunningTask); | 405 new CompileTask(isolate_), v8::Platform::kShortRunningTask); |
| 379 } else { | 406 } else { |
| 380 input_queue_semaphore_.Signal(); | 407 input_queue_semaphore_.Signal(); |
| 381 } | 408 } |
| 382 } | 409 } |
| 383 | 410 |
| 384 | 411 |
| 385 void OptimizingCompilerThread::Unblock() { | 412 void OptimizingCompilerThread::Unblock() { |
| 386 DCHECK(!IsOptimizerThread()); | 413 DCHECK(!IsOptimizerThread()); |
| 387 { | |
| 388 base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_); | |
| 389 task_count_ += blocked_jobs_; | |
| 390 } | |
| 391 while (blocked_jobs_ > 0) { | 414 while (blocked_jobs_ > 0) { |
| 392 if (job_based_recompilation_) { | 415 if (job_based_recompilation_) { |
| 393 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 416 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 394 new CompileTask(isolate_), v8::Platform::kShortRunningTask); | 417 new CompileTask(isolate_), v8::Platform::kShortRunningTask); |
| 395 } else { | 418 } else { |
| 396 input_queue_semaphore_.Signal(); | 419 input_queue_semaphore_.Signal(); |
| 397 } | 420 } |
| 398 blocked_jobs_--; | 421 blocked_jobs_--; |
| 399 } | 422 } |
| 400 } | 423 } |
| (...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 477 | 500 |
| 478 | 501 |
| 479 bool OptimizingCompilerThread::IsOptimizerThread() { | 502 bool OptimizingCompilerThread::IsOptimizerThread() { |
| 480 base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); | 503 base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_); |
| 481 return ThreadId::Current().ToInteger() == thread_id_; | 504 return ThreadId::Current().ToInteger() == thread_id_; |
| 482 } | 505 } |
| 483 #endif | 506 #endif |
| 484 | 507 |
| 485 | 508 |
| 486 } } // namespace v8::internal | 509 } } // namespace v8::internal |
| OLD | NEW |