| Index: src/optimizing-compiler-thread.cc
|
| diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
|
| index 1f77d5ae05438ecbea55d04c6e6bf9a1e06e5fa4..6c6b9bd9ed70542eeda99bcedd12d99a54296d14 100644
|
| --- a/src/optimizing-compiler-thread.cc
|
| +++ b/src/optimizing-compiler-thread.cc
|
| @@ -108,12 +108,18 @@ void OptimizingCompilerThread::CompileNext() {
|
| // The function may have already been optimized by OSR. Simply continue.
|
| // Use a mutex to make sure that functions marked for install
|
| // are always also queued.
|
| - LockGuard<Mutex> mark_and_queue(&install_mutex_);
|
| - { Heap::RelocationLock relocation_lock(isolate_->heap());
|
| + if (!optimizing_compiler->info()->osr_ast_id().IsNone()) {
|
| + ASSERT(FLAG_speculative_concurrent_osr);
|
| + LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
|
| + osr_candidates_.RemoveElement(optimizing_compiler);
|
| + ready_for_osr_.Add(optimizing_compiler);
|
| + } else {
|
| + LockGuard<Mutex> mark_and_queue(&install_mutex_);
|
| + Heap::RelocationLock relocation_lock(isolate_->heap());
|
| AllowHandleDereference ahd;
|
| optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode();
|
| + output_queue_.Enqueue(optimizing_compiler);
|
| }
|
| - output_queue_.Enqueue(optimizing_compiler);
|
| }
|
|
|
|
|
| @@ -145,6 +151,14 @@ void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
|
| }
|
| delete info;
|
| }
|
| +
|
| + osr_candidates_.Clear();
|
| + while (ready_for_osr_.length() > 0) {
|
| + ASSERT(FLAG_speculative_concurrent_osr);
|
| + optimizing_compiler = ready_for_osr_.RemoveLast();
|
| + CompilationInfo* info = optimizing_compiler->info();
|
| + delete info;
|
| + }
|
| }
|
|
|
|
|
| @@ -179,6 +193,10 @@ void OptimizingCompilerThread::Stop() {
|
| PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
|
| }
|
|
|
| + if (FLAG_trace_osr && FLAG_speculative_concurrent_osr) {
|
| + PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
|
| + }
|
| +
|
| Join();
|
| }
|
|
|
| @@ -194,6 +212,10 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
|
| }
|
| Compiler::InstallOptimizedCode(compiler);
|
| }
|
| +
|
| + // Remove the oldest OSR candidates that are ready so that we
|
| + // only have a limited number of them waiting.
|
| + if (FLAG_speculative_concurrent_osr) RemoveStaleOSRCandidates();
|
| }
|
|
|
|
|
| @@ -202,12 +224,68 @@ void OptimizingCompilerThread::QueueForOptimization(
|
| ASSERT(IsQueueAvailable());
|
| ASSERT(!IsOptimizerThread());
|
| Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1));
|
| - optimizing_compiler->info()->closure()->MarkInRecompileQueue();
|
| + if (optimizing_compiler->info()->osr_ast_id().IsNone()) {
|
| + optimizing_compiler->info()->closure()->MarkInRecompileQueue();
|
| + } else {
|
| + LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
|
| + ASSERT(FLAG_speculative_concurrent_osr);
|
| + osr_candidates_.Add(optimizing_compiler);
|
| + osr_attempts_++;
|
| + }
|
| input_queue_.Enqueue(optimizing_compiler);
|
| input_queue_semaphore_->Signal();
|
| }
|
|
|
|
|
| +OptimizingCompiler* OptimizingCompilerThread::FindReadyOSRCandidate(
|
| + Handle<JSFunction> function, uint32_t osr_pc_offset) {
|
| + ASSERT(!IsOptimizerThread());
|
| + ASSERT(FLAG_speculative_concurrent_osr);
|
| + LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
|
| + for (int i = 0; i < ready_for_osr_.length(); i++) {
|
| + if (ready_for_osr_[i]->osr_pc_offset() == osr_pc_offset &&
|
| + ready_for_osr_[i]->info()->closure().is_identical_to(function)) {
|
| + osr_hits_++;
|
| + return ready_for_osr_.Remove(i);
|
| + }
|
| + }
|
| + return NULL;
|
| +}
|
| +
|
| +
|
| +bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
|
| + uint32_t osr_pc_offset) {
|
| + ASSERT(!IsOptimizerThread());
|
| + ASSERT(FLAG_speculative_concurrent_osr);
|
| + LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
|
| + for (int i = 0; i < osr_candidates_.length(); i++) {
|
| + if (osr_candidates_[i]->osr_pc_offset() == osr_pc_offset &&
|
| + osr_candidates_[i]->info()->closure().is_identical_to(function)) {
|
| + return true;
|
| + }
|
| + }
|
| + return false;
|
| +}
|
| +
|
| +
|
| +void OptimizingCompilerThread::RemoveStaleOSRCandidates(int limit) {
|
| + ASSERT(!IsOptimizerThread());
|
| + ASSERT(FLAG_speculative_concurrent_osr);
|
| + LockGuard<Mutex> access_osr_lists(&osr_list_mutex_);
|
| + while (ready_for_osr_.length() > limit) {
|
| + OptimizingCompiler* compiler = ready_for_osr_.Remove(0);
|
| + CompilationInfo* throw_away = compiler->info();
|
| + if (FLAG_trace_osr) {
|
| + PrintF(" ** Discarded ");
|
| + throw_away->closure()->PrintName();
|
| + PrintF(" that has been concurrently compiled for OSR at %d.\n",
|
| + throw_away->osr_ast_id().ToInt());
|
| + }
|
| + delete throw_away;
|
| + }
|
| +}
|
| +
|
| +
|
| #ifdef DEBUG
|
| bool OptimizingCompilerThread::IsOptimizerThread() {
|
| if (!FLAG_concurrent_recompilation) return false;
|
|
|