Chromium Code Reviews

Unified Diff: src/optimizing-compiler-thread.cc

Issue 816363003: Implement missing functionality for job based recompilation (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: updates | Created 6 years ago
Index: src/optimizing-compiler-thread.cc
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index 3b15a403d69d5218182aa7528f7226404d8478a8..6926f47ef29e4dda1c57ca5cf7e4b75e81971d04 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compiler-thread.cc
@@ -15,10 +15,34 @@
 namespace v8 {
 namespace internal {
+namespace {
+
+void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
+                                bool restore_function_code) {
+  // The recompile job is allocated in the CompilationInfo's zone.
+  CompilationInfo* info = job->info();
+  if (restore_function_code) {
+    if (info->is_osr()) {
+      if (!job->IsWaitingForInstall()) {
+        // Remove stack check that guards OSR entry on original code.
+        Handle<Code> code = info->unoptimized_code();
+        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
+        BackEdgeTable::RemoveStackCheck(code, offset);
+      }
+    } else {
+      Handle<JSFunction> function = info->closure();
+      function->ReplaceCode(function->shared()->code());
+    }
+  }
+  delete info;
+}
+
+}  // namespace
+
+
 class OptimizingCompilerThread::CompileTask : public v8::Task {
  public:
-  CompileTask(Isolate* isolate, OptimizedCompileJob* job)
-      : isolate_(isolate), job_(job) {}
+  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {}
   virtual ~CompileTask() {}
@@ -29,30 +53,41 @@ class OptimizingCompilerThread::CompileTask : public v8::Task {
     DisallowHandleAllocation no_handles;
     DisallowHandleDereference no_deref;
-    // The function may have already been optimized by OSR. Simply continue.
-    OptimizedCompileJob::Status status = job_->OptimizeGraph();
-    USE(status);  // Prevent an unused-variable error in release mode.
-    DCHECK(status != OptimizedCompileJob::FAILED);
+    TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
-    // The function may have already been optimized by OSR. Simply continue.
-    // Use a mutex to make sure that functions marked for install
-    // are always also queued.
-    {
-      base::LockGuard<base::Mutex> lock_guard(
-          &isolate_->optimizing_compiler_thread()->output_queue_mutex_);
-      isolate_->optimizing_compiler_thread()->output_queue_.Enqueue(job_);
+    OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
+
+    if (thread->recompilation_delay_ != 0) {
+      base::OS::Sleep(thread->recompilation_delay_);
+    }
+
+    StopFlag flag;
+    OptimizedCompileJob* job = thread->NextInput(&flag);
+
+    if (flag == CONTINUE) {
+      thread->CompileNext(job);
+    } else {
+      AllowHandleDereference allow_handle_dereference;
+      if (!job->info()->is_osr()) {
+        DisposeOptimizedCompileJob(job, true);
+      }
     }
-    isolate_->stack_guard()->RequestInstallCode();
+    bool signal = false;
     {
-      base::LockGuard<base::Mutex> lock_guard(
-          &isolate_->optimizing_compiler_thread()->input_queue_mutex_);
-      isolate_->optimizing_compiler_thread()->input_queue_length_--;
+      base::LockGuard<base::RecursiveMutex> lock(&thread->task_count_mutex_);
+      if (--thread->task_count_ == 0) {
+        if (static_cast<StopFlag>(base::Acquire_Load(&thread->stop_thread_)) ==
+            FLUSH) {
+          base::Release_Store(&thread->stop_thread_,
+                              static_cast<base::AtomicWord>(CONTINUE));
+          signal = true;
+        }
+      }
     }
-    isolate_->optimizing_compiler_thread()->input_queue_semaphore_.Signal();
+    if (signal) thread->stop_semaphore_.Signal();
   }
   Isolate* isolate_;
-  OptimizedCompileJob* job_;
   DISALLOW_COPY_AND_ASSIGN(CompileTask);
 };
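
Background on the hunk above: in job-based mode, CompileTask::Run no longer owns a specific job; each posted task drains one job from the shared input queue, and the task whose completion drops the in-flight count to zero while a flush is pending resets the flag and signals the waiting foreground thread. Below is a minimal standalone sketch of that pattern, assuming standard-library primitives (std::mutex, std::atomic, std::condition_variable) in place of V8's base ones; RecompilationQueue, RunOneTask, Compile and Dispose are invented names for illustration, not V8 APIs.

#include <atomic>
#include <condition_variable>
#include <deque>
#include <mutex>

enum StopFlag { CONTINUE, FLUSH, STOP };

struct Job {};  // stand-in for OptimizedCompileJob

struct RecompilationQueue {
  std::deque<Job*> input;            // guarded by input_mutex
  std::mutex input_mutex;

  std::atomic<StopFlag> stop_flag{CONTINUE};

  int task_count = 0;                // tasks posted but not yet finished
  std::mutex task_count_mutex;
  std::condition_variable flushed;   // stands in for stop_semaphore_

  // Body of one posted background task: take one job together with the
  // stop flag that is current at dequeue time, compile or dispose it, and
  // let the last task of a pending flush reset the flag and wake Flush().
  void RunOneTask() {
    Job* job;
    StopFlag flag;
    {
      std::lock_guard<std::mutex> lock(input_mutex);
      // One task is posted per queued job, so the queue cannot be empty.
      job = input.front();
      input.pop_front();
      flag = stop_flag.load(std::memory_order_acquire);
    }
    if (flag == CONTINUE) {
      Compile(job);
    } else {
      Dispose(job);
    }
    std::lock_guard<std::mutex> lock(task_count_mutex);
    if (--task_count == 0 &&
        stop_flag.load(std::memory_order_acquire) == FLUSH) {
      stop_flag.store(CONTINUE, std::memory_order_release);
      flushed.notify_one();  // wake the foreground thread waiting in Flush()
    }
  }

  void Flush();  // foreground side; sketched after the Flush() hunk below

  void Compile(Job*) {}
  void Dispose(Job*) {}
};
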
@@ -93,8 +128,8 @@ void OptimizingCompilerThread::Run() {
     input_queue_semaphore_.Wait();
     TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
-    if (FLAG_concurrent_recompilation_delay != 0) {
-      base::OS::Sleep(FLAG_concurrent_recompilation_delay);
+    if (recompilation_delay_ != 0) {
+      base::OS::Sleep(recompilation_delay_);
     }
     switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
@@ -121,7 +156,7 @@ void OptimizingCompilerThread::Run() {
     base::ElapsedTimer compiling_timer;
     if (tracing_enabled_) compiling_timer.Start();
-    CompileNext();
+    CompileNext(NextInput());
     if (tracing_enabled_) {
       time_spent_compiling_ += compiling_timer.Elapsed();
@@ -130,20 +165,27 @@ void OptimizingCompilerThread::Run() {
 }
-OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
+OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) {
   base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
-  DCHECK(!job_based_recompilation_);
-  if (input_queue_length_ == 0) return NULL;
+  if (input_queue_length_ == 0) {
+    if (flag) {
+      UNREACHABLE();
+      *flag = CONTINUE;
+    }
+    return NULL;
+  }
   OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
   DCHECK_NE(NULL, job);
   input_queue_shift_ = InputQueueIndex(1);
   input_queue_length_--;
+  if (flag) {
+    *flag = static_cast<StopFlag>(base::Acquire_Load(&stop_thread_));
+  }
   return job;
 }
-void OptimizingCompilerThread::CompileNext() {
-  OptimizedCompileJob* job = NextInput();
+void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) {
   DCHECK_NE(NULL, job);
   // The function may have already been optimized by OSR. Simply continue.
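
For context on NextInput: the input queue is a fixed-size circular buffer, where InputQueueIndex(i) maps the logical offset i to a physical slot relative to input_queue_shift_, so popping the front is just "shift = Index(1); length--". A small illustrative sketch of that pop, with an invented class name and element type and without the push/growth side:

#include <cstddef>

class RingQueue {
 public:
  explicit RingQueue(size_t capacity)
      : buffer_(new int[capacity]), capacity_(capacity) {}
  ~RingQueue() { delete[] buffer_; }

  // Physical slot of the element at logical offset i from the front.
  size_t Index(size_t i) const { return (shift_ + i) % capacity_; }

  bool PopFront(int* out) {
    if (length_ == 0) return false;
    *out = buffer_[Index(0)];
    shift_ = Index(1);  // advance the front slot...
    length_--;          // ...and shrink the logical length
    return true;
  }

 private:
  int* buffer_;        // fixed-capacity storage; int stands in for a job
  size_t capacity_;
  size_t shift_ = 0;   // physical index of the logical front
  size_t length_ = 0;  // number of queued elements
};
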
@@ -154,36 +196,17 @@ void OptimizingCompilerThread::CompileNext() {
   // The function may have already been optimized by OSR. Simply continue.
   // Use a mutex to make sure that functions marked for install
   // are always also queued.
+  if (job_based_recompilation_) output_queue_mutex_.Lock();
   output_queue_.Enqueue(job);
+  if (job_based_recompilation_) output_queue_mutex_.Unlock();
   isolate_->stack_guard()->RequestInstallCode();
 }
-static void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
-                                       bool restore_function_code) {
-  // The recompile job is allocated in the CompilationInfo's zone.
-  CompilationInfo* info = job->info();
-  if (restore_function_code) {
-    if (info->is_osr()) {
-      if (!job->IsWaitingForInstall()) {
-        // Remove stack check that guards OSR entry on original code.
-        Handle<Code> code = info->unoptimized_code();
-        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
-        BackEdgeTable::RemoveStackCheck(code, offset);
-      }
-    } else {
-      Handle<JSFunction> function = info->closure();
-      function->ReplaceCode(function->shared()->code());
-    }
-  }
-  delete info;
-}
-
-
 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
-  DCHECK(!job_based_recompilation_);
   OptimizedCompileJob* job;
   while ((job = NextInput())) {
+    DCHECK(!job_based_recompilation_);
     // This should not block, since we have one signal on the input queue
     // semaphore corresponding to each element in the input queue.
     input_queue_semaphore_.Wait();
@@ -196,6 +219,7 @@ void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
+  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
   OptimizedCompileJob* job;
   while (output_queue_.Dequeue(&job)) {
     // OSR jobs are dealt with separately.
@@ -218,12 +242,20 @@ void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
 void OptimizingCompilerThread::Flush() {
   DCHECK(!IsOptimizerThread());
-  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
-  if (FLAG_block_concurrent_recompilation) Unblock();
-  if (!job_based_recompilation_) {
-    input_queue_semaphore_.Signal();
-    stop_semaphore_.Wait();
+  bool block = true;
+  if (job_based_recompilation_) {
+    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
+    block = task_count_ > 0 || blocked_jobs_ > 0;
+    if (block) {
+      base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
+    }
+    if (FLAG_block_concurrent_recompilation) Unblock();
+  } else {
+    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
+    if (FLAG_block_concurrent_recompilation) Unblock();
   }
+  if (!job_based_recompilation_) input_queue_semaphore_.Signal();
+  if (block) stop_semaphore_.Wait();
   FlushOutputQueue(true);
   if (FLAG_concurrent_osr) FlushOsrBuffer(true);
   if (tracing_enabled_) {
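
Continuing the RecompilationQueue sketch above: this is the foreground side of the same handshake that the rewritten Flush() (and Stop()) use. The FLUSH flag is only raised, and only waited on, when tasks are actually in flight; otherwise the caller proceeds straight to discarding the output queue. Again an illustrative sketch rather than the V8 code:

void RecompilationQueue::Flush() {
  std::unique_lock<std::mutex> lock(task_count_mutex);
  if (task_count == 0) return;  // nothing in flight, nothing to wait for
  stop_flag.store(FLUSH, std::memory_order_release);
  // Wait until the last in-flight task resets the flag back to CONTINUE.
  flushed.wait(lock, [this] {
    return stop_flag.load(std::memory_order_acquire) == CONTINUE;
  });
  // The caller would now drain the output queue, as FlushOutputQueue does.
}
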
@@ -234,25 +266,25 @@ void OptimizingCompilerThread::Flush() {
 void OptimizingCompilerThread::Stop() {
   DCHECK(!IsOptimizerThread());
-  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
-  if (FLAG_block_concurrent_recompilation) Unblock();
-  if (!job_based_recompilation_) {
-    input_queue_semaphore_.Signal();
-    stop_semaphore_.Wait();
-  }
-
+  bool block = true;
   if (job_based_recompilation_) {
-    while (true) {
-      {
-        base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
-        if (!input_queue_length_) break;
-      }
-      input_queue_semaphore_.Wait();
+    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
+    block = task_count_ > 0 || blocked_jobs_ > 0;
+    if (block) {
+      base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
     }
-  } else if (FLAG_concurrent_recompilation_delay != 0) {
+    if (FLAG_block_concurrent_recompilation) Unblock();
+  } else {
+    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
+    if (FLAG_block_concurrent_recompilation) Unblock();
+  }
+  if (!job_based_recompilation_) input_queue_semaphore_.Signal();
+  if (block) stop_semaphore_.Wait();
+
+  if (recompilation_delay_ != 0) {
     // At this point the optimizing compiler thread's event loop has stopped.
     // There is no need for a mutex when reading input_queue_length_.
-    while (input_queue_length_ > 0) CompileNext();
+    while (input_queue_length_ > 0) CompileNext(NextInput());
     InstallOptimizedFunctions();
   } else {
     FlushInputQueue(false);
@@ -263,6 +295,7 @@ void OptimizingCompilerThread::Stop() {
   if (tracing_enabled_) {
     double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
+    if (job_based_recompilation_) percentage = 100.0;
     PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
   }
@@ -333,11 +366,13 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
     input_queue_[InputQueueIndex(input_queue_length_)] = job;
     input_queue_length_++;
   }
-  if (job_based_recompilation_) {
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new CompileTask(isolate_, job), v8::Platform::kShortRunningTask);
-  } else if (FLAG_block_concurrent_recompilation) {
+  if (FLAG_block_concurrent_recompilation) {
     blocked_jobs_++;
+  } else if (job_based_recompilation_) {
+    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
+    ++task_count_;
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
   } else {
     input_queue_semaphore_.Signal();
   }
@@ -346,11 +381,17 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
 void OptimizingCompilerThread::Unblock() {
   DCHECK(!IsOptimizerThread());
-  if (job_based_recompilation_) {
-    return;
+  {
+    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
+    task_count_ += blocked_jobs_;
   }
   while (blocked_jobs_ > 0) {
-    input_queue_semaphore_.Signal();
+    if (job_based_recompilation_) {
+      V8::GetCurrentPlatform()->CallOnBackgroundThread(
+          new CompileTask(isolate_), v8::Platform::kShortRunningTask);
+    } else {
+      input_queue_semaphore_.Signal();
+    }
     blocked_jobs_--;
   }
 }
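
The last two hunks change how --block-concurrent-recompilation interacts with job-based mode: QueueForOptimization only counts a blocked job, and Unblock() later converts each blocked job into either a posted CompileTask (job-based mode) or a semaphore signal (thread-based mode), bumping the task count first. A self-contained sketch of that bookkeeping, assuming a single mutex and std::function callbacks in place of the platform task API and the semaphore; all names are invented:

#include <deque>
#include <functional>
#include <mutex>

struct Scheduler {
  std::deque<int> input;            // int stands in for OptimizedCompileJob*
  std::mutex mutex;
  int blocked_jobs = 0;             // queued but not yet released
  int task_count = 0;               // released background tasks in flight
  bool job_based = true;
  bool block_recompilation = true;  // like FLAG_block_concurrent_recompilation

  std::function<void()> post_task;      // e.g. post a CompileTask
  std::function<void()> signal_worker;  // e.g. signal input_queue_semaphore_

  void Queue(int job) {
    std::lock_guard<std::mutex> lock(mutex);
    input.push_back(job);
    if (block_recompilation) {
      ++blocked_jobs;               // defer; Unblock() releases it later
    } else if (job_based) {
      ++task_count;
      post_task();
    } else {
      signal_worker();
    }
  }

  void Unblock() {
    std::lock_guard<std::mutex> lock(mutex);
    task_count += blocked_jobs;     // account for the tasks about to start
    while (blocked_jobs > 0) {
      if (job_based) {
        post_task();
      } else {
        signal_worker();
      }
      --blocked_jobs;
    }
  }
};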