Chromium Code Reviews

Unified Diff: src/optimizing-compile-dispatcher.cc

Issue 1087763003: Reland "Remove support for thread-based recompilation" (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: fix Created 5 years, 8 months ago
Index: src/optimizing-compile-dispatcher.cc
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compile-dispatcher.cc
similarity index 56%
rename from src/optimizing-compiler-thread.cc
rename to src/optimizing-compile-dispatcher.cc
index eda4f5ca9d9cb868a6c5df4cbc4b2675ff31f662..e422913241047d02e7de1a3448aee6b64b2acb8b 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compile-dispatcher.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/optimizing-compiler-thread.h"
+#include "src/optimizing-compile-dispatcher.h"
#include "src/v8.h"
@@ -10,7 +10,6 @@
#include "src/full-codegen.h"
#include "src/hydrogen.h"
#include "src/isolate.h"
-#include "src/v8threads.h"
namespace v8 {
namespace internal {
@@ -40,12 +39,13 @@ void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
} // namespace
-class OptimizingCompilerThread::CompileTask : public v8::Task {
+class OptimizingCompileDispatcher::CompileTask : public v8::Task {
public:
explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
- OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
- base::LockGuard<base::Mutex> lock_guard(&thread->ref_count_mutex_);
- ++thread->ref_count_;
+ OptimizingCompileDispatcher* dispatcher =
+ isolate_->optimizing_compile_dispatcher();
+ base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
+ ++dispatcher->ref_count_;
}
virtual ~CompileTask() {}
@@ -57,20 +57,21 @@ class OptimizingCompilerThread::CompileTask : public v8::Task {
DisallowHandleAllocation no_handles;
DisallowHandleDereference no_deref;
- OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
+ OptimizingCompileDispatcher* dispatcher =
+ isolate_->optimizing_compile_dispatcher();
{
TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
- if (thread->recompilation_delay_ != 0) {
- base::OS::Sleep(thread->recompilation_delay_);
+ if (dispatcher->recompilation_delay_ != 0) {
+ base::OS::Sleep(dispatcher->recompilation_delay_);
}
- thread->CompileNext(thread->NextInput(true));
+ dispatcher->CompileNext(dispatcher->NextInput(true));
}
{
- base::LockGuard<base::Mutex> lock_guard(&thread->ref_count_mutex_);
- if (--thread->ref_count_ == 0) {
- thread->ref_count_zero_.NotifyOne();
+ base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
+ if (--dispatcher->ref_count_ == 0) {
+ dispatcher->ref_count_zero_.NotifyOne();
}
}
}
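
Note: after this patch the CompileTask above is the only path background compilation takes. Its constructor bumps ref_count_ under ref_count_mutex_ on the thread that posts the task, and Run() drops the count and signals ref_count_zero_ when the last task finishes; that handshake is what lets Flush() and Stop() further down block until every in-flight task has drained. A minimal standalone sketch of the same handshake, using std:: primitives in place of V8's base::Mutex and base::ConditionVariable (TaskTracker and its method names are made up for illustration, not part of the patch):

#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

class TaskTracker {
 public:
  void TaskCreated() {   // cf. the CompileTask constructor above
    std::lock_guard<std::mutex> guard(mutex_);
    ++ref_count_;
  }
  void TaskFinished() {  // cf. the end of CompileTask::Run()
    std::lock_guard<std::mutex> guard(mutex_);
    if (--ref_count_ == 0) ref_count_zero_.notify_one();
  }
  void WaitForAll() {    // cf. the wait loop in Flush()/Stop()
    std::unique_lock<std::mutex> lock(mutex_);
    ref_count_zero_.wait(lock, [this] { return ref_count_ == 0; });
  }

 private:
  std::mutex mutex_;
  std::condition_variable ref_count_zero_;
  int ref_count_ = 0;
};

int main() {
  TaskTracker tracker;
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    tracker.TaskCreated();  // counted on the posting thread, like new CompileTask(...)
    workers.emplace_back([&tracker] {
      // ... one background compile job would run here ...
      tracker.TaskFinished();
    });
  }
  tracker.WaitForAll();  // returns once every posted task has finished
  for (auto& worker : workers) worker.join();
  return 0;
}

The same shape appears below in Flush() and Stop(), where the main thread waits on ref_count_zero_ while mode_ tells still-running tasks to discard rather than enqueue their results.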
@@ -81,7 +82,7 @@ class OptimizingCompilerThread::CompileTask : public v8::Task {
};
-OptimizingCompilerThread::~OptimizingCompilerThread() {
+OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
#ifdef DEBUG
{
base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
@@ -101,65 +102,7 @@ OptimizingCompilerThread::~OptimizingCompilerThread() {
}
-void OptimizingCompilerThread::Run() {
-#ifdef DEBUG
- { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
- thread_id_ = ThreadId::Current().ToInteger();
- }
-#endif
- DisallowHeapAllocation no_allocation;
- DisallowHandleAllocation no_handles;
- DisallowHandleDereference no_deref;
-
- if (job_based_recompilation_) {
- return;
- }
-
- base::ElapsedTimer total_timer;
- if (tracing_enabled_) total_timer.Start();
-
- while (true) {
- input_queue_semaphore_.Wait();
- TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
-
- if (recompilation_delay_ != 0) {
- base::OS::Sleep(recompilation_delay_);
- }
-
- switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
- case CONTINUE:
- break;
- case STOP:
- if (tracing_enabled_) {
- time_spent_total_ = total_timer.Elapsed();
- }
- stop_semaphore_.Signal();
- return;
- case FLUSH:
- // The main thread is blocked, waiting for the stop semaphore.
- { AllowHandleDereference allow_handle_dereference;
- FlushInputQueue(true);
- }
- base::Release_Store(&stop_thread_,
- static_cast<base::AtomicWord>(CONTINUE));
- stop_semaphore_.Signal();
- // Return to start of consumer loop.
- continue;
- }
-
- base::ElapsedTimer compiling_timer;
- if (tracing_enabled_) compiling_timer.Start();
-
- CompileNext(NextInput());
-
- if (tracing_enabled_) {
- time_spent_compiling_ += compiling_timer.Elapsed();
- }
- }
-}
-
-
-OptimizedCompileJob* OptimizingCompilerThread::NextInput(
+OptimizedCompileJob* OptimizingCompileDispatcher::NextInput(
bool check_if_flushing) {
base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
if (input_queue_length_ == 0) return NULL;
@@ -168,7 +111,7 @@ OptimizedCompileJob* OptimizingCompilerThread::NextInput(
input_queue_shift_ = InputQueueIndex(1);
input_queue_length_--;
if (check_if_flushing) {
- if (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_)) != CONTINUE) {
+ if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
if (!job->info()->is_osr()) {
AllowHandleDereference allow_handle_dereference;
DisposeOptimizedCompileJob(job, true);
@@ -180,42 +123,33 @@ OptimizedCompileJob* OptimizingCompilerThread::NextInput(
}
-void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) {
+void OptimizingCompileDispatcher::CompileNext(OptimizedCompileJob* job) {
if (!job) return;
// The function may have already been optimized by OSR. Simply continue.
OptimizedCompileJob::Status status = job->OptimizeGraph();
- USE(status); // Prevent an unused-variable error in release mode.
+ USE(status); // Prevent an unused-variable error in release mode.
DCHECK(status != OptimizedCompileJob::FAILED);
// The function may have already been optimized by OSR. Simply continue.
// Use a mutex to make sure that functions marked for install
// are always also queued.
- if (job_based_recompilation_) output_queue_mutex_.Lock();
- output_queue_.Enqueue(job);
- if (job_based_recompilation_) output_queue_mutex_.Unlock();
+ base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
+ output_queue_.push(job);
isolate_->stack_guard()->RequestInstallCode();
}
-void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
- OptimizedCompileJob* job;
- while ((job = NextInput())) {
- DCHECK(!job_based_recompilation_);
- // This should not block, since we have one signal on the input queue
- // semaphore corresponding to each element in the input queue.
- input_queue_semaphore_.Wait();
- // OSR jobs are dealt with separately.
- if (!job->info()->is_osr()) {
- DisposeOptimizedCompileJob(job, restore_function_code);
+void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
+ for (;;) {
+ OptimizedCompileJob* job = NULL;
+ {
+ base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
+ if (output_queue_.empty()) return;
+ job = output_queue_.front();
+ output_queue_.pop();
}
- }
-}
-
-void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
- OptimizedCompileJob* job;
- while (output_queue_.Dequeue(&job)) {
// OSR jobs are dealt with separately.
if (!job->info()->is_osr()) {
DisposeOptimizedCompileJob(job, restore_function_code);
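
The output queue also changes shape here: the old Dequeue()-based loop becomes what the push()/front()/pop() calls suggest is a std::queue (declared in the corresponding header change) guarded by output_queue_mutex_, drained one job at a time with the lock held only around front()/pop() so disposal or installation happens outside the critical section. A standalone sketch of that drain-loop shape, with an int standing in for OptimizedCompileJob* (names here are illustrative, not V8 API):

#include <cstdio>
#include <mutex>
#include <queue>

std::mutex output_queue_mutex;
std::queue<int> output_queue;  // an int stands in for OptimizedCompileJob*

void DrainOutputQueue() {
  for (;;) {
    int job;
    {
      // Hold the lock only long enough to take one element.
      std::lock_guard<std::mutex> guard(output_queue_mutex);
      if (output_queue.empty()) return;
      job = output_queue.front();
      output_queue.pop();
    }
    // Dispose/install work happens with the lock released.
    std::printf("handling job %d\n", job);
  }
}

int main() {
  for (int i = 0; i < 3; ++i) output_queue.push(i);
  DrainOutputQueue();
  return 0;
}

InstallOptimizedFunctions() below uses the identical pattern, which is why both loops can run on the main thread while background tasks keep appending to the queue.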
@@ -224,7 +158,7 @@ void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
}
-void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
+void OptimizingCompileDispatcher::FlushOsrBuffer(bool restore_function_code) {
for (int i = 0; i < osr_buffer_capacity_; i++) {
if (osr_buffer_[i] != NULL) {
DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
@@ -234,37 +168,29 @@ void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
}
-void OptimizingCompilerThread::Flush() {
- DCHECK(!IsOptimizerThread());
- base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
+void OptimizingCompileDispatcher::Flush() {
+ base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
if (FLAG_block_concurrent_recompilation) Unblock();
- if (!job_based_recompilation_) {
- input_queue_semaphore_.Signal();
- stop_semaphore_.Wait();
- } else {
+ {
base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
- base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(CONTINUE));
+ base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
}
FlushOutputQueue(true);
if (FLAG_concurrent_osr) FlushOsrBuffer(true);
- if (tracing_enabled_) {
+ if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Flushed concurrent recompilation queues.\n");
}
}
-void OptimizingCompilerThread::Stop() {
- DCHECK(!IsOptimizerThread());
- base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
+void OptimizingCompileDispatcher::Stop() {
+ base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
if (FLAG_block_concurrent_recompilation) Unblock();
- if (!job_based_recompilation_) {
- input_queue_semaphore_.Signal();
- stop_semaphore_.Wait();
- } else {
+ {
base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
- base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(CONTINUE));
+ base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
}
if (recompilation_delay_ != 0) {
@@ -273,32 +199,29 @@ void OptimizingCompilerThread::Stop() {
while (input_queue_length_ > 0) CompileNext(NextInput());
InstallOptimizedFunctions();
} else {
- FlushInputQueue(false);
FlushOutputQueue(false);
}
if (FLAG_concurrent_osr) FlushOsrBuffer(false);
- if (tracing_enabled_) {
- double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
- if (job_based_recompilation_) percentage = 100.0;
- PrintF(" ** Compiler thread did %.2f%% useful work\n", percentage);
- }
-
- if ((FLAG_trace_osr || tracing_enabled_) && FLAG_concurrent_osr) {
+ if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
+ FLAG_concurrent_osr) {
PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
}
-
- Join();
}
-void OptimizingCompilerThread::InstallOptimizedFunctions() {
- DCHECK(!IsOptimizerThread());
+void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
HandleScope handle_scope(isolate_);
- OptimizedCompileJob* job;
- while (output_queue_.Dequeue(&job)) {
+ for (;;) {
+ OptimizedCompileJob* job = NULL;
+ {
+ base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
+ if (output_queue_.empty()) return;
+ job = output_queue_.front();
+ output_queue_.pop();
+ }
CompilationInfo* info = job->info();
Handle<JSFunction> function(*info->closure());
if (info->is_osr()) {
@@ -315,7 +238,7 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
BackEdgeTable::RemoveStackCheck(code, offset);
} else {
if (function->IsOptimized()) {
- if (tracing_enabled_) {
+ if (FLAG_trace_concurrent_recompilation) {
PrintF(" ** Aborting compilation for ");
function->ShortPrint();
PrintF(" as it has already been optimized.\n");
@@ -323,17 +246,17 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
DisposeOptimizedCompileJob(job, false);
} else {
Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
- function->ReplaceCode(
- code.is_null() ? function->shared()->code() : *code);
+ function->ReplaceCode(code.is_null() ? function->shared()->code()
+ : *code);
}
}
}
}
-void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
+void OptimizingCompileDispatcher::QueueForOptimization(
+ OptimizedCompileJob* job) {
DCHECK(IsQueueAvailable());
- DCHECK(!IsOptimizerThread());
CompilationInfo* info = job->info();
if (info->is_osr()) {
osr_attempts_++;
@@ -354,36 +277,27 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
}
if (FLAG_block_concurrent_recompilation) {
blocked_jobs_++;
- } else if (job_based_recompilation_) {
+ } else {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new CompileTask(isolate_), v8::Platform::kShortRunningTask);
- } else {
- input_queue_semaphore_.Signal();
}
}
-void OptimizingCompilerThread::Unblock() {
- DCHECK(!IsOptimizerThread());
+void OptimizingCompileDispatcher::Unblock() {
while (blocked_jobs_ > 0) {
- if (job_based_recompilation_) {
- V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new CompileTask(isolate_), v8::Platform::kShortRunningTask);
- } else {
- input_queue_semaphore_.Signal();
- }
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new CompileTask(isolate_), v8::Platform::kShortRunningTask);
blocked_jobs_--;
}
}
-OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
+OptimizedCompileJob* OptimizingCompileDispatcher::FindReadyOSRCandidate(
Handle<JSFunction> function, BailoutId osr_ast_id) {
- DCHECK(!IsOptimizerThread());
for (int i = 0; i < osr_buffer_capacity_; i++) {
OptimizedCompileJob* current = osr_buffer_[i];
- if (current != NULL &&
- current->IsWaitingForInstall() &&
+ if (current != NULL && current->IsWaitingForInstall() &&
current->info()->HasSameOsrEntry(function, osr_ast_id)) {
osr_hits_++;
osr_buffer_[i] = NULL;
@@ -394,9 +308,8 @@ OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
}
-bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
- BailoutId osr_ast_id) {
- DCHECK(!IsOptimizerThread());
+bool OptimizingCompileDispatcher::IsQueuedForOSR(Handle<JSFunction> function,
+ BailoutId osr_ast_id) {
for (int i = 0; i < osr_buffer_capacity_; i++) {
OptimizedCompileJob* current = osr_buffer_[i];
if (current != NULL &&
@@ -408,8 +321,7 @@ bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
}
-bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
- DCHECK(!IsOptimizerThread());
+bool OptimizingCompileDispatcher::IsQueuedForOSR(JSFunction* function) {
for (int i = 0; i < osr_buffer_capacity_; i++) {
OptimizedCompileJob* current = osr_buffer_[i];
if (current != NULL && *current->info()->closure() == function) {
@@ -420,8 +332,7 @@ bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
}
-void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
- DCHECK(!IsOptimizerThread());
+void OptimizingCompileDispatcher::AddToOsrBuffer(OptimizedCompileJob* job) {
// Find the next slot that is empty or has a stale job.
OptimizedCompileJob* stale = NULL;
while (true) {
@@ -444,20 +355,5 @@ void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
osr_buffer_[osr_buffer_cursor_] = job;
osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
}
-
-
-#ifdef DEBUG
-bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
- return isolate->concurrent_recompilation_enabled() &&
- isolate->optimizing_compiler_thread()->IsOptimizerThread();
}
-
-
-bool OptimizingCompilerThread::IsOptimizerThread() {
- base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
- return ThreadId::Current().ToInteger() == thread_id_;
-}
-#endif
-
-
-} } // namespace v8::internal
+} // namespace v8::internal
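
Taken together, the patch removes the dedicated OptimizingCompilerThread (its Run() loop, input_queue_semaphore_, stop_semaphore_, thread-id bookkeeping, and the IsOptimizerThread() asserts) and leaves only job-based recompilation: QueueForOptimization() and Unblock() always post a CompileTask through V8::GetCurrentPlatform()->CallOnBackgroundThread(...), so compilation runs on whatever worker threads the embedder's platform provides. A rough standalone sketch of that hand-off; the Platform and Task classes below are simplified stand-ins for the idea, not the real v8::Platform API:

#include <cstdio>
#include <memory>
#include <thread>
#include <utility>
#include <vector>

// Simplified stand-ins for the v8::Task / v8::Platform concept.
class Task {
 public:
  virtual ~Task() = default;
  virtual void Run() = 0;
};

class Platform {
 public:
  // A real embedder would queue onto a shared worker pool; spawning a thread
  // per task keeps this sketch self-contained.
  void CallOnBackgroundThread(std::unique_ptr<Task> task) {
    workers_.emplace_back([t = std::move(task)]() mutable { t->Run(); });
  }
  ~Platform() {
    for (auto& worker : workers_) worker.join();
  }

 private:
  std::vector<std::thread> workers_;
};

class CompileTask : public Task {
 public:
  void Run() override { std::printf("compiling one queued job\n"); }
};

int main() {
  Platform platform;
  platform.CallOnBackgroundThread(std::make_unique<CompileTask>());
  return 0;  // ~Platform joins the worker, much as the ref-count wait drains tasks
}

Design-wise this trades the per-isolate thread plus semaphores for the mode_ flag and the ref-count handshake shown earlier, which is also why Stop() no longer calls Join().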