Chromium Code Reviews

Unified Diff: runtime/vm/thread_pool.cc

Issue 1177153005: Enables clean VM shutdown. (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: Kill isolates from the service isolate (created 5 years, 6 months ago)
Index: runtime/vm/thread_pool.cc
diff --git a/runtime/vm/thread_pool.cc b/runtime/vm/thread_pool.cc
index 5b3a713d2d09b2c32be7605f622e7671669e36c6..2541eacd32a95bafcc4438fd7dc6cbf66f9ebeb2 100644
--- a/runtime/vm/thread_pool.cc
+++ b/runtime/vm/thread_pool.cc
@@ -11,24 +11,27 @@ namespace dart {
DEFINE_FLAG(int, worker_timeout_millis, 5000,
"Free workers when they have been idle for this amount of time.");
+#if defined(USING_SIMULATOR)
+// The simulator can take much longer to finish running Dart code.
+DEFINE_FLAG(int, shutdown_timeout_millis, 15000,
+ "Amount of time to wait for a worker to stop during shutdown.");
+#else
+DEFINE_FLAG(int, shutdown_timeout_millis, 1000,
+ "Amount of time to wait for a worker to stop during shutdown.");
+#endif
-Monitor* ThreadPool::exit_monitor_ = NULL;
-int* ThreadPool::exit_count_ = NULL;
-
-ThreadPool::ThreadPool()
- : shutting_down_(false),
- all_workers_(NULL),
- idle_workers_(NULL),
- count_started_(0),
- count_stopped_(0),
- count_running_(0),
- count_idle_(0) {
-}
-
+Mutex ThreadPool::mutex_;
+bool ThreadPool::shutting_down_ = false;
+ThreadPool::Worker* ThreadPool::all_workers_ = NULL;
+ThreadPool::Worker* ThreadPool::idle_workers_ = NULL;
+uint64_t ThreadPool::count_started_ = 0;
+uint64_t ThreadPool::count_stopped_ = 0;
+uint64_t ThreadPool::count_running_ = 0;
+uint64_t ThreadPool::count_idle_ = 0;
+bool ThreadPool::shutdown_timeout_fatal_ = true;
-ThreadPool::~ThreadPool() {
- Shutdown();
-}
+Monitor ThreadPool::exit_monitor_;
+ThreadPool::Worker* ThreadPool::shutting_down_workers_ = NULL;
void ThreadPool::Run(Task* task) {
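The hunk above replaces the pool's instance state with process-wide statics guarded by a single mutex, so shutdown can be coordinated without holding a ThreadPool object. A minimal sketch of the same pattern in standard C++; the class and member names here are illustrative, not the VM's actual declarations:

#include <cstdint>
#include <mutex>

class Pool {
 public:
  static void NoteWorkerStarted() {
    // Every access to the shared counters goes through the one static mutex,
    // just as the patch routes all ThreadPool state through ThreadPool::mutex_.
    std::lock_guard<std::mutex> lock(mutex_);
    ++count_started_;
  }

 private:
  static std::mutex mutex_;
  static uint64_t count_started_;
};

// Static members are defined once at namespace scope, mirroring the
// Mutex ThreadPool::mutex_; definitions added by this hunk.
std::mutex Pool::mutex_;
uint64_t Pool::count_started_ = 0;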
@@ -42,7 +45,7 @@ void ThreadPool::Run(Task* task) {
return;
}
if (idle_workers_ == NULL) {
- worker = new Worker(this);
+ worker = new Worker();
ASSERT(worker != NULL);
new_worker = true;
count_started_++;
@@ -70,7 +73,7 @@ void ThreadPool::Run(Task* task) {
}
-void ThreadPool::Shutdown() {
+bool ThreadPool::Shutdown() {
Worker* saved = NULL;
{
MutexLocker ml(&mutex_);
@@ -94,14 +97,48 @@ void ThreadPool::Shutdown() {
}
// Release ThreadPool::mutex_ before calling Worker functions.
- Worker* current = saved;
- while (current != NULL) {
- // We may access all_next_ without holding ThreadPool::mutex_ here
- // because the worker is no longer owned by the ThreadPool.
- Worker* next = current->all_next_;
- current->all_next_ = NULL;
- current->Shutdown();
- current = next;
+ {
+ MonitorLocker eml(&ThreadPool::exit_monitor_);
+
+ // First tell all the workers to shut down.
+ Worker* current = saved;
+ while (current != NULL) {
+ Worker* next = current->all_next_;
+ if (current->id_ != OSThread::GetCurrentThreadId()) {
+ AddWorkerToShutdownList(current);
+ }
+ current->Shutdown();
+ current = next;
+ }
+ saved = NULL;
+
+ // Give workers a chance to exit gracefully.
+ const int64_t start_wait = OS::GetCurrentTimeMillis();
+ int timeout = FLAG_shutdown_timeout_millis;
+ while (shutting_down_workers_ != NULL) {
+ if (timeout > 0) {
+ // Here, we are waiting for workers to exit. When a worker exits, we will
+ // be notified.
+ eml.Wait(timeout);
+
+ // We decrement the timeout for the next wait by the amount of time
+ // we've already waited. If the new timeout drops to zero or below, we
+ // break out of this loop, which triggers the termination code below.
+ const int64_t after_wait = OS::GetCurrentTimeMillis();
+ timeout = FLAG_shutdown_timeout_millis - (after_wait - start_wait);
+ } else {
+ break;
+ }
+ }
+
+ // It is an error if the workers have not all exited within the timeout;
+ // we assume they have run off into the weeds, which is a bug.
+ bool timely_shutdown = shutting_down_workers_ == NULL;
+ if (!timely_shutdown && shutdown_timeout_fatal_) {
+ FATAL("Thread pool worker threads failed to exit.");
+ }
+
+ return timely_shutdown;
}
}
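The wait loop above subtracts the elapsed time from FLAG_shutdown_timeout_millis after each wakeup so that spurious notifications cannot extend the overall deadline. A self-contained sketch of the same bounded-wait pattern, assuming std::condition_variable and illustrative names (exit_cv_, workers_remaining, WaitForWorkers):

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>

std::mutex exit_mutex_;
std::condition_variable exit_cv_;
int workers_remaining = 3;  // decremented by exiting workers elsewhere

bool WaitForWorkers(int64_t timeout_millis) {
  std::unique_lock<std::mutex> lock(exit_mutex_);
  // An absolute deadline makes the "subtract what we already waited"
  // arithmetic from the patch implicit: every re-wait targets the same
  // point in time no matter how often we are woken early.
  const auto deadline = std::chrono::steady_clock::now() +
                        std::chrono::milliseconds(timeout_millis);
  while (workers_remaining > 0) {
    if (exit_cv_.wait_until(lock, deadline) == std::cv_status::timeout) {
      break;  // deadline passed with workers still running
    }
  }
  return workers_remaining == 0;  // true only for a timely shutdown
}

Returning a success flag instead of always aborting lets the caller decide whether a late worker is fatal, which is what the new shutdown_timeout_fatal_ switch controls.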
@@ -156,7 +193,7 @@ bool ThreadPool::RemoveWorkerFromAllList(Worker* worker) {
all_workers_ = worker->all_next_;
worker->all_next_ = NULL;
worker->owned_ = false;
- worker->pool_ = NULL;
+ worker->done_ = true;
return true;
}
@@ -206,6 +243,40 @@ bool ThreadPool::ReleaseIdleWorker(Worker* worker) {
}
+// Only call while holding the exit_monitor_.
+void ThreadPool::AddWorkerToShutdownList(Worker* worker) {
+ worker->shutdown_next_ = shutting_down_workers_;
+ shutting_down_workers_ = worker;
+}
+
+
+// Only call while holding the exit_monitor_.
+bool ThreadPool::RemoveWorkerFromShutdownList(Worker* worker) {
+ ASSERT(worker != NULL);
+ if (shutting_down_workers_ == NULL) {
+ return false;
+ }
+
+ // Special case head of list.
+ if (shutting_down_workers_ == worker) {
+ shutting_down_workers_ = worker->shutdown_next_;
+ worker->shutdown_next_ = NULL;
+ return true;
+ }
+
+ for (Worker* current = shutting_down_workers_;
+ current->shutdown_next_ != NULL;
+ current = current->shutdown_next_) {
+ if (current->shutdown_next_ == worker) {
+ current->shutdown_next_ = worker->shutdown_next_;
+ worker->shutdown_next_ = NULL;
+ return true;
+ }
+ }
+ return false;
+}
+
+
ThreadPool::Task::Task() {
}
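AddWorkerToShutdownList and RemoveWorkerFromShutdownList implement an intrusive singly linked list threaded through each worker's shutdown_next_ field: push is prepend at the head, and removal special-cases the head before walking the chain. A standalone sketch of the same operations with a hypothetical Node type standing in for Worker:

struct Node {
  Node* next = nullptr;  // plays the role of shutdown_next_
};

Node* head = nullptr;  // plays the role of shutting_down_workers_

void Push(Node* n) {  // mirrors AddWorkerToShutdownList
  n->next = head;
  head = n;
}

bool Remove(Node* n) {  // mirrors RemoveWorkerFromShutdownList
  if (head == nullptr) return false;
  if (head == n) {  // special-case the head, as the patch does
    head = n->next;
    n->next = nullptr;
    return true;
  }
  for (Node* cur = head; cur->next != nullptr; cur = cur->next) {
    if (cur->next == n) {
      cur->next = n->next;
      n->next = nullptr;
      return true;
    }
  }
  return false;  // n was not on the list
}

int main() {
  Node a, b;
  Push(&a);
  Push(&b);                  // list is now b -> a
  if (!Remove(&a)) return 1;  // unlinks a from the tail
  if (Remove(&a)) return 1;   // a second removal finds nothing, like the patch
  return 0;
}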
@@ -214,12 +285,15 @@ ThreadPool::Task::~Task() {
}
-ThreadPool::Worker::Worker(ThreadPool* pool)
- : pool_(pool),
+ThreadPool::Worker::Worker()
+ : done_(false),
task_(NULL),
+ id_(OSThread::kInvalidThreadId),
+ started_(false),
owned_(false),
all_next_(NULL),
- idle_next_(NULL) {
+ idle_next_(NULL),
+ shutdown_next_(NULL) {
}
@@ -264,7 +338,7 @@ static int64_t ComputeTimeout(int64_t idle_start) {
}
-void ThreadPool::Worker::Loop() {
+bool ThreadPool::Worker::Loop() {
MonitorLocker ml(&monitor_);
int64_t idle_start;
while (true) {
@@ -281,10 +355,10 @@ void ThreadPool::Worker::Loop() {
ASSERT(task_ == NULL);
if (IsDone()) {
- return;
+ return false;
}
- ASSERT(pool_ != NULL);
- pool_->SetIdle(this);
+ ASSERT(!done_);
+ ThreadPool::SetIdle(this);
idle_start = OS::GetCurrentTimeMillis();
while (true) {
Monitor::WaitResult result = ml.Wait(ComputeTimeout(idle_start));
@@ -294,21 +368,22 @@ void ThreadPool::Worker::Loop() {
break;
}
if (IsDone()) {
- return;
+ return false;
}
- if (result == Monitor::kTimedOut &&
- pool_->ReleaseIdleWorker(this)) {
- return;
+ if ((result == Monitor::kTimedOut) &&
+ ThreadPool::ReleaseIdleWorker(this)) {
+ return true;
}
}
}
UNREACHABLE();
+ return false;
}
void ThreadPool::Worker::Shutdown() {
MonitorLocker ml(&monitor_);
- pool_ = NULL; // Fail fast if someone tries to access pool_.
+ done_ = true;
ml.Notify();
}
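Loop() now reports how the worker exited: true when it retired after idling past the timeout (the worker then deletes itself), false when done_ was set by Shutdown() (the exit_monitor_ handshake in Main() takes over). A sketch of that two-exit protocol with standard primitives; the names are stand-ins, not the VM's:

#include <chrono>
#include <condition_variable>
#include <mutex>

std::mutex monitor_;
std::condition_variable cv_;
bool done_ = false;
const auto kIdleTimeout = std::chrono::milliseconds(5000);  // illustrative

// Shutdown-side handshake, mirroring Worker::Shutdown(): set done_ under
// the worker's monitor, then wake the worker.
void RequestStop() {
  std::lock_guard<std::mutex> lock(monitor_);
  done_ = true;
  cv_.notify_one();
}

// Worker-side, mirroring the patch's Loop() contract: returns false when
// told to stop (pool shutdown), true when it retires after idling too long.
bool LoopOnce() {
  std::unique_lock<std::mutex> lock(monitor_);
  if (cv_.wait_for(lock, kIdleTimeout, [] { return done_; })) {
    return false;  // done_ was set: caller must notify the exit monitor
  }
  return true;  // idle timeout: the worker releases and deletes itself
}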
@@ -317,20 +392,55 @@ void ThreadPool::Worker::Shutdown() {
void ThreadPool::Worker::Main(uword args) {
Thread::EnsureInit();
Worker* worker = reinterpret_cast<Worker*>(args);
- worker->Loop();
+ bool delete_self = false;
+
+ {
+ MonitorLocker ml(&(worker->monitor_));
+ if (worker->IsDone()) {
+ // id_ hasn't been set yet, but the ThreadPool is being shut down.
+ // Delete the task, and return.
+ ASSERT(worker->task_);
+ delete worker->task_;
+ worker->task_ = NULL;
+ delete_self = true;
+ } else {
+ worker->id_ = OSThread::GetCurrentThreadId();
+ worker->started_ = true;
+ }
+ }
+
+ // We aren't able to delete the worker while holding the worker's monitor.
+ // Now that we have released it, and we know that ThreadPool::Shutdown
+ // won't touch it again, we can delete it and return.
+ if (delete_self) {
+ MonitorLocker eml(&ThreadPool::exit_monitor_);
+ RemoveWorkerFromShutdownList(worker);
+ delete worker;
+ eml.Notify();
+ return;
+ }
+
+ bool released = worker->Loop();
// It should be okay to access these unlocked here in this assert.
- ASSERT(!worker->owned_ &&
- worker->all_next_ == NULL &&
- worker->idle_next_ == NULL);
-
- // The exit monitor is only used during testing.
- if (ThreadPool::exit_monitor_) {
- MonitorLocker ml(ThreadPool::exit_monitor_);
- (*ThreadPool::exit_count_)++;
- ml.Notify();
+ // worker->all_next_ is retained by the pool for shutdown monitoring.
+ ASSERT(!worker->owned_ && (worker->idle_next_ == NULL));
+
+ if (!released) {
+ // This worker is exiting because the thread pool is being shut down.
+ // Inform the thread pool that we are exiting. We remove this worker from
+ // the shutting_down_workers_ list because the ThreadPool will not need to
+ // take any further action for this worker.
+ MonitorLocker eml(&ThreadPool::exit_monitor_);
+ worker->id_ = OSThread::kInvalidThreadId;
+ RemoveWorkerFromShutdownList(worker);
+ delete worker;
+ eml.Notify();
+ } else {
+ // This worker is going down because it was idle for too long. This case
+ // is not due to a ThreadPool Shutdown. Thus, we simply delete the worker.
+ delete worker;
}
- delete worker;
#if defined(TARGET_OS_WINDOWS)
Thread::CleanUp();
#endif
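On the shutdown path, Main() completes the handshake: the exiting worker unlinks itself under exit_monitor_, frees itself, and notifies the thread blocked in Shutdown() so it can re-check the list. A condensed sketch with illustrative names, showing only the head-of-list unlink (the patch handles the general case via RemoveWorkerFromShutdownList):

#include <condition_variable>
#include <mutex>

struct Worker {
  Worker* shutdown_next = nullptr;
};

std::mutex exit_mutex_;               // plays the role of exit_monitor_
std::condition_variable exit_cv_;
Worker* shutting_down_workers_ = nullptr;

void OnWorkerExit(Worker* w) {
  std::lock_guard<std::mutex> lock(exit_mutex_);
  if (shutting_down_workers_ == w) {  // head-only case; the patch walks the list
    shutting_down_workers_ = w->shutdown_next;
  }
  delete w;
  exit_cv_.notify_one();  // wake Shutdown() so it re-checks the list
}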
