| Index: src/cancelable-task.cc
|
| diff --git a/src/cancelable-task.cc b/src/cancelable-task.cc
|
| index 5927c22cdeb20b18081b4cdcdacbc6c3977ee72e..a60d80bc213f7bad581b56e56f33036011017429 100644
|
| --- a/src/cancelable-task.cc
|
| +++ b/src/cancelable-task.cc
|
| @@ -11,18 +11,100 @@ namespace v8 {
|
| namespace internal {
|
|
|
|
|
| -Cancelable::Cancelable(Isolate* isolate)
|
| - : isolate_(isolate), is_cancelled_(false) {
|
| - isolate->RegisterCancelableTask(this);
|
| +Cancelable::Cancelable(CancelableTaskManager* parent)
|
| + : parent_(parent), status_(kWaiting), id_(0), cancel_counter_(0) {
|
| + id_ = parent->Register(this);
|
| }
|
|
|
|
|
| Cancelable::~Cancelable() {
|
| - if (!is_cancelled_) {
|
| - isolate_->RemoveCancelableTask(this);
|
| + // Idle tasks could still be in kWaiting. Avoid race with shutdown of isolate
|
| + // by moving the state to kRunning.
|
| + if (TryRun() || IsRunning()) {
|
| + bool removed = parent_->Remove(id_);
|
| + USE(removed);
|
| + // This is counter intuitive but correct: Removing a task also involves
|
| + // canceling it, which is not possible anymore, since it is already in
|
| + // state running.
|
| + DCHECK(removed == false);
|
| }
|
| }
|
|
|
|
|
// Pointer-identity comparator used as the key-equality callback for the
// cancelable-task HashMap.
static bool ComparePointers(void* lhs, void* rhs) { return lhs == rhs; }
|
| +
|
| +
|
// Starts with an empty task map; Register() hands out ids beginning at 1
// because task_id_counter_ is pre-incremented before first use.
CancelableTaskManager::CancelableTaskManager()
    : task_id_counter_(0), cancelable_tasks_(ComparePointers) {}
|
| +
|
| +
|
| +uint32_t CancelableTaskManager::Register(Cancelable* task) {
|
| + base::LockGuard<base::Mutex> guard(&mutex_);
|
| + uint32_t id = ++task_id_counter_;
|
| + // If the id is still in use, try to find a new one.
|
| + while (cancelable_tasks_.Lookup(reinterpret_cast<void*>(id), id) != nullptr) {
|
| + ++id;
|
| + }
|
| + HashMap::Entry* entry =
|
| + cancelable_tasks_.LookupOrInsert(reinterpret_cast<void*>(id), id);
|
| + entry->value = task;
|
| + return id;
|
| +}
|
| +
|
| +
|
| +bool CancelableTaskManager::Remove(uint32_t id) {
|
| + base::LockGuard<base::Mutex> guard(&mutex_);
|
| + Cancelable* value = reinterpret_cast<Cancelable*>(
|
| + cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id));
|
| + if (value != nullptr) {
|
| + bool success = value->Cancel();
|
| + cancelable_tasks_barrier_.NotifyOne();
|
| + if (!success) return false;
|
| + return true;
|
| + }
|
| + return false;
|
| +}
|
| +
|
| +
|
| +void CancelableTaskManager::CancelAndWait() {
|
| + // Clean up all cancelable fore- and background tasks. Tasks are canceled on
|
| + // the way if possible, i.e., if they have not started yet. After each round
|
| + // of canceling we wait for the background tasks that have already been
|
| + // started.
|
| + base::LockGuard<base::Mutex> guard(&mutex_);
|
| +
|
| + // HashMap does not support removing while iterating, hence keep a set of
|
| + // entries that are to be removed.
|
| + std::set<uint32_t> to_remove;
|
| +
|
| + // Cancelable tasks could potentially register new tasks, requiring a loop
|
| + // here.
|
| + while (cancelable_tasks_.occupancy() > 0) {
|
| + for (HashMap::Entry* p = cancelable_tasks_.Start(); p != nullptr;
|
| + p = cancelable_tasks_.Next(p)) {
|
| + if (reinterpret_cast<Cancelable*>(p->value)->Cancel()) {
|
| + to_remove.insert(reinterpret_cast<Cancelable*>(p->value)->id());
|
| + }
|
| + }
|
| + // Remove tasks that were successfully canceled.
|
| + for (auto id : to_remove) {
|
| + cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
|
| + }
|
| + to_remove.clear();
|
| +
|
| + // Finally, wait for already running background tasks.
|
| + if (cancelable_tasks_.occupancy() > 0) {
|
| + cancelable_tasks_barrier_.Wait(&mutex_);
|
| + }
|
| + }
|
| +}
|
| +
|
| +
|
// Registers the task with the isolate's task manager; keeps |isolate| for
// use by the concrete task body.
CancelableTask::CancelableTask(Isolate* isolate)
    : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
|
| +
|
// Same registration scheme as CancelableTask, but for idle tasks.
CancelableIdleTask::CancelableIdleTask(Isolate* isolate)
    : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
|
| +
|
| } // namespace internal
|
| } // namespace v8
|
|
|