Chromium Code Reviews

Index: src/cancelable-task.cc
diff --git a/src/cancelable-task.cc b/src/cancelable-task.cc
index 5927c22cdeb20b18081b4cdcdacbc6c3977ee72e..41fb2da2e41aeb0d02b5efff6aefb37eef9a44f8 100644
--- a/src/cancelable-task.cc
+++ b/src/cancelable-task.cc
@@ -11,18 +11,95 @@ namespace v8 {
 namespace internal {
-Cancelable::Cancelable(Isolate* isolate)
-    : isolate_(isolate), is_cancelled_(false) {
-  isolate->RegisterCancelableTask(this);
+Cancelable::Cancelable(CancelableTaskManager* parent)
+    : parent_(parent), status_(kWaiting), id_(0), cancel_counter_(0) {
+  id_ = parent->Register(this);
 }
 Cancelable::~Cancelable() {
-  if (!is_cancelled_) {
-    isolate_->RemoveCancelableTask(this);
+  // Idle tasks could still be in kWaiting. Avoid race with shutdown of isolate
+  // by moving the state to kRunning.
+  if (TryRun() || IsRunning()) {
+    bool removed = parent_->Remove(id_);
+    USE(removed);
+    DCHECK(removed);
   }
 }
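The destructor above leans on TryRun() and IsRunning() to avoid racing with isolate shutdown: a task that is still kWaiting is first claimed (moved to kRunning) before it unregisters itself. The Cancelable interface itself lives in cancelable-task.h, which is not part of this file, so the following is only a minimal sketch of the status transitions TryRun() and Cancel() are expected to perform, assuming an atomic compare-and-swap over a kWaiting/kCanceled/kRunning status field (the names below are illustrative, not the CL's actual declarations):

// Sketch only: illustrates the status hand-off assumed by the destructor and
// by CancelableTaskManager; not the declarations from cancelable-task.h.
#include <atomic>

class CancelableSketch {
 public:
  enum Status { kWaiting, kCanceled, kRunning };

  // Worker side: claim the task, kWaiting -> kRunning. Fails if the task was
  // already canceled (or claimed), so a canceled task never runs.
  bool TryRun() { return CompareExchange(kWaiting, kRunning); }

  // Manager side: cancel the task, kWaiting -> kCanceled. Fails once the task
  // is running, which is what forces CancelAndWait() to block.
  bool Cancel() { return CompareExchange(kWaiting, kCanceled); }

  bool IsRunning() const { return status_.load() == kRunning; }

 private:
  bool CompareExchange(Status expected, Status desired) {
    return status_.compare_exchange_strong(expected, desired);
  }

  std::atomic<Status> status_{kWaiting};
};

Under that model, the destructor's TryRun() || IsRunning() check reads as "this task was never canceled": the manager still holds its id, so the task has to unregister itself here.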
+static bool ComparePointers(void* ptr1, void* ptr2) { return ptr1 == ptr2; }
+
+
+CancelableTaskManager::CancelableTaskManager()
+    : task_id_counter_(0), cancelable_tasks_(ComparePointers) {}
+
+
+uint32_t CancelableTaskManager::Register(Cancelable* task) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  uint32_t id = ++task_id_counter_;
+  // If the id is still in use, try to find a new one.
+  while (cancelable_tasks_.Lookup(reinterpret_cast<void*>(id), id) != nullptr) {

Michael Lippautz, 2015/11/10 18:00:05:
We could just add a CHECK here for overflow. Also

+    ++id;
+  }
+  HashMap::Entry* entry =
+      cancelable_tasks_.LookupOrInsert(reinterpret_cast<void*>(id), id);
+  entry->value = task;
+  return id;
+}
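On the inline note above: task_id_counter_ is a uint32_t and the collision loop only probes forward, so nothing currently flags the counter wrapping around. A minimal sketch of the overflow CHECK being suggested, hedged since the exact macro and placement are the author's call (NextTaskId is a made-up helper; V8 would use CHECK_NE rather than the stand-in abort below):

#include <cstdint>
#include <cstdlib>

// Sketch only, not part of the CL. Ids start at 1 because the counter is
// pre-incremented, so a wrap-around of the 32-bit counter shows up as 0.
uint32_t NextTaskId(uint32_t* task_id_counter) {
  uint32_t id = ++(*task_id_counter);
  if (id == 0) std::abort();  // i.e. CHECK_NE(0u, id) in V8 terms.
  return id;
}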
+
+
+bool CancelableTaskManager::Remove(uint32_t id) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  Cancelable* value = reinterpret_cast<Cancelable*>(
+      cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id));
+  if (value != nullptr) {
+    value->Cancel();
+    cancelable_tasks_barrier_.NotifyOne();
+  }
+  return value != nullptr;
+}
+
+
+void CancelableTaskManager::CancelAndWait() {
+  // Clean up all cancelable fore- and background tasks. Tasks are canceled on
+  // the way if possible, i.e., if they have not started yet. After each round
+  // of canceling we wait for the background tasks that have already been
+  // started.
+  base::LockGuard<base::Mutex> guard(&mutex_);
+
+  // HashMap does not support removing while iterating, hence keep a set of
+  // entries that are to be removed.
+  std::set<uint32_t> to_remove;
+
+  // Cancelable tasks could potentially register new tasks, requiring a loop
+  // here.
+  while (cancelable_tasks_.occupancy() > 0) {
+    for (HashMap::Entry* p = cancelable_tasks_.Start(); p != nullptr;
+         p = cancelable_tasks_.Next(p)) {
+      if (reinterpret_cast<Cancelable*>(p->value)->Cancel()) {
+        to_remove.insert(reinterpret_cast<Cancelable*>(p->value)->id());
+      }
+    }
+    // Remove tasks that were successfully canceled.
+    for (auto id : to_remove) {
+      cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
+    }
+    to_remove.clear();
+
+    // Finally, wait for already running background tasks.
+    if (cancelable_tasks_.occupancy() > 0) {
+      cancelable_tasks_barrier_.Wait(&mutex_);
+    }
+  }
+}
+
+
+CancelableTask::CancelableTask(Isolate* isolate)
+    : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
+
+CancelableIdleTask::CancelableIdleTask(Isolate* isolate)
+    : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
+
 }  // namespace internal
 }  // namespace v8
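For readers following the CL as a whole, the protocol is register-on-construction, cancel-or-drain-on-teardown. Below is a standalone model of that protocol using only the standard library, purely as a reading aid; TinyTask and TinyManager are illustrative names, and the real code differs (V8's HashMap, base::Mutex, base::ConditionVariable, and the Cancel() call inside Remove() are not reproduced here):

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <map>
#include <mutex>

// Miniature stand-in for Cancelable: a single atomic status field.
struct TinyTask {
  enum Status { kWaiting, kCanceled, kRunning };
  std::atomic<Status> status{kWaiting};
  bool TryRun() {   // worker claims the task before executing it
    Status expected = kWaiting;
    return status.compare_exchange_strong(expected, kRunning);
  }
  bool Cancel() {   // manager cancels a task that has not started
    Status expected = kWaiting;
    return status.compare_exchange_strong(expected, kCanceled);
  }
};

// Miniature stand-in for CancelableTaskManager.
class TinyManager {
 public:
  uint32_t Register(TinyTask* task) {
    std::lock_guard<std::mutex> guard(mutex_);
    uint32_t id = ++task_id_counter_;
    tasks_[id] = task;
    return id;
  }

  // Called when a task finishes (or is destroyed) so CancelAndWait() wakes up.
  void Remove(uint32_t id) {
    std::lock_guard<std::mutex> guard(mutex_);
    tasks_.erase(id);
    barrier_.notify_one();
  }

  // Cancel everything that has not started; wait for the rest to drain.
  void CancelAndWait() {
    std::unique_lock<std::mutex> guard(mutex_);
    while (!tasks_.empty()) {
      for (auto it = tasks_.begin(); it != tasks_.end();) {
        if (it->second->Cancel()) {
          it = tasks_.erase(it);  // never started; drop it right away
        } else {
          ++it;  // already running; will call Remove() itself when done
        }
      }
      if (!tasks_.empty()) barrier_.wait(guard);
    }
  }

 private:
  std::mutex mutex_;
  std::condition_variable barrier_;
  std::map<uint32_t, TinyTask*> tasks_;
  uint32_t task_id_counter_ = 0;
};

In this model a worker calls TryRun() before executing the task body and Remove(id) once it is done; teardown then reduces to a single CancelAndWait() call on the isolate's manager, which is what the CancelableTask and CancelableIdleTask constructors above wire up via isolate->cancelable_task_manager().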