| Index: src/cancelable-task.cc
|
| diff --git a/src/cancelable-task.cc b/src/cancelable-task.cc
|
| index 5927c22cdeb20b18081b4cdcdacbc6c3977ee72e..a2f344d39fc5b9ef699c0c43bca98ad7d7a53748 100644
|
| --- a/src/cancelable-task.cc
|
| +++ b/src/cancelable-task.cc
|
| @@ -11,18 +11,99 @@ namespace v8 {
|
| namespace internal {
|
|
|
|
|
| -Cancelable::Cancelable(Isolate* isolate)
|
| - : isolate_(isolate), is_cancelled_(false) {
|
| - isolate->RegisterCancelableTask(this);
|
| +Cancelable::Cancelable(CancelableTaskManager* parent)
|
| + : parent_(parent), status_(kWaiting), id_(0), cancel_counter_(0) {
|
| + id_ = parent->Register(this);
|
| + CHECK(id_ != 0);
|
| }
|
|
|
|
|
| Cancelable::~Cancelable() {
|
| - if (!is_cancelled_) {
|
| - isolate_->RemoveCancelableTask(this);
|
| + // The following check is needed to avoid calling into an already terminated
|
| + // manager object. This can happen when the manager has already canceled all
|
| + // pending tasks in {CancelAndWait}, right before the manager is destroyed.
|
| + if (TryRun() || IsRunning()) {
|
| + parent_->TryAbort(id_);
|
| }
|
| }
|
|
|
|
|
| +static bool ComparePointers(void* ptr1, void* ptr2) { return ptr1 == ptr2; }
|
| +
|
| +
|
| +CancelableTaskManager::CancelableTaskManager()
|
| + : task_id_counter_(0), cancelable_tasks_(ComparePointers) {}
|
| +
|
| +
|
| +uint32_t CancelableTaskManager::Register(Cancelable* task) {
|
| + base::LockGuard<base::Mutex> guard(&mutex_);
|
| + uint32_t id = ++task_id_counter_;
|
| + // The loop below is only needed in the rare case that task_id_counter_ overflows.
|
| + while ((id == 0) || (cancelable_tasks_.Lookup(reinterpret_cast<void*>(id),
|
| + id) != nullptr)) {
|
| + ++id;
|
| + }
|
| + HashMap::Entry* entry =
|
| + cancelable_tasks_.LookupOrInsert(reinterpret_cast<void*>(id), id);
|
| + entry->value = task;
|
| + return id;
|
| +}
|
| +
|
| +
|
| +bool CancelableTaskManager::TryAbort(uint32_t id) {
|
| + base::LockGuard<base::Mutex> guard(&mutex_);
|
| + Cancelable* value = reinterpret_cast<Cancelable*>(
|
| + cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id));
|
| + if (value != nullptr) {
|
| + bool success = value->Cancel();
|
| + cancelable_tasks_barrier_.NotifyOne();
|
| + if (!success) return false;
|
| + return true;
|
| + }
|
| + return false;
|
| +}
|
| +
|
| +
|
| +void CancelableTaskManager::CancelAndWait() {
|
| + // Clean up all cancelable foreground and background tasks. Tasks are
|
| + // canceled where possible, i.e., if they have not yet started running.
|
| + // After each round of canceling we wait for the background tasks that
|
| + // have already been started.
|
| + base::LockGuard<base::Mutex> guard(&mutex_);
|
| +
|
| + // HashMap does not support removing while iterating, hence keep a set of
|
| + // entries that are to be removed.
|
| + std::set<uint32_t> to_remove;
|
| +
|
| + // Cancelable tasks could potentially register new tasks, requiring a loop
|
| + // here.
|
| + while (cancelable_tasks_.occupancy() > 0) {
|
| + for (HashMap::Entry* p = cancelable_tasks_.Start(); p != nullptr;
|
| + p = cancelable_tasks_.Next(p)) {
|
| + if (reinterpret_cast<Cancelable*>(p->value)->Cancel()) {
|
| + to_remove.insert(reinterpret_cast<Cancelable*>(p->value)->id());
|
| + }
|
| + }
|
| + // Remove tasks that were successfully canceled.
|
| + for (auto id : to_remove) {
|
| + cancelable_tasks_.Remove(reinterpret_cast<void*>(id), id);
|
| + }
|
| + to_remove.clear();
|
| +
|
| + // Finally, wait for already running background tasks.
|
| + if (cancelable_tasks_.occupancy() > 0) {
|
| + cancelable_tasks_barrier_.Wait(&mutex_);
|
| + }
|
| + }
|
| +}
|
| +
|
| +
|
| +CancelableTask::CancelableTask(Isolate* isolate)
|
| + : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
|
| +
|
| +
|
| +CancelableIdleTask::CancelableIdleTask(Isolate* isolate)
|
| + : Cancelable(isolate->cancelable_task_manager()), isolate_(isolate) {}
|
| +
|
| } // namespace internal
|
| } // namespace v8
|
|
|