Index: base/message_loop.cc
diff --git a/base/message_loop.cc b/base/message_loop.cc
index 85e37d4266eeef3986205e01047f00bd595e7a52..d080b8b308dc81a26a55c5f20b2c9aef87c64290 100644
--- a/base/message_loop.cc
+++ b/base/message_loop.cc
@@ -1,9 +1,12 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 #include "base/message_loop.h"
+// This needs to be above the gdk includes because gdk #defines Status.
willchan no longer on Chromium
2011/03/22 23:30:45
We should instead move the platform specific inclu
awong
2011/03/23 04:05:45
Done.
+#include "base/tracked_objects.h"
+
 #if defined(OS_POSIX) && !defined(OS_MACOSX)
 #include <gdk/gdk.h>
 #include <gdk/gdkx.h>
@@ -11,11 +14,13 @@
 #include <algorithm>
+#include "base/bind.h"
 #include "base/compiler_specific.h"
 #include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/message_pump_default.h"
 #include "base/metrics/histogram.h"
+#include "base/scoped_ptr.h"
 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
 #include "base/threading/thread_local.h"
@@ -81,6 +86,39 @@ const base::LinearHistogram::DescriptionPair event_descriptions_[] = {
 bool enable_histogrammer_ = false;
+// TODO(ajwong): This is one use case for an Owned() tag that behaves like a
+// "Unique" pointer. If we had that, and Tasks were always safe to delete on
+// MessageLoop shutdown, this class could just be a function.
+class TaskClosureAdapter : public base::RefCounted<TaskClosureAdapter> {
+ public:
+  // |should_leak_task| points to a flag variable that can be used to determine
willchan no longer on Chromium
2011/03/22 23:30:45
Why do we need to use a pointer? Can't we determin
awong
2011/03/23 04:05:45
It needs to be a pointer because whether or not th
willchan no longer on Chromium
2011/03/23 18:01:10
I'm not sure if I completely understand. If a Task
awong
2011/03/23 19:33:01
Not quite. In DeletePendingClosure(), there seems
willchan no longer on Chromium
2011/03/23 19:41:56
Hah! I missed that little subtlety. I don't see wh
+  // if this class should leak the Task on destruction. This is important
+  // at MessageLoop shutdown since not all tasks can be safely deleted without
+  // running.
+  TaskClosureAdapter(Task* task, bool* should_leak_task)
+      : task_(task),
+        should_leak_task_(should_leak_task) {
+  }
+
+  void Run() {
+    task_->Run();
+    delete task_;
+    task_ = NULL;
+  }
+
+ private:
+  friend class base::RefCounted<TaskClosureAdapter>;
+
+  ~TaskClosureAdapter() {
+    if (!*should_leak_task_) {
+      delete task_;
+    }
+  }
+
+  Task* task_;
willchan no longer on Chromium
2011/03/22 23:30:45
I sorta feel like this should be a scoped_ptr to i
awong
2011/03/23 04:05:45
I bounced between the two. In the end, it seemed
+  bool* should_leak_task_;
+};
+
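For orientation, here is a minimal self-contained analogue of the adapter pattern above, using std::shared_ptr and std::function as stand-ins for base::RefCounted and base::Closure (the real adapter additionally deletes the Task inside Run() and consults *should_leak_task_ in its destructor, which this sketch omits):

#include <cstdio>
#include <functional>
#include <memory>

struct Task {
  virtual ~Task() { std::puts("~Task"); }
  virtual void Run() { std::puts("Task::Run"); }
};

int main() {
  // The closure shares ownership of the wrapped Task, mirroring how
  // base::Bind(&TaskClosureAdapter::Run, new TaskClosureAdapter(...))
  // ref-counts the adapter that owns the raw Task*.
  std::shared_ptr<Task> adapter(new Task);
  std::function<void()> closure = [adapter] { adapter->Run(); };
  adapter.reset();    // the closure alone keeps the Task alive
  closure();          // prints "Task::Run"
  closure = nullptr;  // last owner gone: prints "~Task"
}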
 }  // namespace
 //------------------------------------------------------------------------------
@@ -107,10 +145,10 @@ static LPTOP_LEVEL_EXCEPTION_FILTER GetTopSEHFilter() {
 //------------------------------------------------------------------------------
-MessageLoop::TaskObserver::TaskObserver() {
+MessageLoop::ClosureObserver::ClosureObserver() {
 }
-MessageLoop::TaskObserver::~TaskObserver() {
+MessageLoop::ClosureObserver::~ClosureObserver() {
 }
 MessageLoop::DestructionObserver::~DestructionObserver() {
@@ -123,6 +161,7 @@ MessageLoop::MessageLoop(Type type)
       nestable_tasks_allowed_(true),
       exception_restoration_(false),
       state_(NULL),
+      should_leak_tasks_(true),
       next_sequence_num_(0) {
   DCHECK(!current()) << "should only have one message loop per thread";
   lazy_tls_ptr.Pointer()->Set(this);
@@ -172,10 +211,10 @@ MessageLoop::~MessageLoop() {
   // is being stubborn. Inspect the queues to see who is left.
   bool did_work;
   for (int i = 0; i < 100; ++i) {
-    DeletePendingTasks();
+    DeletePendingClosures();
     ReloadWorkQueue();
     // If we end up with empty queues, then break out of the loop.
-    did_work = DeletePendingTasks();
+    did_work = DeletePendingClosures();
     if (!did_work)
       break;
   }
@@ -216,22 +255,78 @@ void MessageLoop::RemoveDestructionObserver(
 void MessageLoop::PostTask(
     const tracked_objects::Location& from_here, Task* task) {
-  PostTask_Helper(from_here, task, 0, true);
+  PendingClosure pending_closure(
+      base::Bind(&TaskClosureAdapter::Run,
+                 new TaskClosureAdapter(task, &should_leak_tasks_)),
+      from_here,
+      CalculateDelayedRuntime(0), true);
+  // TODO(ajwong): This failed because pending_closure outlives the add to the
+  // incoming_queue_.
+  AddToIncomingQueue(&pending_closure);
 }
 void MessageLoop::PostDelayedTask(
     const tracked_objects::Location& from_here, Task* task, int64 delay_ms) {
-  PostTask_Helper(from_here, task, delay_ms, true);
+  PendingClosure pending_closure(
+      base::Bind(&TaskClosureAdapter::Run,
+                 new TaskClosureAdapter(task, &should_leak_tasks_)),
+      from_here,
+      CalculateDelayedRuntime(delay_ms), true);
+  AddToIncomingQueue(&pending_closure);
 }
 void MessageLoop::PostNonNestableTask(
     const tracked_objects::Location& from_here, Task* task) {
-  PostTask_Helper(from_here, task, 0, false);
+  PendingClosure pending_closure(
+      base::Bind(&TaskClosureAdapter::Run,
+                 new TaskClosureAdapter(task, &should_leak_tasks_)),
+      from_here,
+      CalculateDelayedRuntime(0), false);
+  AddToIncomingQueue(&pending_closure);
 }
 void MessageLoop::PostNonNestableDelayedTask(
     const tracked_objects::Location& from_here, Task* task, int64 delay_ms) {
-  PostTask_Helper(from_here, task, delay_ms, false);
+  PendingClosure pending_closure(
+      base::Bind(&TaskClosureAdapter::Run,
+                 new TaskClosureAdapter(task, &should_leak_tasks_)),
+      from_here,
+      CalculateDelayedRuntime(delay_ms), false);
+  AddToIncomingQueue(&pending_closure);
+}
+
+void MessageLoop::PostClosure(
+    const tracked_objects::Location& from_here, const base::Closure& closure) {
+  DCHECK(!closure.is_null());
+  PendingClosure pending_closure(closure, from_here,
+                                 CalculateDelayedRuntime(0), true);
+  AddToIncomingQueue(&pending_closure);
+}
+
+void MessageLoop::PostDelayedClosure(
+    const tracked_objects::Location& from_here, const base::Closure& closure,
+    int64 delay_ms) {
+  DCHECK(!closure.is_null());
+  PendingClosure pending_closure(closure, from_here,
+                                 CalculateDelayedRuntime(delay_ms), true);
+  AddToIncomingQueue(&pending_closure);
+}
+
+void MessageLoop::PostNonNestableClosure(
+    const tracked_objects::Location& from_here, const base::Closure& closure) {
+  DCHECK(!closure.is_null());
+  PendingClosure pending_closure(closure, from_here,
+                                 CalculateDelayedRuntime(0), false);
+  AddToIncomingQueue(&pending_closure);
+}
+
+void MessageLoop::PostNonNestableDelayedClosure(
+    const tracked_objects::Location& from_here, const base::Closure& closure,
+    int64 delay_ms) {
+  DCHECK(!closure.is_null());
+  PendingClosure pending_closure(closure, from_here,
+                                 CalculateDelayedRuntime(delay_ms), false);
+  AddToIncomingQueue(&pending_closure);
 }
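All eight Post* methods above reduce to the same three steps: build a base::Closure (wrapping a legacy Task in TaskClosureAdapter where needed), stamp it into a PendingClosure via CalculateDelayedRuntime() plus the nestable bit, and hand it off to AddToIncomingQueue(). A hypothetical consolidation of the four *Closure variants, sketched in the style of this file (PostClosureInternal is an invented name, not part of this CL):

// Hypothetical private helper the four Post*Closure methods could share;
// the Task-based methods would first wrap |task| in a TaskClosureAdapter.
void MessageLoop::PostClosureInternal(
    const tracked_objects::Location& from_here,
    const base::Closure& closure, int64 delay_ms, bool nestable) {
  DCHECK(!closure.is_null());
  PendingClosure pending_closure(closure, from_here,
                                 CalculateDelayedRuntime(delay_ms), nestable);
  AddToIncomingQueue(&pending_closure);
}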
 void MessageLoop::Run() {
@@ -281,14 +376,14 @@ bool MessageLoop::IsNested() {
   return state_->run_depth > 1;
 }
-void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
+void MessageLoop::AddClosureObserver(ClosureObserver* closure_observer) {
   DCHECK_EQ(this, current());
-  task_observers_.AddObserver(task_observer);
+  closure_observers_.AddObserver(closure_observer);
 }
-void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
+void MessageLoop::RemoveClosureObserver(ClosureObserver* closure_observer) {
   DCHECK_EQ(this, current());
-  task_observers_.RemoveObserver(task_observer);
+  closure_observers_.RemoveObserver(closure_observer);
 }
 void MessageLoop::AssertIdle() const {
@@ -349,31 +444,40 @@ bool MessageLoop::ProcessNextDelayedNonNestableTask() {
   if (deferred_non_nestable_work_queue_.empty())
     return false;
-  Task* task = deferred_non_nestable_work_queue_.front().task;
+  PendingClosure pending_closure = deferred_non_nestable_work_queue_.front();
   deferred_non_nestable_work_queue_.pop();
-  RunTask(task);
+  RunClosure(pending_closure);
   return true;
 }
-void MessageLoop::RunTask(Task* task) {
+void MessageLoop::RunClosure(const PendingClosure& pending_closure) {
   DCHECK(nestable_tasks_allowed_);
   // Execute the task and assume the worst: It is probably not reentrant.
   nestable_tasks_allowed_ = false;
   HistogramEvent(kTaskRunEvent);
-  FOR_EACH_OBSERVER(TaskObserver, task_observers_,
-                    WillProcessTask(task));
-  task->Run();
-  FOR_EACH_OBSERVER(TaskObserver, task_observers_, DidProcessTask(task));
-  delete task;
+  FOR_EACH_OBSERVER(ClosureObserver, closure_observers_,
+                    WillProcessClosure(pending_closure.time_posted));
+  pending_closure.closure.Run();
+  FOR_EACH_OBSERVER(ClosureObserver, closure_observers_,
+                    DidProcessClosure(pending_closure.time_posted));
+
+#if defined(TRACK_ALL_TASK_OBJECTS)
willchan no longer on Chromium
2011/03/22 23:30:45
Where are the corresponding changes to remove this
awong
2011/03/23 04:05:45
WorkerPool is not migrated yet so I can't break th
willchan no longer on Chromium
2011/03/23 18:01:10
My preference would be to do it in the same CL, bu
awong
2011/03/23 19:33:01
Fixing WorkerPool will take a bit of works since I
willchan no longer on Chromium
2011/03/23 19:41:56
Fine with me.
+  if (tracked_objects::ThreadData::IsActive() && pending_closure.post_births) {
+    tracked_objects::ThreadData::current()->TallyADeath(
+        *pending_closure.post_births,
+        TimeTicks::Now() - pending_closure.time_posted);
+  }
+#endif  // defined(TRACK_ALL_TASK_OBJECTS)
   nestable_tasks_allowed_ = true;
 }
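The TallyADeath() call above pairs with the TallyABirth() in the PendingClosure constructor near the end of this file: a "birth" is recorded at the posting site, a pointer to it rides along in post_births, and the "death" attributes the post-to-run-completion latency back to that site. A self-contained analogue of the bookkeeping, under the assumption that only a per-site count and total latency matter (the real tracked_objects types carry much more state):

#include <chrono>
#include <cstdio>
#include <map>
#include <string>

using Clock = std::chrono::steady_clock;

struct DeathStats { int count = 0; double total_ms = 0; };
static std::map<std::string, DeathStats> g_stats;  // keyed by posting site

// "Death": attribute the elapsed queue+run time back to where the work
// was born.
void TallyADeath(const std::string& posted_from, Clock::time_point born) {
  DeathStats& s = g_stats[posted_from];
  ++s.count;
  s.total_ms +=
      std::chrono::duration<double, std::milli>(Clock::now() - born).count();
}

int main() {
  Clock::time_point born = Clock::now();  // "birth", recorded at post time
  // ... the closure waits in a queue, then runs ...
  TallyADeath("message_loop.cc:42", born);
  const DeathStats& s = g_stats["message_loop.cc:42"];
  std::printf("%d death(s), %.3f ms total\n", s.count, s.total_ms);
}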
-bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
-  if (pending_task.nestable || state_->run_depth == 1) {
-    RunTask(pending_task.task);
+bool MessageLoop::DeferOrRunPendingClosure(
+    const PendingClosure& pending_closure) {
+  if (pending_closure.nestable || state_->run_depth == 1) {
+    RunClosure(pending_closure);
     // Show that we ran a task (Note: a new one might arrive as a
     // consequence!).
     return true;
@@ -381,18 +485,18 @@ bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) {
   // We couldn't run the task now because we're in a nested message loop
   // and the task isn't nestable.
-  deferred_non_nestable_work_queue_.push(pending_task);
+  deferred_non_nestable_work_queue_.push(pending_closure);
   return false;
 }
-void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) {
+void MessageLoop::AddToDelayedWorkQueue(const PendingClosure& pending_closure) {
   // Move to the delayed work queue. Initialize the sequence number
   // before inserting into the delayed_work_queue_. The sequence number
   // is used to facilitate FIFO sorting when two tasks have the same
   // delayed_run_time value.
-  PendingTask new_pending_task(pending_task);
-  new_pending_task.sequence_num = next_sequence_num_++;
-  delayed_work_queue_.push(new_pending_task);
+  PendingClosure new_pending_closure(pending_closure);
+  new_pending_closure.sequence_num = next_sequence_num_++;
+  delayed_work_queue_.push(new_pending_closure);
 }
 void MessageLoop::ReloadWorkQueue() {
@@ -413,62 +517,50 @@ void MessageLoop::ReloadWorkQueue() {
   }
 }
-bool MessageLoop::DeletePendingTasks() {
+bool MessageLoop::DeletePendingClosures() {
   bool did_work = !work_queue_.empty();
+  // TODO(darin): Delete all tasks once it is safe to do so.
+  // Until it is totally safe, just do it when running Purify or
+  // Valgrind.
+  //
+#if defined(PURIFY) || defined(USE_HEAPCHECKER)
+  should_leak_tasks_ = false;
+#else
+  if (RunningOnValgrind())
+    should_leak_tasks_ = false;
+#endif  // defined(PURIFY) || defined(USE_HEAPCHECKER)
   while (!work_queue_.empty()) {
-    PendingTask pending_task = work_queue_.front();
+    PendingClosure pending_closure = work_queue_.front();
     work_queue_.pop();
-    if (!pending_task.delayed_run_time.is_null()) {
+    if (!pending_closure.delayed_run_time.is_null()) {
       // We want to delete delayed tasks in the same order in which they would
       // normally be deleted in case of any funny dependencies between delayed
       // tasks.
-      AddToDelayedWorkQueue(pending_task);
-    } else {
-      // TODO(darin): Delete all tasks once it is safe to do so.
-      // Until it is totally safe, just do it when running Purify or
-      // Valgrind.
-#if defined(PURIFY) || defined(USE_HEAPCHECKER)
-      delete pending_task.task;
-#else
-      if (RunningOnValgrind())
-        delete pending_task.task;
-#endif // defined(OS_POSIX)
+      AddToDelayedWorkQueue(pending_closure);
     }
   }
   did_work |= !deferred_non_nestable_work_queue_.empty();
   while (!deferred_non_nestable_work_queue_.empty()) {
-    // TODO(darin): Delete all tasks once it is safe to do so.
-    // Until it is totaly safe, only delete them under Purify and Valgrind.
-    Task* task = NULL;
-#if defined(PURIFY) || defined(USE_HEAPCHECKER)
-    task = deferred_non_nestable_work_queue_.front().task;
-#else
-    if (RunningOnValgrind())
-      task = deferred_non_nestable_work_queue_.front().task;
-#endif
     deferred_non_nestable_work_queue_.pop();
-    if (task)
-      delete task;
   }
   did_work |= !delayed_work_queue_.empty();
+
+  // Historically, delayed tasks have always been deleted here regardless of
+  // valgrind status. It's not completely clear why tasks are leaked in the
+  // loops above; this code replicates that legacy behavior and should not be
+  // considered absolutely "correct".
+  should_leak_tasks_ = false;
   while (!delayed_work_queue_.empty()) {
-    Task* task = delayed_work_queue_.top().task;
     delayed_work_queue_.pop();
-    delete task;
   }
+  should_leak_tasks_ = true;
   return did_work;
 }
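Note the handshake this function performs with ~TaskClosureAdapter: popping a PendingClosure can drop the last reference to its adapter, so the adapter's destructor runs during the pop, and whatever should_leak_tasks_ holds at that instant decides whether the wrapped Task is deleted. A simplified, self-contained illustration of the flag-controlled destructor (names here are stand-ins for the real classes):

#include <cstdio>

bool g_should_leak = true;  // stands in for MessageLoop::should_leak_tasks_

struct Adapter {
  int* task;        // stands in for the wrapped Task*
  bool* leak_flag;  // points at the loop-wide flag
  ~Adapter() {
    if (!*leak_flag) {
      delete task;  // safe-to-delete path (Purify/Valgrind, delayed queue)
      std::puts("task deleted");
    } else {
      std::puts("task leaked on purpose");  // the int intentionally leaks
    }
  }
};

int main() {
  { Adapter a{new int(1), &g_should_leak}; }  // leaks: flag is true
  g_should_leak = false;  // e.g. while draining the delayed queue
  { Adapter a{new int(2), &g_should_leak}; }  // deletes
  g_should_leak = true;
}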
-// Possibly called on a background thread!
-void MessageLoop::PostTask_Helper(
-    const tracked_objects::Location& from_here, Task* task, int64 delay_ms,
-    bool nestable) {
-  task->SetBirthPlace(from_here);
-
-  PendingTask pending_task(task, nestable);
-
+TimeTicks MessageLoop::CalculateDelayedRuntime(int64 delay_ms) {
+  TimeTicks delayed_run_time;
   if (delay_ms > 0) {
-    pending_task.delayed_run_time =
+    delayed_run_time =
         TimeTicks::Now() + TimeDelta::FromMilliseconds(delay_ms);
 #if defined(OS_WIN)
@@ -500,6 +592,11 @@ void MessageLoop::PostTask_Helper(
   }
 #endif
+  return delayed_run_time;
+}
+
+// Possibly called on a background thread!
+void MessageLoop::AddToIncomingQueue(PendingClosure* pending_closure) {
   // Warning: Don't try to short-circuit, and handle this thread's tasks more
   // directly, as it could starve handling of foreign threads. Put every task
   // into this queue.
@@ -509,7 +606,8 @@ void MessageLoop::PostTask_Helper(
     base::AutoLock locked(incoming_queue_lock_);
     bool was_empty = incoming_queue_.empty();
-    incoming_queue_.push(pending_task);
+    incoming_queue_.push(*pending_closure);
+    pending_closure->closure.Reset();
     if (!was_empty)
       return;  // Someone else should have started the sub-pump.
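Taking the PendingClosure by pointer and Reset()-ing its closure right after the push matters because base::Closure is ref-counted: the caller's pending_closure would otherwise hold a second reference to the bound TaskClosureAdapter until the posting frame unwinds (the TODO in PostTask above hints at this hazard). A self-contained analogue using std::shared_ptr for the closure's shared state (an assumed simplification):

#include <cassert>
#include <memory>
#include <queue>

int main() {
  std::queue<std::shared_ptr<int>> incoming_queue;
  std::shared_ptr<int> closure = std::make_shared<int>(42);
  incoming_queue.push(closure);  // two owners: caller + queue
  assert(closure.use_count() == 2);
  closure.reset();               // like closure.Reset(): queue is sole owner
  assert(incoming_queue.front().use_count() == 1);
}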
@@ -558,15 +656,15 @@ bool MessageLoop::DoWork() {
     // Execute oldest task.
     do {
-      PendingTask pending_task = work_queue_.front();
+      PendingClosure pending_closure = work_queue_.front();
       work_queue_.pop();
-      if (!pending_task.delayed_run_time.is_null()) {
-        AddToDelayedWorkQueue(pending_task);
+      if (!pending_closure.delayed_run_time.is_null()) {
+        AddToDelayedWorkQueue(pending_closure);
         // If we changed the topmost task, then it is time to re-schedule.
-        if (delayed_work_queue_.top().task == pending_task.task)
-          pump_->ScheduleDelayedWork(pending_task.delayed_run_time);
+        if (delayed_work_queue_.top().closure.Equals(pending_closure.closure))
+          pump_->ScheduleDelayedWork(pending_closure.delayed_run_time);
       } else {
-        if (DeferOrRunPendingTask(pending_task))
+        if (DeferOrRunPendingClosure(pending_closure))
           return true;
       }
     } while (!work_queue_.empty());
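The old code compared raw Task* pointers to detect whether the just-inserted item became the topmost delayed task; the replacement assumes Closure::Equals() gives the same identity semantics, i.e. it compares the shared bound state rather than the invoked function, so a copy in the queue still matches the local pending_closure. A shared_ptr analogue of identity-versus-value comparison:

#include <cassert>
#include <memory>

int main() {
  auto state = std::make_shared<int>(0);  // stands in for the bound state
  auto copy = state;                      // copying a closure shares identity
  assert(copy.get() == state.get());      // "Equals": same underlying state
  auto other = std::make_shared<int>(0);  // equal contents, distinct identity
  assert(other.get() != state.get());
}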
@@ -576,7 +674,7 @@ bool MessageLoop::DoWork() {
   return false;
 }
-bool MessageLoop::DoDelayedWork(base::TimeTicks* next_delayed_work_time) {
+bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) {
   if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) {
     recent_time_ = *next_delayed_work_time = TimeTicks();
     return false;
@@ -598,13 +696,13 @@ bool MessageLoop::DoDelayedWork(base::TimeTicks* next_delayed_work_time) {
     }
   }
-  PendingTask pending_task = delayed_work_queue_.top();
+  PendingClosure pending_closure = delayed_work_queue_.top();
   delayed_work_queue_.pop();
   if (!delayed_work_queue_.empty())
     *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time;
-  return DeferOrRunPendingTask(pending_task);
+  return DeferOrRunPendingClosure(pending_closure);
 }
 bool MessageLoop::DoIdleWork() {
@@ -642,9 +740,32 @@ MessageLoop::AutoRunState::~AutoRunState() {
 }
 //------------------------------------------------------------------------------
-// MessageLoop::PendingTask
+// MessageLoop::PendingClosure
+
+MessageLoop::PendingClosure::PendingClosure(
+    const base::Closure& closure,
+    const tracked_objects::Location& posted_from,
+    TimeTicks delayed_run_time,
+    bool nestable)
+    : closure(closure),
+      time_posted(TimeTicks::Now()),
+      delayed_run_time(delayed_run_time),
+      sequence_num(0),
+      nestable(nestable) {
+#if defined(TRACK_ALL_TASK_OBJECTS)
+  // NULL unless tracking is active and this thread is registered, so that
+  // RunClosure() never reads an uninitialized pointer.
+  post_births = NULL;
+  if (tracked_objects::ThreadData::IsActive()) {
+    tracked_objects::ThreadData* current_thread_data =
+        tracked_objects::ThreadData::current();
+    // current() returns NULL if shutdown started and this thread was never
+    // registered.
+    if (current_thread_data)
+      post_births = current_thread_data->TallyABirth(posted_from);
+  }
+#endif  // defined(TRACK_ALL_TASK_OBJECTS)
+}
-bool MessageLoop::PendingTask::operator<(const PendingTask& other) const {
+bool MessageLoop::PendingClosure::operator<(const PendingClosure& other) const {
   // Since the top of a priority queue is defined as the "greatest" element, we
   // need to invert the comparison here. We want the smaller time to be at the
   // top of the heap.
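A worked, self-contained example of the inverted ordering: std::priority_queue's top() is the element that compares greatest, so operator< must report the later run time as "less" to surface the earliest one, and sequence_num breaks ties FIFO, as the comment in AddToDelayedWorkQueue explains.

#include <cassert>
#include <queue>

struct Pending {
  int run_time;      // stands in for delayed_run_time
  int sequence_num;  // monotonically increasing at insertion
  bool operator<(const Pending& other) const {
    if (run_time != other.run_time)
      return run_time > other.run_time;        // inverted: later sorts lower
    return sequence_num > other.sequence_num;  // inverted: newer sorts lower
  }
};

int main() {
  std::priority_queue<Pending> q;
  q.push({20, 0});
  q.push({10, 1});
  q.push({10, 2});
  assert(q.top().run_time == 10);      // earliest run time surfaces first
  assert(q.top().sequence_num == 1);   // FIFO among equal run times
  q.pop();
  assert(q.top().sequence_num == 2);
  q.pop();
  assert(q.top().run_time == 20);
}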