OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/message_loop.h" | 5 #include "base/message_loop.h" |
6 | 6 |
7 #if defined(OS_POSIX) && !defined(OS_MACOSX) | |
8 #include <gdk/gdk.h> | |
9 #include <gdk/gdkx.h> | |
10 #endif | |
11 | |
12 #include <algorithm> | 7 #include <algorithm> |
13 | 8 |
| 9 #include "base/bind.h" |
14 #include "base/compiler_specific.h" | 10 #include "base/compiler_specific.h" |
15 #include "base/lazy_instance.h" | 11 #include "base/lazy_instance.h" |
16 #include "base/logging.h" | 12 #include "base/logging.h" |
17 #include "base/message_pump_default.h" | 13 #include "base/message_pump_default.h" |
18 #include "base/metrics/histogram.h" | 14 #include "base/metrics/histogram.h" |
| 15 #include "base/scoped_ptr.h" |
19 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | 16 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" |
20 #include "base/threading/thread_local.h" | 17 #include "base/threading/thread_local.h" |
| 18 #include "base/tracked_objects.h" |
21 | 19 |
22 #if defined(OS_MACOSX) | 20 #if defined(OS_MACOSX) |
23 #include "base/message_pump_mac.h" | 21 #include "base/message_pump_mac.h" |
24 #endif | 22 #endif |
25 #if defined(OS_POSIX) | 23 #if defined(OS_POSIX) |
26 #include "base/message_pump_libevent.h" | 24 #include "base/message_pump_libevent.h" |
27 #endif | 25 #endif |
28 #if defined(OS_POSIX) && !defined(OS_MACOSX) | 26 #if defined(OS_POSIX) && !defined(OS_MACOSX) |
| 27 #include <gdk/gdk.h> |
| 28 #include <gdk/gdkx.h> |
29 #include "base/message_pump_glib.h" | 29 #include "base/message_pump_glib.h" |
30 #endif | 30 #endif |
31 #if defined(TOUCH_UI) | 31 #if defined(TOUCH_UI) |
32 #include "base/message_pump_glib_x.h" | 32 #include "base/message_pump_glib_x.h" |
33 #endif | 33 #endif |
34 | 34 |
35 using base::TimeDelta; | 35 using base::TimeDelta; |
36 using base::TimeTicks; | 36 using base::TimeTicks; |
37 | 37 |
38 namespace { | 38 namespace { |
(...skipping 35 matching lines...)
74 | 74 |
75 // A few events we handle (kindred to messages), and used to profile actions. | 75 // A few events we handle (kindred to messages), and used to profile actions. |
76 VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent) | 76 VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent) |
77 VALUE_TO_NUMBER_AND_NAME(kTimerEvent) | 77 VALUE_TO_NUMBER_AND_NAME(kTimerEvent) |
78 | 78 |
79 {-1, NULL} // The list must be null terminated, per API to histogram. | 79 {-1, NULL} // The list must be null terminated, per API to histogram. |
80 }; | 80 }; |
81 | 81 |
82 bool enable_histogrammer_ = false; | 82 bool enable_histogrammer_ = false; |
83 | 83 |
| 84 // TODO(ajwong): This is one use case for an Owned() tag that behaves like a |
| 85 // "Unique" pointer. If we had that, and Tasks were always safe to delete on |
| 86 // MessageLoop shutdown, this class could just be a function. |
| 87 class TaskClosureAdapter : public base::RefCounted<TaskClosureAdapter> { |
| 88 public: |
| 89 // |should_leak_task| points to a flag variable that can be used to determine |
| 90 // if this class should leak the Task on destruction. This is important |
| 91 // at MessageLoop shutdown since not all tasks can be safely deleted without |
| 92 // running. See MessageLoop::DeletePendingClosures() for the exact behavior |
| 93 // of when a Task should be deleted. It is subtle. |
| 94 TaskClosureAdapter(Task* task, bool* should_leak_task) |
| 95 : task_(task), |
| 96 should_leak_task_(should_leak_task) { |
| 97 } |
| 98 |
| 99 void Run() { |
| 100 task_->Run(); |
| 101 delete task_; |
| 102 task_ = NULL; |
| 103 } |
| 104 |
| 105 private: |
| 106 friend class base::RefCounted<TaskClosureAdapter>; |
| 107 |
| 108 ~TaskClosureAdapter() { |
| 109 if (!*should_leak_task_) { |
| 110 delete task_; |
| 111 } |
| 112 } |
| 113 |
| 114 Task* task_; |
| 115 bool* should_leak_task_; |
| 116 }; |
| 117 |
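As a rough sketch of the lifetime this adapter relies on (assuming base::Bind's usual handling of RefCounted receivers, where binding a raw pointer retains a reference), a bound closure keeps the adapter, and therefore the Task, alive until it is run or dropped:

// Illustrative only; SomeTask is a hypothetical Task subclass.
Task* task = new SomeTask();
bool should_leak = false;
base::Closure closure = base::Bind(
    &TaskClosureAdapter::Run,
    new TaskClosureAdapter(task, &should_leak));  // Bind holds a reference.
closure.Run();    // Run() executes and deletes |task|, then NULLs |task_|.
closure.Reset();  // Last reference released; the destructor sees a NULL
                  // |task_|, and deleting NULL is a no-op.
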
84 } // namespace | 118 } // namespace |
85 | 119 |
86 //------------------------------------------------------------------------------ | 120 //------------------------------------------------------------------------------ |
87 | 121 |
88 #if defined(OS_WIN) | 122 #if defined(OS_WIN) |
89 | 123 |
90 // Upon a SEH exception in this thread, it restores the original unhandled | 124 // Upon a SEH exception in this thread, it restores the original unhandled |
91 // exception filter. | 125 // exception filter. |
92 static int SEHFilter(LPTOP_LEVEL_EXCEPTION_FILTER old_filter) { | 126 static int SEHFilter(LPTOP_LEVEL_EXCEPTION_FILTER old_filter) { |
93 ::SetUnhandledExceptionFilter(old_filter); | 127 ::SetUnhandledExceptionFilter(old_filter); |
94 return EXCEPTION_CONTINUE_SEARCH; | 128 return EXCEPTION_CONTINUE_SEARCH; |
95 } | 129 } |
96 | 130 |
97 // Retrieves a pointer to the current unhandled exception filter. There | 131 // Retrieves a pointer to the current unhandled exception filter. There |
98 // is no standalone getter method. | 132 // is no standalone getter method. |
99 static LPTOP_LEVEL_EXCEPTION_FILTER GetTopSEHFilter() { | 133 static LPTOP_LEVEL_EXCEPTION_FILTER GetTopSEHFilter() { |
100 LPTOP_LEVEL_EXCEPTION_FILTER top_filter = NULL; | 134 LPTOP_LEVEL_EXCEPTION_FILTER top_filter = NULL; |
101 top_filter = ::SetUnhandledExceptionFilter(0); | 135 top_filter = ::SetUnhandledExceptionFilter(0); |
102 ::SetUnhandledExceptionFilter(top_filter); | 136 ::SetUnhandledExceptionFilter(top_filter); |
103 return top_filter; | 137 return top_filter; |
104 } | 138 } |
105 | 139 |
106 #endif // defined(OS_WIN) | 140 #endif // defined(OS_WIN) |
107 | 141 |
108 //------------------------------------------------------------------------------ | 142 //------------------------------------------------------------------------------ |
109 | 143 |
110 MessageLoop::TaskObserver::TaskObserver() { | 144 MessageLoop::ClosureObserver::ClosureObserver() { |
111 } | 145 } |
112 | 146 |
113 MessageLoop::TaskObserver::~TaskObserver() { | 147 MessageLoop::ClosureObserver::~ClosureObserver() { |
114 } | 148 } |
115 | 149 |
116 MessageLoop::DestructionObserver::~DestructionObserver() { | 150 MessageLoop::DestructionObserver::~DestructionObserver() { |
117 } | 151 } |
118 | 152 |
119 //------------------------------------------------------------------------------ | 153 //------------------------------------------------------------------------------ |
120 | 154 |
121 MessageLoop::MessageLoop(Type type) | 155 MessageLoop::MessageLoop(Type type) |
122 : type_(type), | 156 : type_(type), |
123 nestable_tasks_allowed_(true), | 157 nestable_tasks_allowed_(true), |
124 exception_restoration_(false), | 158 exception_restoration_(false), |
125 message_histogram_(NULL), | 159 message_histogram_(NULL), |
126 state_(NULL), | 160 state_(NULL), |
| 161 should_leak_tasks_(true), |
127 #ifdef OS_WIN | 162 #ifdef OS_WIN |
128 os_modal_loop_(false), | 163 os_modal_loop_(false), |
129 #endif // OS_WIN | 164 #endif // OS_WIN |
130 next_sequence_num_(0) { | 165 next_sequence_num_(0) { |
131 DCHECK(!current()) << "should only have one message loop per thread"; | 166 DCHECK(!current()) << "should only have one message loop per thread"; |
132 lazy_tls_ptr.Pointer()->Set(this); | 167 lazy_tls_ptr.Pointer()->Set(this); |
133 | 168 |
134 // TODO(rvargas): Get rid of the OS guards. | 169 // TODO(rvargas): Get rid of the OS guards. |
135 #if defined(OS_WIN) | 170 #if defined(OS_WIN) |
136 #define MESSAGE_PUMP_UI new base::MessagePumpForUI() | 171 #define MESSAGE_PUMP_UI new base::MessagePumpForUI() |
(...skipping 32 matching lines...)
169 DCHECK(!state_); | 204 DCHECK(!state_); |
170 | 205 |
171 // Clean up any unprocessed tasks, but take care: deleting a task could | 206 // Clean up any unprocessed tasks, but take care: deleting a task could |
172 // result in the addition of more tasks (e.g., via DeleteSoon). We set a | 207 // result in the addition of more tasks (e.g., via DeleteSoon). We set a |
173 // limit on the number of times we will allow a deleted task to generate more | 208 // limit on the number of times we will allow a deleted task to generate more |
174 // tasks. Normally, we should only pass through this loop once or twice. If | 209 // tasks. Normally, we should only pass through this loop once or twice. If |
175 // we end up hitting the loop limit, then it is probably due to one task that | 210 // we end up hitting the loop limit, then it is probably due to one task that |
176 // is being stubborn. Inspect the queues to see who is left. | 211 // is being stubborn. Inspect the queues to see who is left. |
177 bool did_work; | 212 bool did_work; |
178 for (int i = 0; i < 100; ++i) { | 213 for (int i = 0; i < 100; ++i) { |
179 DeletePendingTasks(); | 214 DeletePendingClosures(); |
180 ReloadWorkQueue(); | 215 ReloadWorkQueue(); |
181 // If we end up with empty queues, then break out of the loop. | 216 // If we end up with empty queues, then break out of the loop. |
182 did_work = DeletePendingTasks(); | 217 did_work = DeletePendingClosures(); |
183 if (!did_work) | 218 if (!did_work) |
184 break; | 219 break; |
185 } | 220 } |
186 DCHECK(!did_work); | 221 DCHECK(!did_work); |
187 | 222 |
188 // Let interested parties have one last shot at accessing this. | 223 // Let interested parties have one last shot at accessing this. |
189 FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_, | 224 FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_, |
190 WillDestroyCurrentMessageLoop()); | 225 WillDestroyCurrentMessageLoop()); |
191 | 226 |
192 // OK, now make it so that no one can find us. | 227 // OK, now make it so that no one can find us. |
(...skipping 20 matching lines...)
213 } | 248 } |
214 | 249 |
215 void MessageLoop::RemoveDestructionObserver( | 250 void MessageLoop::RemoveDestructionObserver( |
216 DestructionObserver* destruction_observer) { | 251 DestructionObserver* destruction_observer) { |
217 DCHECK_EQ(this, current()); | 252 DCHECK_EQ(this, current()); |
218 destruction_observers_.RemoveObserver(destruction_observer); | 253 destruction_observers_.RemoveObserver(destruction_observer); |
219 } | 254 } |
220 | 255 |
221 void MessageLoop::PostTask( | 256 void MessageLoop::PostTask( |
222 const tracked_objects::Location& from_here, Task* task) { | 257 const tracked_objects::Location& from_here, Task* task) { |
223 PostTask_Helper(from_here, task, 0, true); | 258 PendingClosure pending_closure( |
| 259 base::Bind(&TaskClosureAdapter::Run, |
| 260 new TaskClosureAdapter(task, &should_leak_tasks_)), |
| 261 from_here, |
| 262 CalculateDelayedRuntime(0), true); |
| 263 AddToIncomingQueue(&pending_closure); |
224 } | 264 } |
225 | 265 |
226 void MessageLoop::PostDelayedTask( | 266 void MessageLoop::PostDelayedTask( |
227 const tracked_objects::Location& from_here, Task* task, int64 delay_ms) { | 267 const tracked_objects::Location& from_here, Task* task, int64 delay_ms) { |
228 PostTask_Helper(from_here, task, delay_ms, true); | 268 PendingClosure pending_closure( |
| 269 base::Bind(&TaskClosureAdapter::Run, |
| 270 new TaskClosureAdapter(task, &should_leak_tasks_)), |
| 271 from_here, |
| 272 CalculateDelayedRuntime(delay_ms), true); |
| 273 AddToIncomingQueue(&pending_closure); |
229 } | 274 } |
230 | 275 |
231 void MessageLoop::PostNonNestableTask( | 276 void MessageLoop::PostNonNestableTask( |
232 const tracked_objects::Location& from_here, Task* task) { | 277 const tracked_objects::Location& from_here, Task* task) { |
233 PostTask_Helper(from_here, task, 0, false); | 278 PendingClosure pending_closure( |
| 279 base::Bind(&TaskClosureAdapter::Run, |
| 280 new TaskClosureAdapter(task, &should_leak_tasks_)), |
| 281 from_here, |
| 282 CalculateDelayedRuntime(0), false); |
| 283 AddToIncomingQueue(&pending_closure); |
234 } | 284 } |
235 | 285 |
236 void MessageLoop::PostNonNestableDelayedTask( | 286 void MessageLoop::PostNonNestableDelayedTask( |
237 const tracked_objects::Location& from_here, Task* task, int64 delay_ms) { | 287 const tracked_objects::Location& from_here, Task* task, int64 delay_ms) { |
238 PostTask_Helper(from_here, task, delay_ms, false); | 288 PendingClosure pending_closure( |
| 289 base::Bind(&TaskClosureAdapter::Run, |
| 290 new TaskClosureAdapter(task, &should_leak_tasks_)), |
| 291 from_here, |
| 292 CalculateDelayedRuntime(delay_ms), false); |
| 293 AddToIncomingQueue(&pending_closure); |
| 294 } |
| 295 |
| 296 void MessageLoop::PostClosure( |
| 297 const tracked_objects::Location& from_here, const base::Closure& closure) { |
| 298 DCHECK(!closure.is_null()); |
| 299 PendingClosure pending_closure(closure, from_here, |
| 300 CalculateDelayedRuntime(0), true); |
| 301 AddToIncomingQueue(&pending_closure); |
| 302 } |
| 303 |
| 304 void MessageLoop::PostDelayedClosure( |
| 305 const tracked_objects::Location& from_here, const base::Closure& closure, |
| 306 int64 delay_ms) { |
| 307 DCHECK(!closure.is_null()); |
| 308 PendingClosure pending_closure(closure, from_here, |
| 309 CalculateDelayedRuntime(delay_ms), true); |
| 310 AddToIncomingQueue(&pending_closure); |
| 311 } |
| 312 |
| 313 void MessageLoop::PostNonNestableClosure( |
| 314 const tracked_objects::Location& from_here, const base::Closure& closure) { |
| 315 DCHECK(!closure.is_null()); |
| 316 PendingClosure pending_closure(closure, from_here, |
| 317 CalculateDelayedRuntime(0), false); |
| 318 AddToIncomingQueue(&pending_closure); |
| 319 } |
| 320 |
| 321 void MessageLoop::PostNonNestableDelayedClosure( |
| 322 const tracked_objects::Location& from_here, const base::Closure& closure, |
| 323 int64 delay_ms) { |
| 324 DCHECK(!closure.is_null()); |
| 325 PendingClosure pending_closure(closure, from_here, |
| 326 CalculateDelayedRuntime(delay_ms), false); |
| 327 AddToIncomingQueue(&pending_closure); |
239 } | 328 } |
240 | 329 |
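For reference, a caller would exercise the new Closure-based entry points roughly as follows (an illustrative sketch only; DoWork is a placeholder):

void DoWork(int value);  // hypothetical function

MessageLoop* loop = MessageLoop::current();
loop->PostClosure(FROM_HERE, base::Bind(&DoWork, 42));
loop->PostDelayedClosure(FROM_HERE, base::Bind(&DoWork, 43), 100);  // 100 ms
loop->PostNonNestableClosure(FROM_HERE, base::Bind(&DoWork, 44));

// The Task-based overloads above keep working: each Task is wrapped in a
// TaskClosureAdapter and funneled through the same AddToIncomingQueue() path.
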
241 void MessageLoop::Run() { | 330 void MessageLoop::Run() { |
242 AutoRunState save_state(this); | 331 AutoRunState save_state(this); |
243 RunHandler(); | 332 RunHandler(); |
244 } | 333 } |
245 | 334 |
246 void MessageLoop::RunAllPending() { | 335 void MessageLoop::RunAllPending() { |
247 AutoRunState save_state(this); | 336 AutoRunState save_state(this); |
248 state_->quit_received = true; // Means run until we would otherwise block. | 337 state_->quit_received = true; // Means run until we would otherwise block. |
(...skipping 29 matching lines...)
278 } | 367 } |
279 | 368 |
280 bool MessageLoop::NestableTasksAllowed() const { | 369 bool MessageLoop::NestableTasksAllowed() const { |
281 return nestable_tasks_allowed_; | 370 return nestable_tasks_allowed_; |
282 } | 371 } |
283 | 372 |
284 bool MessageLoop::IsNested() { | 373 bool MessageLoop::IsNested() { |
285 return state_->run_depth > 1; | 374 return state_->run_depth > 1; |
286 } | 375 } |
287 | 376 |
288 void MessageLoop::AddTaskObserver(TaskObserver* task_observer) { | 377 void MessageLoop::AddClosureObserver(ClosureObserver* closure_observer) { |
289 DCHECK_EQ(this, current()); | 378 DCHECK_EQ(this, current()); |
290 task_observers_.AddObserver(task_observer); | 379 closure_observers_.AddObserver(closure_observer); |
291 } | 380 } |
292 | 381 |
293 void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) { | 382 void MessageLoop::RemoveClosureObserver(ClosureObserver* closure_observer) { |
294 DCHECK_EQ(this, current()); | 383 DCHECK_EQ(this, current()); |
295 task_observers_.RemoveObserver(task_observer); | 384 closure_observers_.RemoveObserver(closure_observer); |
296 } | 385 } |
297 | 386 |
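A sketch of the renamed observer interface in use, assuming (from the FOR_EACH_OBSERVER calls in RunClosure below) that the hooks receive the closure's time_posted; the exact virtual signatures live in message_loop.h and are not shown in this diff:

// Hypothetical observer that reports how long each closure waited in the
// queue before it was run.
class QueueingDelayObserver : public MessageLoop::ClosureObserver {
 public:
  virtual void WillProcessClosure(base::TimeTicks time_posted) {
    VLOG(1) << "closure queued for "
            << (base::TimeTicks::Now() - time_posted).InMilliseconds() << " ms";
  }
  virtual void DidProcessClosure(base::TimeTicks time_posted) {}
};

// Must be registered on the loop's own thread, per the DCHECK_EQ above:
//   MessageLoop::current()->AddClosureObserver(&observer);
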
298 void MessageLoop::AssertIdle() const { | 387 void MessageLoop::AssertIdle() const { |
299 // We only check |incoming_queue_|, since we don't want to lock |work_queue_|. | 388 // We only check |incoming_queue_|, since we don't want to lock |work_queue_|. |
300 base::AutoLock lock(incoming_queue_lock_); | 389 base::AutoLock lock(incoming_queue_lock_); |
301 DCHECK(incoming_queue_.empty()); | 390 DCHECK(incoming_queue_.empty()); |
302 } | 391 } |
303 | 392 |
304 //------------------------------------------------------------------------------ | 393 //------------------------------------------------------------------------------ |
305 | 394 |
(...skipping 40 matching lines...)
346 pump_->Run(this); | 435 pump_->Run(this); |
347 } | 436 } |
348 | 437 |
349 bool MessageLoop::ProcessNextDelayedNonNestableTask() { | 438 bool MessageLoop::ProcessNextDelayedNonNestableTask() { |
350 if (state_->run_depth != 1) | 439 if (state_->run_depth != 1) |
351 return false; | 440 return false; |
352 | 441 |
353 if (deferred_non_nestable_work_queue_.empty()) | 442 if (deferred_non_nestable_work_queue_.empty()) |
354 return false; | 443 return false; |
355 | 444 |
356 Task* task = deferred_non_nestable_work_queue_.front().task; | 445 PendingClosure pending_closure = deferred_non_nestable_work_queue_.front(); |
357 deferred_non_nestable_work_queue_.pop(); | 446 deferred_non_nestable_work_queue_.pop(); |
358 | 447 |
359 RunTask(task); | 448 RunClosure(pending_closure); |
360 return true; | 449 return true; |
361 } | 450 } |
362 | 451 |
363 void MessageLoop::RunTask(Task* task) { | 452 void MessageLoop::RunClosure(const PendingClosure& pending_closure) { |
364 DCHECK(nestable_tasks_allowed_); | 453 DCHECK(nestable_tasks_allowed_); |
365 // Execute the task and assume the worst: It is probably not reentrant. | 454 // Execute the task and assume the worst: It is probably not reentrant. |
366 nestable_tasks_allowed_ = false; | 455 nestable_tasks_allowed_ = false; |
367 | 456 |
368 HistogramEvent(kTaskRunEvent); | 457 HistogramEvent(kTaskRunEvent); |
369 FOR_EACH_OBSERVER(TaskObserver, task_observers_, | 458 FOR_EACH_OBSERVER(ClosureObserver, closure_observers_, |
370 WillProcessTask(task)); | 459 WillProcessClosure(pending_closure.time_posted)); |
371 task->Run(); | 460 pending_closure.closure.Run(); |
372 FOR_EACH_OBSERVER(TaskObserver, task_observers_, DidProcessTask(task)); | 461 FOR_EACH_OBSERVER(ClosureObserver, closure_observers_, |
373 delete task; | 462 DidProcessClosure(pending_closure.time_posted)); |
| 463 |
| 464 #if defined(TRACK_ALL_TASK_OBJECTS) |
| 465 if (tracked_objects::ThreadData::IsActive() && pending_closure.post_births) { |
| 466 tracked_objects::ThreadData::current()->TallyADeath( |
| 467 *pending_closure.post_births, |
| 468 TimeTicks::Now() - pending_closure.time_posted); |
| 469 } |
| 470 #endif // defined(TRACK_ALL_TASK_OBJECTS) |
374 | 471 |
375 nestable_tasks_allowed_ = true; | 472 nestable_tasks_allowed_ = true; |
376 } | 473 } |
377 | 474 |
378 bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) { | 475 bool MessageLoop::DeferOrRunPendingClosure( |
379 if (pending_task.nestable || state_->run_depth == 1) { | 476 const PendingClosure& pending_closure) { |
380 RunTask(pending_task.task); | 477 if (pending_closure.nestable || state_->run_depth == 1) { |
| 478 RunClosure(pending_closure); |
381 // Show that we ran a task (Note: a new one might arrive as a | 479 // Show that we ran a task (Note: a new one might arrive as a |
382 // consequence!). | 480 // consequence!). |
383 return true; | 481 return true; |
384 } | 482 } |
385 | 483 |
386 // We couldn't run the task now because we're in a nested message loop | 484 // We couldn't run the task now because we're in a nested message loop |
387 // and the task isn't nestable. | 485 // and the task isn't nestable. |
388 deferred_non_nestable_work_queue_.push(pending_task); | 486 deferred_non_nestable_work_queue_.push(pending_closure); |
389 return false; | 487 return false; |
390 } | 488 } |
391 | 489 |
392 void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) { | 490 void MessageLoop::AddToDelayedWorkQueue(const PendingClosure& pending_closure) { |
393 // Move to the delayed work queue. Initialize the sequence number | 491 // Move to the delayed work queue. Initialize the sequence number |
394 // before inserting into the delayed_work_queue_. The sequence number | 492 // before inserting into the delayed_work_queue_. The sequence number |
395 // is used to facilitate FIFO sorting when two tasks have the same | 493 // is used to facilitate FIFO sorting when two tasks have the same |
396 // delayed_run_time value. | 494 // delayed_run_time value. |
397 PendingTask new_pending_task(pending_task); | 495 PendingClosure new_pending_closure(pending_closure); |
398 new_pending_task.sequence_num = next_sequence_num_++; | 496 new_pending_closure.sequence_num = next_sequence_num_++; |
399 delayed_work_queue_.push(new_pending_task); | 497 delayed_work_queue_.push(new_pending_closure); |
400 } | 498 } |
401 | 499 |
402 void MessageLoop::ReloadWorkQueue() { | 500 void MessageLoop::ReloadWorkQueue() { |
403 // We can improve performance of our loading tasks from incoming_queue_ to | 501 // We can improve performance of our loading tasks from incoming_queue_ to |
404 // work_queue_ by waiting until the last minute (work_queue_ is empty) to | 502 // work_queue_ by waiting until the last minute (work_queue_ is empty) to |
405 // load. That reduces the number of locks-per-task significantly when our | 503 // load. That reduces the number of locks-per-task significantly when our |
406 // queues get large. | 504 // queues get large. |
407 if (!work_queue_.empty()) | 505 if (!work_queue_.empty()) |
408 return; // Wait till we *really* need to lock and load. | 506 return; // Wait till we *really* need to lock and load. |
409 | 507 |
410 // Acquire all we can from the inter-thread queue with one lock acquisition. | 508 // Acquire all we can from the inter-thread queue with one lock acquisition. |
411 { | 509 { |
412 base::AutoLock lock(incoming_queue_lock_); | 510 base::AutoLock lock(incoming_queue_lock_); |
413 if (incoming_queue_.empty()) | 511 if (incoming_queue_.empty()) |
414 return; | 512 return; |
415 incoming_queue_.Swap(&work_queue_); // Constant time | 513 incoming_queue_.Swap(&work_queue_); // Constant time |
416 DCHECK(incoming_queue_.empty()); | 514 DCHECK(incoming_queue_.empty()); |
417 } | 515 } |
418 } | 516 } |
419 | 517 |
420 bool MessageLoop::DeletePendingTasks() { | 518 bool MessageLoop::DeletePendingClosures() { |
421 bool did_work = !work_queue_.empty(); | 519 bool did_work = !work_queue_.empty(); |
| 520 // TODO(darin): Delete all tasks once it is safe to do so. |
| 521 // Until it is totally safe, just do it when running Purify or |
| 522 // Valgrind. |
| 523 // |
| 524 #if defined(PURIFY) || defined(USE_HEAPCHECKER) |
| 525 should_leak_tasks_ = false; |
| 526 #else |
| 527 if (RunningOnValgrind()) |
| 528 should_leak_tasks_ = false; |
| 529 #endif // defined(PURIFY) || defined(USE_HEAPCHECKER) |
422 while (!work_queue_.empty()) { | 530 while (!work_queue_.empty()) { |
423 PendingTask pending_task = work_queue_.front(); | 531 PendingClosure pending_closure = work_queue_.front(); |
424 work_queue_.pop(); | 532 work_queue_.pop(); |
425 if (!pending_task.delayed_run_time.is_null()) { | 533 if (!pending_closure.delayed_run_time.is_null()) { |
426 // We want to delete delayed tasks in the same order in which they would | 534 // We want to delete delayed tasks in the same order in which they would |
427 // normally be deleted in case of any funny dependencies between delayed | 535 // normally be deleted in case of any funny dependencies between delayed |
428 // tasks. | 536 // tasks. |
429 AddToDelayedWorkQueue(pending_task); | 537 AddToDelayedWorkQueue(pending_closure); |
430 } else { | |
431 // TODO(darin): Delete all tasks once it is safe to do so. | |
432 // Until it is totally safe, just do it when running Purify or | |
433 // Valgrind. | |
434 #if defined(PURIFY) || defined(USE_HEAPCHECKER) | |
435 delete pending_task.task; | |
436 #else | |
437 if (RunningOnValgrind()) | |
438 delete pending_task.task; | |
439 #endif // defined(OS_POSIX) | |
440 } | 538 } |
441 } | 539 } |
442 did_work |= !deferred_non_nestable_work_queue_.empty(); | 540 did_work |= !deferred_non_nestable_work_queue_.empty(); |
443 while (!deferred_non_nestable_work_queue_.empty()) { | 541 while (!deferred_non_nestable_work_queue_.empty()) { |
444 // TODO(darin): Delete all tasks once it is safe to do so. | |
445 // Until it is totally safe, only delete them under Purify and Valgrind. | |
446 Task* task = NULL; | |
447 #if defined(PURIFY) || defined(USE_HEAPCHECKER) | |
448 task = deferred_non_nestable_work_queue_.front().task; | |
449 #else | |
450 if (RunningOnValgrind()) | |
451 task = deferred_non_nestable_work_queue_.front().task; | |
452 #endif | |
453 deferred_non_nestable_work_queue_.pop(); | 542 deferred_non_nestable_work_queue_.pop(); |
454 if (task) | |
455 delete task; | |
456 } | 543 } |
457 did_work |= !delayed_work_queue_.empty(); | 544 did_work |= !delayed_work_queue_.empty(); |
| 545 |
| 546 // Historically, we always deleted the task regardless of valgrind status. It's |
| 547 // not completely clear why we want to leak them in the loops above. This |
| 548 // code is replicating legacy behavior, and should not be considered |
| 549 // absolutely "correct" behavior. |
| 550 should_leak_tasks_ = false; |
458 while (!delayed_work_queue_.empty()) { | 551 while (!delayed_work_queue_.empty()) { |
459 Task* task = delayed_work_queue_.top().task; | |
460 delayed_work_queue_.pop(); | 552 delayed_work_queue_.pop(); |
461 delete task; | |
462 } | 553 } |
| 554 should_leak_tasks_ = true; |
463 return did_work; | 555 return did_work; |
464 } | 556 } |
465 | 557 |
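The subtle case DeletePendingClosures() handles is a closure that is destroyed without ever running; under the same assumptions as the adapter sketch above, that path looks roughly like this:

// Illustrative only: a queued closure that gets dropped instead of run.
bool should_leak = true;
base::Closure orphan = base::Bind(
    &TaskClosureAdapter::Run,
    new TaskClosureAdapter(new SomeTask(), &should_leak));

should_leak = false;  // as done while draining delayed_work_queue_
orphan.Reset();       // last reference dropped; ~TaskClosureAdapter now
                      // deletes the never-run Task instead of leaking it
should_leak = true;   // restored afterwards, mirroring DeletePendingClosures()
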
466 // Possibly called on a background thread! | 558 TimeTicks MessageLoop::CalculateDelayedRuntime(int64 delay_ms) { |
467 void MessageLoop::PostTask_Helper( | 559 TimeTicks delayed_run_time; |
468 const tracked_objects::Location& from_here, Task* task, int64 delay_ms, | |
469 bool nestable) { | |
470 task->SetBirthPlace(from_here); | |
471 | |
472 PendingTask pending_task(task, nestable); | |
473 | |
474 if (delay_ms > 0) { | 560 if (delay_ms > 0) { |
475 pending_task.delayed_run_time = | 561 delayed_run_time = |
476 TimeTicks::Now() + TimeDelta::FromMilliseconds(delay_ms); | 562 TimeTicks::Now() + TimeDelta::FromMilliseconds(delay_ms); |
477 | 563 |
478 #if defined(OS_WIN) | 564 #if defined(OS_WIN) |
479 if (high_resolution_timer_expiration_.is_null()) { | 565 if (high_resolution_timer_expiration_.is_null()) { |
480 // Windows timers are granular to 15.6ms. If we only set high-res | 566 // Windows timers are granular to 15.6ms. If we only set high-res |
481 // timers for those under 15.6ms, then an 18ms timer ticks at ~32ms, | 567 // timers for those under 15.6ms, then an 18ms timer ticks at ~32ms, |
482 // which as a percentage is pretty inaccurate. So enable high | 568 // which as a percentage is pretty inaccurate. So enable high |
483 // res timers for any timer which is within 2x of the granularity. | 569 // res timers for any timer which is within 2x of the granularity. |
484 // This is a tradeoff between accuracy and power management. | 570 // This is a tradeoff between accuracy and power management. |
485 bool needs_high_res_timers = | 571 bool needs_high_res_timers = |
(...skipping 11 matching lines...)
497 | 583 |
498 #if defined(OS_WIN) | 584 #if defined(OS_WIN) |
499 if (!high_resolution_timer_expiration_.is_null()) { | 585 if (!high_resolution_timer_expiration_.is_null()) { |
500 if (TimeTicks::Now() > high_resolution_timer_expiration_) { | 586 if (TimeTicks::Now() > high_resolution_timer_expiration_) { |
501 base::Time::ActivateHighResolutionTimer(false); | 587 base::Time::ActivateHighResolutionTimer(false); |
502 high_resolution_timer_expiration_ = TimeTicks(); | 588 high_resolution_timer_expiration_ = TimeTicks(); |
503 } | 589 } |
504 } | 590 } |
505 #endif | 591 #endif |
506 | 592 |
| 593 return delayed_run_time; |
| 594 } |
| 595 |
| 596 // Possibly called on a background thread! |
| 597 void MessageLoop::AddToIncomingQueue(PendingClosure* pending_closure) { |
507 // Warning: Don't try to short-circuit, and handle this thread's tasks more | 598 // Warning: Don't try to short-circuit, and handle this thread's tasks more |
508 // directly, as it could starve handling of foreign threads. Put every task | 599 // directly, as it could starve handling of foreign threads. Put every task |
509 // into this queue. | 600 // into this queue. |
510 | 601 |
511 scoped_refptr<base::MessagePump> pump; | 602 scoped_refptr<base::MessagePump> pump; |
512 { | 603 { |
513 base::AutoLock locked(incoming_queue_lock_); | 604 base::AutoLock locked(incoming_queue_lock_); |
514 | 605 |
515 bool was_empty = incoming_queue_.empty(); | 606 bool was_empty = incoming_queue_.empty(); |
516 incoming_queue_.push(pending_task); | 607 incoming_queue_.push(*pending_closure); |
| 608 pending_closure->closure.Reset(); |
517 if (!was_empty) | 609 if (!was_empty) |
518 return; // Someone else should have started the sub-pump. | 610 return; // Someone else should have started the sub-pump. |
519 | 611 |
520 pump = pump_; | 612 pump = pump_; |
521 } | 613 } |
522 // Since the incoming_queue_ may contain a task that destroys this message | 614 // Since the incoming_queue_ may contain a task that destroys this message |
523 // loop, we cannot exit incoming_queue_lock_ until we are done with |this|. | 615 // loop, we cannot exit incoming_queue_lock_ until we are done with |this|. |
524 // We use a stack-based reference to the message pump so that we can call | 616 // We use a stack-based reference to the message pump so that we can call |
525 // ScheduleWork outside of incoming_queue_lock_. | 617 // ScheduleWork outside of incoming_queue_lock_. |
526 | 618 |
(...skipping 28 matching lines...)
555 return false; | 647 return false; |
556 } | 648 } |
557 | 649 |
558 for (;;) { | 650 for (;;) { |
559 ReloadWorkQueue(); | 651 ReloadWorkQueue(); |
560 if (work_queue_.empty()) | 652 if (work_queue_.empty()) |
561 break; | 653 break; |
562 | 654 |
563 // Execute oldest task. | 655 // Execute oldest task. |
564 do { | 656 do { |
565 PendingTask pending_task = work_queue_.front(); | 657 PendingClosure pending_closure = work_queue_.front(); |
566 work_queue_.pop(); | 658 work_queue_.pop(); |
567 if (!pending_task.delayed_run_time.is_null()) { | 659 if (!pending_closure.delayed_run_time.is_null()) { |
568 AddToDelayedWorkQueue(pending_task); | 660 AddToDelayedWorkQueue(pending_closure); |
569 // If we changed the topmost task, then it is time to re-schedule. | 661 // If we changed the topmost task, then it is time to re-schedule. |
570 if (delayed_work_queue_.top().task == pending_task.task) | 662 if (delayed_work_queue_.top().closure.Equals(pending_closure.closure)) |
571 pump_->ScheduleDelayedWork(pending_task.delayed_run_time); | 663 pump_->ScheduleDelayedWork(pending_closure.delayed_run_time); |
572 } else { | 664 } else { |
573 if (DeferOrRunPendingTask(pending_task)) | 665 if (DeferOrRunPendingClosure(pending_closure)) |
574 return true; | 666 return true; |
575 } | 667 } |
576 } while (!work_queue_.empty()); | 668 } while (!work_queue_.empty()); |
577 } | 669 } |
578 | 670 |
579 // Nothing happened. | 671 // Nothing happened. |
580 return false; | 672 return false; |
581 } | 673 } |
582 | 674 |
583 bool MessageLoop::DoDelayedWork(base::TimeTicks* next_delayed_work_time) { | 675 bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) { |
584 if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) { | 676 if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) { |
585 recent_time_ = *next_delayed_work_time = TimeTicks(); | 677 recent_time_ = *next_delayed_work_time = TimeTicks(); |
586 return false; | 678 return false; |
587 } | 679 } |
588 | 680 |
589 // When we "fall behind," there will be a lot of tasks in the delayed work | 681 // When we "fall behind," there will be a lot of tasks in the delayed work |
590 // queue that are ready to run. To increase efficiency when we fall behind, | 682 // queue that are ready to run. To increase efficiency when we fall behind, |
591 // we will only call Time::Now() intermittently, and then process all tasks | 683 // we will only call Time::Now() intermittently, and then process all tasks |
592 // that are ready to run before calling it again. As a result, the more we | 684 // that are ready to run before calling it again. As a result, the more we |
593 // fall behind (and have a lot of ready-to-run delayed tasks), the more | 685 // fall behind (and have a lot of ready-to-run delayed tasks), the more |
594 // efficient we'll be at handling the tasks. | 686 // efficient we'll be at handling the tasks. |
595 | 687 |
596 TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time; | 688 TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time; |
597 if (next_run_time > recent_time_) { | 689 if (next_run_time > recent_time_) { |
598 recent_time_ = TimeTicks::Now(); // Get a better view of Now(); | 690 recent_time_ = TimeTicks::Now(); // Get a better view of Now(); |
599 if (next_run_time > recent_time_) { | 691 if (next_run_time > recent_time_) { |
600 *next_delayed_work_time = next_run_time; | 692 *next_delayed_work_time = next_run_time; |
601 return false; | 693 return false; |
602 } | 694 } |
603 } | 695 } |
604 | 696 |
605 PendingTask pending_task = delayed_work_queue_.top(); | 697 PendingClosure pending_closure = delayed_work_queue_.top(); |
606 delayed_work_queue_.pop(); | 698 delayed_work_queue_.pop(); |
607 | 699 |
608 if (!delayed_work_queue_.empty()) | 700 if (!delayed_work_queue_.empty()) |
609 *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time; | 701 *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time; |
610 | 702 |
611 return DeferOrRunPendingTask(pending_task); | 703 return DeferOrRunPendingClosure(pending_closure); |
612 } | 704 } |
613 | 705 |
614 bool MessageLoop::DoIdleWork() { | 706 bool MessageLoop::DoIdleWork() { |
615 if (ProcessNextDelayedNonNestableTask()) | 707 if (ProcessNextDelayedNonNestableTask()) |
616 return true; | 708 return true; |
617 | 709 |
618 if (state_->quit_received) | 710 if (state_->quit_received) |
619 pump_->Quit(); | 711 pump_->Quit(); |
620 | 712 |
621 return false; | 713 return false; |
(...skipping 17 matching lines...) Expand all Loading... |
639 #if !defined(OS_MACOSX) | 731 #if !defined(OS_MACOSX) |
640 dispatcher = NULL; | 732 dispatcher = NULL; |
641 #endif | 733 #endif |
642 } | 734 } |
643 | 735 |
644 MessageLoop::AutoRunState::~AutoRunState() { | 736 MessageLoop::AutoRunState::~AutoRunState() { |
645 loop_->state_ = previous_state_; | 737 loop_->state_ = previous_state_; |
646 } | 738 } |
647 | 739 |
648 //------------------------------------------------------------------------------ | 740 //------------------------------------------------------------------------------ |
649 // MessageLoop::PendingTask | 741 // MessageLoop::PendingClosure |
650 | 742 |
651 bool MessageLoop::PendingTask::operator<(const PendingTask& other) const { | 743 MessageLoop::PendingClosure::PendingClosure( |
| 744 const base::Closure& closure, |
| 745 const tracked_objects::Location& posted_from, |
| 746 TimeTicks delayed_run_time, |
| 747 bool nestable) |
| 748 : closure(closure), |
| 749 time_posted(TimeTicks::Now()), |
| 750 delayed_run_time(delayed_run_time), |
| 751 sequence_num(0), |
| 752 nestable(nestable) { |
| 753 #if defined(TRACK_ALL_TASK_OBJECTS) |
| 754 if (tracked_objects::ThreadData::IsActive()) { |
| 755 tracked_objects::ThreadData* current_thread_data = |
| 756 tracked_objects::ThreadData::current(); |
| 757 if (current_thread_data) { |
| 758 post_births = current_thread_data->TallyABirth(posted_from); |
| 759 } else { |
| 760 // Shutdown started, and this thread wasn't registered. |
| 761 post_births = NULL; |
| 762 } |
| 763 } |
| 764 #endif // defined(TRACK_ALL_TASK_OBJECTS) |
| 765 } |
| 766 |
| 767 bool MessageLoop::PendingClosure::operator<(const PendingClosure& other) const { |
652 // Since the top of a priority queue is defined as the "greatest" element, we | 768 // Since the top of a priority queue is defined as the "greatest" element, we |
653 // need to invert the comparison here. We want the smaller time to be at the | 769 // need to invert the comparison here. We want the smaller time to be at the |
654 // top of the heap. | 770 // top of the heap. |
655 | 771 |
656 if (delayed_run_time < other.delayed_run_time) | 772 if (delayed_run_time < other.delayed_run_time) |
657 return false; | 773 return false; |
658 | 774 |
659 if (delayed_run_time > other.delayed_run_time) | 775 if (delayed_run_time > other.delayed_run_time) |
660 return true; | 776 return true; |
661 | 777 |
(...skipping 55 matching lines...)
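A minimal standalone sketch (not Chromium code) of why PendingClosure::operator< above is inverted: std::priority_queue surfaces its greatest element, so "greater" has to mean "runs later", leaving the earliest deadline, and on ties presumably the lowest sequence number (that comparison falls in the lines skipped above), on top.

#include <queue>

struct Item {
  int run_time;      // stand-in for delayed_run_time
  int sequence_num;  // stand-in for the FIFO tie-breaker
  bool operator<(const Item& other) const {
    if (run_time != other.run_time)
      return run_time > other.run_time;        // later deadline sorts lower
    return sequence_num > other.sequence_num;  // posted later sorts lower
  }
};

// std::priority_queue<Item> queue;  // queue.top() is the soonest, oldest item.
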
717 Watcher *delegate) { | 833 Watcher *delegate) { |
718 return pump_libevent()->WatchFileDescriptor( | 834 return pump_libevent()->WatchFileDescriptor( |
719 fd, | 835 fd, |
720 persistent, | 836 persistent, |
721 static_cast<base::MessagePumpLibevent::Mode>(mode), | 837 static_cast<base::MessagePumpLibevent::Mode>(mode), |
722 controller, | 838 controller, |
723 delegate); | 839 delegate); |
724 } | 840 } |
725 | 841 |
726 #endif | 842 #endif |