Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2010 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/message_loop.h" | 5 #include "base/message_loop.h" |
| 6 | 6 |
| 7 // This needs to be above the gdk includes because gdk #defines Status. | |
|
willchan no longer on Chromium
2011/03/22 23:30:45
We should instead move the platform specific inclu
awong
2011/03/23 04:05:45
Done.
| |
| 8 #include "base/tracked_objects.h" | |
| 9 | |
| 7 #if defined(OS_POSIX) && !defined(OS_MACOSX) | 10 #if defined(OS_POSIX) && !defined(OS_MACOSX) |
| 8 #include <gdk/gdk.h> | 11 #include <gdk/gdk.h> |
| 9 #include <gdk/gdkx.h> | 12 #include <gdk/gdkx.h> |
| 10 #endif | 13 #endif |
| 11 | 14 |
| 12 #include <algorithm> | 15 #include <algorithm> |
| 13 | 16 |
| 17 #include "base/bind.h" | |
| 14 #include "base/compiler_specific.h" | 18 #include "base/compiler_specific.h" |
| 15 #include "base/lazy_instance.h" | 19 #include "base/lazy_instance.h" |
| 16 #include "base/logging.h" | 20 #include "base/logging.h" |
| 17 #include "base/message_pump_default.h" | 21 #include "base/message_pump_default.h" |
| 18 #include "base/metrics/histogram.h" | 22 #include "base/metrics/histogram.h" |
| 23 #include "base/scoped_ptr.h" | |
| 19 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | 24 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" |
| 20 #include "base/threading/thread_local.h" | 25 #include "base/threading/thread_local.h" |
| 21 | 26 |
| 22 #if defined(OS_MACOSX) | 27 #if defined(OS_MACOSX) |
| 23 #include "base/message_pump_mac.h" | 28 #include "base/message_pump_mac.h" |
| 24 #endif | 29 #endif |
| 25 #if defined(OS_POSIX) | 30 #if defined(OS_POSIX) |
| 26 #include "base/message_pump_libevent.h" | 31 #include "base/message_pump_libevent.h" |
| 27 #endif | 32 #endif |
| 28 #if defined(OS_POSIX) && !defined(OS_MACOSX) | 33 #if defined(OS_POSIX) && !defined(OS_MACOSX) |
| (...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 74 | 79 |
| 75 // A few events we handle (kindred to messages), and used to profile actions. | 80 // A few events we handle (kindred to messages), and used to profile actions. |
| 76 VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent) | 81 VALUE_TO_NUMBER_AND_NAME(kTaskRunEvent) |
| 77 VALUE_TO_NUMBER_AND_NAME(kTimerEvent) | 82 VALUE_TO_NUMBER_AND_NAME(kTimerEvent) |
| 78 | 83 |
| 79 {-1, NULL} // The list must be null terminated, per API to histogram. | 84 {-1, NULL} // The list must be null terminated, per API to histogram. |
| 80 }; | 85 }; |
| 81 | 86 |
| 82 bool enable_histogrammer_ = false; | 87 bool enable_histogrammer_ = false; |
| 83 | 88 |
| 89 // TODO(ajwong): This is one use case for a Owned() tag that behaves like a | |
| 90 // "Unique" pointer. If we had that, and Tasks were always safe to delete on | |
| 91 // MessageLoop shutdown, this class could just be a function. | |
| 92 class TaskClosureAdapter : public base::RefCounted<TaskClosureAdapter> { | |
| 93 public: | |
| 94 // |should_leak_task| points to a flag variable that can be used to determine | |
|
willchan no longer on Chromium
2011/03/22 23:30:45
Why do we need to use a pointer? Can't we determin
awong
2011/03/23 04:05:45
It needs to be a pointer because whether or not th
willchan no longer on Chromium
2011/03/23 18:01:10
I'm not sure if I completely understand. If a Task
awong
2011/03/23 19:33:01
Not quite. In DeletePendingClosure(), there seems
willchan no longer on Chromium
2011/03/23 19:41:56
Hah! I missed that little subtlety. I don't see wh
| |
| 95 // if this class should leak the Task on destruction. This is important | |
| 96 // at MessageLoop shutdown since not all tasks can be safely deleted without | |
| 97 // running. | |
| 98 TaskClosureAdapter(Task* task, bool* should_leak_task) | |
| 99 : task_(task), | |
| 100 should_leak_task_(should_leak_task) { | |
| 101 } | |
| 102 | |
| 103 void Run() { | |
| 104 task_->Run(); | |
| 105 delete task_; | |
| 106 task_ = NULL; | |
| 107 } | |
| 108 | |
| 109 private: | |
| 110 friend class base::RefCounted<TaskClosureAdapter>; | |
| 111 | |
| 112 ~TaskClosureAdapter() { | |
| 113 if (!*should_leak_task_) { | |
| 114 delete task_; | |
| 115 } | |
| 116 } | |
| 117 | |
| 118 Task* task_; | |
|
willchan no longer on Chromium
2011/03/22 23:30:45
I sorta feel like this should be a scoped_ptr to i
awong
2011/03/23 04:05:45
I bounced between the two. In the end, it seemed
| |
| 119 bool* should_leak_task_; | |
| 120 }; | |
| 121 | |
| 84 } // namespace | 122 } // namespace |
| 85 | 123 |
| 86 //------------------------------------------------------------------------------ | 124 //------------------------------------------------------------------------------ |
| 87 | 125 |
| 88 #if defined(OS_WIN) | 126 #if defined(OS_WIN) |
| 89 | 127 |
| 90 // Upon a SEH exception in this thread, it restores the original unhandled | 128 // Upon a SEH exception in this thread, it restores the original unhandled |
| 91 // exception filter. | 129 // exception filter. |
| 92 static int SEHFilter(LPTOP_LEVEL_EXCEPTION_FILTER old_filter) { | 130 static int SEHFilter(LPTOP_LEVEL_EXCEPTION_FILTER old_filter) { |
| 93 ::SetUnhandledExceptionFilter(old_filter); | 131 ::SetUnhandledExceptionFilter(old_filter); |
| 94 return EXCEPTION_CONTINUE_SEARCH; | 132 return EXCEPTION_CONTINUE_SEARCH; |
| 95 } | 133 } |
| 96 | 134 |
| 97 // Retrieves a pointer to the current unhandled exception filter. There | 135 // Retrieves a pointer to the current unhandled exception filter. There |
| 98 // is no standalone getter method. | 136 // is no standalone getter method. |
| 99 static LPTOP_LEVEL_EXCEPTION_FILTER GetTopSEHFilter() { | 137 static LPTOP_LEVEL_EXCEPTION_FILTER GetTopSEHFilter() { |
| 100 LPTOP_LEVEL_EXCEPTION_FILTER top_filter = NULL; | 138 LPTOP_LEVEL_EXCEPTION_FILTER top_filter = NULL; |
| 101 top_filter = ::SetUnhandledExceptionFilter(0); | 139 top_filter = ::SetUnhandledExceptionFilter(0); |
| 102 ::SetUnhandledExceptionFilter(top_filter); | 140 ::SetUnhandledExceptionFilter(top_filter); |
| 103 return top_filter; | 141 return top_filter; |
| 104 } | 142 } |
| 105 | 143 |
| 106 #endif // defined(OS_WIN) | 144 #endif // defined(OS_WIN) |
| 107 | 145 |
| 108 //------------------------------------------------------------------------------ | 146 //------------------------------------------------------------------------------ |
| 109 | 147 |
| 110 MessageLoop::TaskObserver::TaskObserver() { | 148 MessageLoop::ClosureObserver::ClosureObserver() { |
| 111 } | 149 } |
| 112 | 150 |
| 113 MessageLoop::TaskObserver::~TaskObserver() { | 151 MessageLoop::ClosureObserver::~ClosureObserver() { |
| 114 } | 152 } |
| 115 | 153 |
| 116 MessageLoop::DestructionObserver::~DestructionObserver() { | 154 MessageLoop::DestructionObserver::~DestructionObserver() { |
| 117 } | 155 } |
| 118 | 156 |
| 119 //------------------------------------------------------------------------------ | 157 //------------------------------------------------------------------------------ |
| 120 | 158 |
| 121 MessageLoop::MessageLoop(Type type) | 159 MessageLoop::MessageLoop(Type type) |
| 122 : type_(type), | 160 : type_(type), |
| 123 nestable_tasks_allowed_(true), | 161 nestable_tasks_allowed_(true), |
| 124 exception_restoration_(false), | 162 exception_restoration_(false), |
| 125 state_(NULL), | 163 state_(NULL), |
| 164 should_leak_tasks_(true), | |
| 126 next_sequence_num_(0) { | 165 next_sequence_num_(0) { |
| 127 DCHECK(!current()) << "should only have one message loop per thread"; | 166 DCHECK(!current()) << "should only have one message loop per thread"; |
| 128 lazy_tls_ptr.Pointer()->Set(this); | 167 lazy_tls_ptr.Pointer()->Set(this); |
| 129 | 168 |
| 130 // TODO(rvargas): Get rid of the OS guards. | 169 // TODO(rvargas): Get rid of the OS guards. |
| 131 #if defined(OS_WIN) | 170 #if defined(OS_WIN) |
| 132 #define MESSAGE_PUMP_UI new base::MessagePumpForUI() | 171 #define MESSAGE_PUMP_UI new base::MessagePumpForUI() |
| 133 #define MESSAGE_PUMP_IO new base::MessagePumpForIO() | 172 #define MESSAGE_PUMP_IO new base::MessagePumpForIO() |
| 134 #elif defined(OS_MACOSX) | 173 #elif defined(OS_MACOSX) |
| 135 #define MESSAGE_PUMP_UI base::MessagePumpMac::Create() | 174 #define MESSAGE_PUMP_UI base::MessagePumpMac::Create() |
| (...skipping 29 matching lines...) Expand all Loading... | |
| 165 DCHECK(!state_); | 204 DCHECK(!state_); |
| 166 | 205 |
| 167 // Clean up any unprocessed tasks, but take care: deleting a task could | 206 // Clean up any unprocessed tasks, but take care: deleting a task could |
| 168 // result in the addition of more tasks (e.g., via DeleteSoon). We set a | 207 // result in the addition of more tasks (e.g., via DeleteSoon). We set a |
| 169 // limit on the number of times we will allow a deleted task to generate more | 208 // limit on the number of times we will allow a deleted task to generate more |
| 170 // tasks. Normally, we should only pass through this loop once or twice. If | 209 // tasks. Normally, we should only pass through this loop once or twice. If |
| 171 // we end up hitting the loop limit, then it is probably due to one task that | 210 // we end up hitting the loop limit, then it is probably due to one task that |
| 172 // is being stubborn. Inspect the queues to see who is left. | 211 // is being stubborn. Inspect the queues to see who is left. |
| 173 bool did_work; | 212 bool did_work; |
| 174 for (int i = 0; i < 100; ++i) { | 213 for (int i = 0; i < 100; ++i) { |
| 175 DeletePendingTasks(); | 214 DeletePendingClosures(); |
| 176 ReloadWorkQueue(); | 215 ReloadWorkQueue(); |
| 177 // If we end up with empty queues, then break out of the loop. | 216 // If we end up with empty queues, then break out of the loop. |
| 178 did_work = DeletePendingTasks(); | 217 did_work = DeletePendingClosures(); |
| 179 if (!did_work) | 218 if (!did_work) |
| 180 break; | 219 break; |
| 181 } | 220 } |
| 182 DCHECK(!did_work); | 221 DCHECK(!did_work); |
| 183 | 222 |
| 184 // Let interested parties have one last shot at accessing this. | 223 // Let interested parties have one last shot at accessing this. |
| 185 FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_, | 224 FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_, |
| 186 WillDestroyCurrentMessageLoop()); | 225 WillDestroyCurrentMessageLoop()); |
| 187 | 226 |
| 188 // OK, now make it so that no one can find us. | 227 // OK, now make it so that no one can find us. |
| (...skipping 20 matching lines...) Expand all Loading... | |
| 209 } | 248 } |
| 210 | 249 |
| 211 void MessageLoop::RemoveDestructionObserver( | 250 void MessageLoop::RemoveDestructionObserver( |
| 212 DestructionObserver* destruction_observer) { | 251 DestructionObserver* destruction_observer) { |
| 213 DCHECK_EQ(this, current()); | 252 DCHECK_EQ(this, current()); |
| 214 destruction_observers_.RemoveObserver(destruction_observer); | 253 destruction_observers_.RemoveObserver(destruction_observer); |
| 215 } | 254 } |
| 216 | 255 |
| 217 void MessageLoop::PostTask( | 256 void MessageLoop::PostTask( |
| 218 const tracked_objects::Location& from_here, Task* task) { | 257 const tracked_objects::Location& from_here, Task* task) { |
| 219 PostTask_Helper(from_here, task, 0, true); | 258 PendingClosure pending_closure( |
| 259 base::Bind(&TaskClosureAdapter::Run, | |
| 260 new TaskClosureAdapter(task, &should_leak_tasks_)), | |
| 261 from_here, | |
| 262 CalculateDelayedRuntime(0), true); | |
| 263 // TODO(ajwong): This failed because pending_closure outlives the add to the | |
| 264 // incoming_queue_. | |
| 265 AddToIncomingQueue(&pending_closure); | |
| 220 } | 266 } |
| 221 | 267 |
| 222 void MessageLoop::PostDelayedTask( | 268 void MessageLoop::PostDelayedTask( |
| 223 const tracked_objects::Location& from_here, Task* task, int64 delay_ms) { | 269 const tracked_objects::Location& from_here, Task* task, int64 delay_ms) { |
| 224 PostTask_Helper(from_here, task, delay_ms, true); | 270 PendingClosure pending_closure( |
| 271 base::Bind(&TaskClosureAdapter::Run, | |
| 272 new TaskClosureAdapter(task, &should_leak_tasks_)), | |
| 273 from_here, | |
| 274 CalculateDelayedRuntime(delay_ms), true); | |
| 275 AddToIncomingQueue(&pending_closure); | |
| 225 } | 276 } |
| 226 | 277 |
| 227 void MessageLoop::PostNonNestableTask( | 278 void MessageLoop::PostNonNestableTask( |
| 228 const tracked_objects::Location& from_here, Task* task) { | 279 const tracked_objects::Location& from_here, Task* task) { |
| 229 PostTask_Helper(from_here, task, 0, false); | 280 PendingClosure pending_closure( |
| 281 base::Bind(&TaskClosureAdapter::Run, | |
| 282 new TaskClosureAdapter(task, &should_leak_tasks_)), | |
| 283 from_here, | |
| 284 CalculateDelayedRuntime(0), false); | |
| 285 AddToIncomingQueue(&pending_closure); | |
| 230 } | 286 } |
| 231 | 287 |
| 232 void MessageLoop::PostNonNestableDelayedTask( | 288 void MessageLoop::PostNonNestableDelayedTask( |
| 233 const tracked_objects::Location& from_here, Task* task, int64 delay_ms) { | 289 const tracked_objects::Location& from_here, Task* task, int64 delay_ms) { |
| 234 PostTask_Helper(from_here, task, delay_ms, false); | 290 PendingClosure pending_closure( |
| 291 base::Bind(&TaskClosureAdapter::Run, | |
| 292 new TaskClosureAdapter(task, &should_leak_tasks_)), | |
| 293 from_here, | |
| 294 CalculateDelayedRuntime(delay_ms), false); | |
| 295 AddToIncomingQueue(&pending_closure); | |
| 296 } | |
| 297 | |
| 298 void MessageLoop::PostClosure( | |
| 299 const tracked_objects::Location& from_here, const base::Closure& closure) { | |
| 300 DCHECK(!closure.is_null()); | |
| 301 PendingClosure pending_closure(closure, from_here, | |
| 302 CalculateDelayedRuntime(0), true); | |
| 303 AddToIncomingQueue(&pending_closure); | |
| 304 } | |
| 305 | |
| 306 void MessageLoop::PostDelayedClosure( | |
| 307 const tracked_objects::Location& from_here, const base::Closure& closure, | |
| 308 int64 delay_ms) { | |
| 309 DCHECK(!closure.is_null()); | |
| 310 PendingClosure pending_closure(closure, from_here, | |
| 311 CalculateDelayedRuntime(delay_ms), true); | |
| 312 AddToIncomingQueue(&pending_closure); | |
| 313 } | |
| 314 | |
| 315 void MessageLoop::PostNonNestableClosure( | |
| 316 const tracked_objects::Location& from_here, const base::Closure& closure) { | |
| 317 DCHECK(!closure.is_null()); | |
| 318 PendingClosure pending_closure(closure, from_here, | |
| 319 CalculateDelayedRuntime(0), false); | |
| 320 AddToIncomingQueue(&pending_closure); | |
| 321 } | |
| 322 | |
| 323 void MessageLoop::PostNonNestableDelayedClosure( | |
| 324 const tracked_objects::Location& from_here, const base::Closure& closure, | |
| 325 int64 delay_ms) { | |
| 326 DCHECK(!closure.is_null()); | |
| 327 PendingClosure pending_closure(closure, from_here, | |
| 328 CalculateDelayedRuntime(delay_ms), false); | |
| 329 AddToIncomingQueue(&pending_closure); | |
| 235 } | 330 } |
| 236 | 331 |
| 237 void MessageLoop::Run() { | 332 void MessageLoop::Run() { |
| 238 AutoRunState save_state(this); | 333 AutoRunState save_state(this); |
| 239 RunHandler(); | 334 RunHandler(); |
| 240 } | 335 } |
| 241 | 336 |
| 242 void MessageLoop::RunAllPending() { | 337 void MessageLoop::RunAllPending() { |
| 243 AutoRunState save_state(this); | 338 AutoRunState save_state(this); |
| 244 state_->quit_received = true; // Means run until we would otherwise block. | 339 state_->quit_received = true; // Means run until we would otherwise block. |
| (...skipping 29 matching lines...) Expand all Loading... | |
| 274 } | 369 } |
| 275 | 370 |
| 276 bool MessageLoop::NestableTasksAllowed() const { | 371 bool MessageLoop::NestableTasksAllowed() const { |
| 277 return nestable_tasks_allowed_; | 372 return nestable_tasks_allowed_; |
| 278 } | 373 } |
| 279 | 374 |
| 280 bool MessageLoop::IsNested() { | 375 bool MessageLoop::IsNested() { |
| 281 return state_->run_depth > 1; | 376 return state_->run_depth > 1; |
| 282 } | 377 } |
| 283 | 378 |
| 284 void MessageLoop::AddTaskObserver(TaskObserver* task_observer) { | 379 void MessageLoop::AddClosureObserver(ClosureObserver* closure_observer) { |
| 285 DCHECK_EQ(this, current()); | 380 DCHECK_EQ(this, current()); |
| 286 task_observers_.AddObserver(task_observer); | 381 closure_observers_.AddObserver(closure_observer); |
| 287 } | 382 } |
| 288 | 383 |
| 289 void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) { | 384 void MessageLoop::RemoveClosureObserver(ClosureObserver* closure_observer) { |
| 290 DCHECK_EQ(this, current()); | 385 DCHECK_EQ(this, current()); |
| 291 task_observers_.RemoveObserver(task_observer); | 386 closure_observers_.RemoveObserver(closure_observer); |
| 292 } | 387 } |
| 293 | 388 |
| 294 void MessageLoop::AssertIdle() const { | 389 void MessageLoop::AssertIdle() const { |
| 295 // We only check |incoming_queue_|, since we don't want to lock |work_queue_|. | 390 // We only check |incoming_queue_|, since we don't want to lock |work_queue_|. |
| 296 base::AutoLock lock(incoming_queue_lock_); | 391 base::AutoLock lock(incoming_queue_lock_); |
| 297 DCHECK(incoming_queue_.empty()); | 392 DCHECK(incoming_queue_.empty()); |
| 298 } | 393 } |
| 299 | 394 |
| 300 //------------------------------------------------------------------------------ | 395 //------------------------------------------------------------------------------ |
| 301 | 396 |
| (...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 342 pump_->Run(this); | 437 pump_->Run(this); |
| 343 } | 438 } |
| 344 | 439 |
| 345 bool MessageLoop::ProcessNextDelayedNonNestableTask() { | 440 bool MessageLoop::ProcessNextDelayedNonNestableTask() { |
| 346 if (state_->run_depth != 1) | 441 if (state_->run_depth != 1) |
| 347 return false; | 442 return false; |
| 348 | 443 |
| 349 if (deferred_non_nestable_work_queue_.empty()) | 444 if (deferred_non_nestable_work_queue_.empty()) |
| 350 return false; | 445 return false; |
| 351 | 446 |
| 352 Task* task = deferred_non_nestable_work_queue_.front().task; | 447 PendingClosure pending_closure = deferred_non_nestable_work_queue_.front(); |
| 353 deferred_non_nestable_work_queue_.pop(); | 448 deferred_non_nestable_work_queue_.pop(); |
| 354 | 449 |
| 355 RunTask(task); | 450 RunClosure(pending_closure); |
| 356 return true; | 451 return true; |
| 357 } | 452 } |
| 358 | 453 |
| 359 void MessageLoop::RunTask(Task* task) { | 454 void MessageLoop::RunClosure(const PendingClosure& pending_closure) { |
| 360 DCHECK(nestable_tasks_allowed_); | 455 DCHECK(nestable_tasks_allowed_); |
| 361 // Execute the task and assume the worst: It is probably not reentrant. | 456 // Execute the task and assume the worst: It is probably not reentrant. |
| 362 nestable_tasks_allowed_ = false; | 457 nestable_tasks_allowed_ = false; |
| 363 | 458 |
| 364 HistogramEvent(kTaskRunEvent); | 459 HistogramEvent(kTaskRunEvent); |
| 365 FOR_EACH_OBSERVER(TaskObserver, task_observers_, | 460 FOR_EACH_OBSERVER(ClosureObserver, closure_observers_, |
| 366 WillProcessTask(task)); | 461 WillProcessClosure(pending_closure.time_posted)); |
| 367 task->Run(); | 462 pending_closure.closure.Run(); |
| 368 FOR_EACH_OBSERVER(TaskObserver, task_observers_, DidProcessTask(task)); | 463 FOR_EACH_OBSERVER(ClosureObserver, closure_observers_, |
| 369 delete task; | 464 DidProcessClosure(pending_closure.time_posted)); |
| 465 | |
| 466 #if defined(TRACK_ALL_TASK_OBJECTS) | |
|
willchan no longer on Chromium
2011/03/22 23:30:45
Where are the corresponding changes to remove this
awong
2011/03/23 04:05:45
WorkerPool is not migrated yet so I can't break th
willchan no longer on Chromium
2011/03/23 18:01:10
My preference would be to do it in the same CL, bu
awong
2011/03/23 19:33:01
Fixing WorkerPool will take a bit of works since I
willchan no longer on Chromium
2011/03/23 19:41:56
Fine with me.
| |
| 467 if (tracked_objects::ThreadData::IsActive() && pending_closure.post_births) { | |
| 468 tracked_objects::ThreadData::current()->TallyADeath( | |
| 469 *pending_closure.post_births, | |
| 470 TimeTicks::Now() - pending_closure.time_posted); | |
| 471 } | |
| 472 #endif // defined(TRACK_ALL_TASK_OBJECTS) | |
| 370 | 473 |
| 371 nestable_tasks_allowed_ = true; | 474 nestable_tasks_allowed_ = true; |
| 372 } | 475 } |
| 373 | 476 |
| 374 bool MessageLoop::DeferOrRunPendingTask(const PendingTask& pending_task) { | 477 bool MessageLoop::DeferOrRunPendingClosure( |
| 375 if (pending_task.nestable || state_->run_depth == 1) { | 478 const PendingClosure& pending_closure) { |
| 376 RunTask(pending_task.task); | 479 if (pending_closure.nestable || state_->run_depth == 1) { |
| 480 RunClosure(pending_closure); | |
| 377 // Show that we ran a task (Note: a new one might arrive as a | 481 // Show that we ran a task (Note: a new one might arrive as a |
| 378 // consequence!). | 482 // consequence!). |
| 379 return true; | 483 return true; |
| 380 } | 484 } |
| 381 | 485 |
| 382 // We couldn't run the task now because we're in a nested message loop | 486 // We couldn't run the task now because we're in a nested message loop |
| 383 // and the task isn't nestable. | 487 // and the task isn't nestable. |
| 384 deferred_non_nestable_work_queue_.push(pending_task); | 488 deferred_non_nestable_work_queue_.push(pending_closure); |
| 385 return false; | 489 return false; |
| 386 } | 490 } |
| 387 | 491 |
| 388 void MessageLoop::AddToDelayedWorkQueue(const PendingTask& pending_task) { | 492 void MessageLoop::AddToDelayedWorkQueue(const PendingClosure& pending_closure) { |
| 389 // Move to the delayed work queue. Initialize the sequence number | 493 // Move to the delayed work queue. Initialize the sequence number |
| 390 // before inserting into the delayed_work_queue_. The sequence number | 494 // before inserting into the delayed_work_queue_. The sequence number |
| 391 // is used to faciliate FIFO sorting when two tasks have the same | 495 // is used to faciliate FIFO sorting when two tasks have the same |
| 392 // delayed_run_time value. | 496 // delayed_run_time value. |
| 393 PendingTask new_pending_task(pending_task); | 497 PendingClosure new_pending_closure(pending_closure); |
| 394 new_pending_task.sequence_num = next_sequence_num_++; | 498 new_pending_closure.sequence_num = next_sequence_num_++; |
| 395 delayed_work_queue_.push(new_pending_task); | 499 delayed_work_queue_.push(new_pending_closure); |
| 396 } | 500 } |
| 397 | 501 |
| 398 void MessageLoop::ReloadWorkQueue() { | 502 void MessageLoop::ReloadWorkQueue() { |
| 399 // We can improve performance of our loading tasks from incoming_queue_ to | 503 // We can improve performance of our loading tasks from incoming_queue_ to |
| 400 // work_queue_ by waiting until the last minute (work_queue_ is empty) to | 504 // work_queue_ by waiting until the last minute (work_queue_ is empty) to |
| 401 // load. That reduces the number of locks-per-task significantly when our | 505 // load. That reduces the number of locks-per-task significantly when our |
| 402 // queues get large. | 506 // queues get large. |
| 403 if (!work_queue_.empty()) | 507 if (!work_queue_.empty()) |
| 404 return; // Wait till we *really* need to lock and load. | 508 return; // Wait till we *really* need to lock and load. |
| 405 | 509 |
| 406 // Acquire all we can from the inter-thread queue with one lock acquisition. | 510 // Acquire all we can from the inter-thread queue with one lock acquisition. |
| 407 { | 511 { |
| 408 base::AutoLock lock(incoming_queue_lock_); | 512 base::AutoLock lock(incoming_queue_lock_); |
| 409 if (incoming_queue_.empty()) | 513 if (incoming_queue_.empty()) |
| 410 return; | 514 return; |
| 411 incoming_queue_.Swap(&work_queue_); // Constant time | 515 incoming_queue_.Swap(&work_queue_); // Constant time |
| 412 DCHECK(incoming_queue_.empty()); | 516 DCHECK(incoming_queue_.empty()); |
| 413 } | 517 } |
| 414 } | 518 } |
| 415 | 519 |
| 416 bool MessageLoop::DeletePendingTasks() { | 520 bool MessageLoop::DeletePendingClosures() { |
| 417 bool did_work = !work_queue_.empty(); | 521 bool did_work = !work_queue_.empty(); |
| 522 // TODO(darin): Delete all tasks once it is safe to do so. | |
| 523 // Until it is totally safe, just do it when running Purify or | |
| 524 // Valgrind. | |
| 525 // | |
| 526 #if defined(PURIFY) || defined(USE_HEAPCHECKER) | |
| 527 should_leak_tasks_ = false; | |
| 528 #else | |
| 529 if (RunningOnValgrind()) | |
| 530 should_leak_tasks_ = false; | |
| 531 #endif // defined(OS_POSIX) | |
| 418 while (!work_queue_.empty()) { | 532 while (!work_queue_.empty()) { |
| 419 PendingTask pending_task = work_queue_.front(); | 533 PendingClosure pending_closure = work_queue_.front(); |
| 420 work_queue_.pop(); | 534 work_queue_.pop(); |
| 421 if (!pending_task.delayed_run_time.is_null()) { | 535 if (!pending_closure.delayed_run_time.is_null()) { |
| 422 // We want to delete delayed tasks in the same order in which they would | 536 // We want to delete delayed tasks in the same order in which they would |
| 423 // normally be deleted in case of any funny dependencies between delayed | 537 // normally be deleted in case of any funny dependencies between delayed |
| 424 // tasks. | 538 // tasks. |
| 425 AddToDelayedWorkQueue(pending_task); | 539 AddToDelayedWorkQueue(pending_closure); |
| 426 } else { | |
| 427 // TODO(darin): Delete all tasks once it is safe to do so. | |
| 428 // Until it is totally safe, just do it when running Purify or | |
| 429 // Valgrind. | |
| 430 #if defined(PURIFY) || defined(USE_HEAPCHECKER) | |
| 431 delete pending_task.task; | |
| 432 #else | |
| 433 if (RunningOnValgrind()) | |
| 434 delete pending_task.task; | |
| 435 #endif // defined(OS_POSIX) | |
| 436 } | 540 } |
| 437 } | 541 } |
| 438 did_work |= !deferred_non_nestable_work_queue_.empty(); | 542 did_work |= !deferred_non_nestable_work_queue_.empty(); |
| 439 while (!deferred_non_nestable_work_queue_.empty()) { | 543 while (!deferred_non_nestable_work_queue_.empty()) { |
| 440 // TODO(darin): Delete all tasks once it is safe to do so. | |
| 441 // Until it is totaly safe, only delete them under Purify and Valgrind. | |
| 442 Task* task = NULL; | |
| 443 #if defined(PURIFY) || defined(USE_HEAPCHECKER) | |
| 444 task = deferred_non_nestable_work_queue_.front().task; | |
| 445 #else | |
| 446 if (RunningOnValgrind()) | |
| 447 task = deferred_non_nestable_work_queue_.front().task; | |
| 448 #endif | |
| 449 deferred_non_nestable_work_queue_.pop(); | 544 deferred_non_nestable_work_queue_.pop(); |
| 450 if (task) | |
| 451 delete task; | |
| 452 } | 545 } |
| 453 did_work |= !delayed_work_queue_.empty(); | 546 did_work |= !delayed_work_queue_.empty(); |
| 547 | |
| 548 // Historically, we always delete the task regardless of valgrind status. It's | |
| 549 // not completely clear why we want to leak them in the loops above. This | |
| 550 // code is replicating legacy behavior, and should not be considered | |
| 551 // absolutely "correct" behavior. | |
| 552 should_leak_tasks_ = false; | |
| 454 while (!delayed_work_queue_.empty()) { | 553 while (!delayed_work_queue_.empty()) { |
| 455 Task* task = delayed_work_queue_.top().task; | |
| 456 delayed_work_queue_.pop(); | 554 delayed_work_queue_.pop(); |
| 457 delete task; | |
| 458 } | 555 } |
| 556 should_leak_tasks_ = true; | |
| 459 return did_work; | 557 return did_work; |
| 460 } | 558 } |
| 461 | 559 |
| 462 // Possibly called on a background thread! | 560 TimeTicks MessageLoop::CalculateDelayedRuntime(int64 delay_ms) { |
| 463 void MessageLoop::PostTask_Helper( | 561 TimeTicks delayed_run_time; |
| 464 const tracked_objects::Location& from_here, Task* task, int64 delay_ms, | |
| 465 bool nestable) { | |
| 466 task->SetBirthPlace(from_here); | |
| 467 | |
| 468 PendingTask pending_task(task, nestable); | |
| 469 | |
| 470 if (delay_ms > 0) { | 562 if (delay_ms > 0) { |
| 471 pending_task.delayed_run_time = | 563 delayed_run_time = |
| 472 TimeTicks::Now() + TimeDelta::FromMilliseconds(delay_ms); | 564 TimeTicks::Now() + TimeDelta::FromMilliseconds(delay_ms); |
| 473 | 565 |
| 474 #if defined(OS_WIN) | 566 #if defined(OS_WIN) |
| 475 if (high_resolution_timer_expiration_.is_null()) { | 567 if (high_resolution_timer_expiration_.is_null()) { |
| 476 // Windows timers are granular to 15.6ms. If we only set high-res | 568 // Windows timers are granular to 15.6ms. If we only set high-res |
| 477 // timers for those under 15.6ms, then a 18ms timer ticks at ~32ms, | 569 // timers for those under 15.6ms, then a 18ms timer ticks at ~32ms, |
| 478 // which as a percentage is pretty inaccurate. So enable high | 570 // which as a percentage is pretty inaccurate. So enable high |
| 479 // res timers for any timer which is within 2x of the granularity. | 571 // res timers for any timer which is within 2x of the granularity. |
| 480 // This is a tradeoff between accuracy and power management. | 572 // This is a tradeoff between accuracy and power management. |
| 481 bool needs_high_res_timers = | 573 bool needs_high_res_timers = |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 493 | 585 |
| 494 #if defined(OS_WIN) | 586 #if defined(OS_WIN) |
| 495 if (!high_resolution_timer_expiration_.is_null()) { | 587 if (!high_resolution_timer_expiration_.is_null()) { |
| 496 if (TimeTicks::Now() > high_resolution_timer_expiration_) { | 588 if (TimeTicks::Now() > high_resolution_timer_expiration_) { |
| 497 base::Time::ActivateHighResolutionTimer(false); | 589 base::Time::ActivateHighResolutionTimer(false); |
| 498 high_resolution_timer_expiration_ = TimeTicks(); | 590 high_resolution_timer_expiration_ = TimeTicks(); |
| 499 } | 591 } |
| 500 } | 592 } |
| 501 #endif | 593 #endif |
| 502 | 594 |
| 595 return delayed_run_time; | |
| 596 } | |
| 597 | |
| 598 // Possibly called on a background thread! | |
| 599 void MessageLoop::AddToIncomingQueue(PendingClosure* pending_closure) { | |
| 503 // Warning: Don't try to short-circuit, and handle this thread's tasks more | 600 // Warning: Don't try to short-circuit, and handle this thread's tasks more |
| 504 // directly, as it could starve handling of foreign threads. Put every task | 601 // directly, as it could starve handling of foreign threads. Put every task |
| 505 // into this queue. | 602 // into this queue. |
| 506 | 603 |
| 507 scoped_refptr<base::MessagePump> pump; | 604 scoped_refptr<base::MessagePump> pump; |
| 508 { | 605 { |
| 509 base::AutoLock locked(incoming_queue_lock_); | 606 base::AutoLock locked(incoming_queue_lock_); |
| 510 | 607 |
| 511 bool was_empty = incoming_queue_.empty(); | 608 bool was_empty = incoming_queue_.empty(); |
| 512 incoming_queue_.push(pending_task); | 609 incoming_queue_.push(*pending_closure); |
| 610 pending_closure->closure.Reset(); | |
| 513 if (!was_empty) | 611 if (!was_empty) |
| 514 return; // Someone else should have started the sub-pump. | 612 return; // Someone else should have started the sub-pump. |
| 515 | 613 |
| 516 pump = pump_; | 614 pump = pump_; |
| 517 } | 615 } |
| 518 // Since the incoming_queue_ may contain a task that destroys this message | 616 // Since the incoming_queue_ may contain a task that destroys this message |
| 519 // loop, we cannot exit incoming_queue_lock_ until we are done with |this|. | 617 // loop, we cannot exit incoming_queue_lock_ until we are done with |this|. |
| 520 // We use a stack-based reference to the message pump so that we can call | 618 // We use a stack-based reference to the message pump so that we can call |
| 521 // ScheduleWork outside of incoming_queue_lock_. | 619 // ScheduleWork outside of incoming_queue_lock_. |
| 522 | 620 |
| (...skipping 28 matching lines...) Expand all Loading... | |
| 551 return false; | 649 return false; |
| 552 } | 650 } |
| 553 | 651 |
| 554 for (;;) { | 652 for (;;) { |
| 555 ReloadWorkQueue(); | 653 ReloadWorkQueue(); |
| 556 if (work_queue_.empty()) | 654 if (work_queue_.empty()) |
| 557 break; | 655 break; |
| 558 | 656 |
| 559 // Execute oldest task. | 657 // Execute oldest task. |
| 560 do { | 658 do { |
| 561 PendingTask pending_task = work_queue_.front(); | 659 PendingClosure pending_closure = work_queue_.front(); |
| 562 work_queue_.pop(); | 660 work_queue_.pop(); |
| 563 if (!pending_task.delayed_run_time.is_null()) { | 661 if (!pending_closure.delayed_run_time.is_null()) { |
| 564 AddToDelayedWorkQueue(pending_task); | 662 AddToDelayedWorkQueue(pending_closure); |
| 565 // If we changed the topmost task, then it is time to re-schedule. | 663 // If we changed the topmost task, then it is time to re-schedule. |
| 566 if (delayed_work_queue_.top().task == pending_task.task) | 664 if (delayed_work_queue_.top().closure.Equals(pending_closure.closure)) |
| 567 pump_->ScheduleDelayedWork(pending_task.delayed_run_time); | 665 pump_->ScheduleDelayedWork(pending_closure.delayed_run_time); |
| 568 } else { | 666 } else { |
| 569 if (DeferOrRunPendingTask(pending_task)) | 667 if (DeferOrRunPendingClosure(pending_closure)) |
| 570 return true; | 668 return true; |
| 571 } | 669 } |
| 572 } while (!work_queue_.empty()); | 670 } while (!work_queue_.empty()); |
| 573 } | 671 } |
| 574 | 672 |
| 575 // Nothing happened. | 673 // Nothing happened. |
| 576 return false; | 674 return false; |
| 577 } | 675 } |
| 578 | 676 |
| 579 bool MessageLoop::DoDelayedWork(base::TimeTicks* next_delayed_work_time) { | 677 bool MessageLoop::DoDelayedWork(TimeTicks* next_delayed_work_time) { |
| 580 if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) { | 678 if (!nestable_tasks_allowed_ || delayed_work_queue_.empty()) { |
| 581 recent_time_ = *next_delayed_work_time = TimeTicks(); | 679 recent_time_ = *next_delayed_work_time = TimeTicks(); |
| 582 return false; | 680 return false; |
| 583 } | 681 } |
| 584 | 682 |
| 585 // When we "fall behind," there will be a lot of tasks in the delayed work | 683 // When we "fall behind," there will be a lot of tasks in the delayed work |
| 586 // queue that are ready to run. To increase efficiency when we fall behind, | 684 // queue that are ready to run. To increase efficiency when we fall behind, |
| 587 // we will only call Time::Now() intermittently, and then process all tasks | 685 // we will only call Time::Now() intermittently, and then process all tasks |
| 588 // that are ready to run before calling it again. As a result, the more we | 686 // that are ready to run before calling it again. As a result, the more we |
| 589 // fall behind (and have a lot of ready-to-run delayed tasks), the more | 687 // fall behind (and have a lot of ready-to-run delayed tasks), the more |
| 590 // efficient we'll be at handling the tasks. | 688 // efficient we'll be at handling the tasks. |
| 591 | 689 |
| 592 TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time; | 690 TimeTicks next_run_time = delayed_work_queue_.top().delayed_run_time; |
| 593 if (next_run_time > recent_time_) { | 691 if (next_run_time > recent_time_) { |
| 594 recent_time_ = TimeTicks::Now(); // Get a better view of Now(). | 692 recent_time_ = TimeTicks::Now(); // Get a better view of Now(). |
| 595 if (next_run_time > recent_time_) { | 693 if (next_run_time > recent_time_) { |
| 596 *next_delayed_work_time = next_run_time; | 694 *next_delayed_work_time = next_run_time; |
| 597 return false; | 695 return false; |
| 598 } | 696 } |
| 599 } | 697 } |
| 600 | 698 |
| 601 PendingTask pending_task = delayed_work_queue_.top(); | 699 PendingClosure pending_closure = delayed_work_queue_.top(); |
| 602 delayed_work_queue_.pop(); | 700 delayed_work_queue_.pop(); |
| 603 | 701 |
| 604 if (!delayed_work_queue_.empty()) | 702 if (!delayed_work_queue_.empty()) |
| 605 *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time; | 703 *next_delayed_work_time = delayed_work_queue_.top().delayed_run_time; |
| 606 | 704 |
| 607 return DeferOrRunPendingTask(pending_task); | 705 return DeferOrRunPendingClosure(pending_closure); |
| 608 } | 706 } |
| 609 | 707 |
| 610 bool MessageLoop::DoIdleWork() { | 708 bool MessageLoop::DoIdleWork() { |
| 611 if (ProcessNextDelayedNonNestableTask()) | 709 if (ProcessNextDelayedNonNestableTask()) |
| 612 return true; | 710 return true; |
| 613 | 711 |
| 614 if (state_->quit_received) | 712 if (state_->quit_received) |
| 615 pump_->Quit(); | 713 pump_->Quit(); |
| 616 | 714 |
| 617 return false; | 715 return false; |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 635 #if !defined(OS_MACOSX) | 733 #if !defined(OS_MACOSX) |
| 636 dispatcher = NULL; | 734 dispatcher = NULL; |
| 637 #endif | 735 #endif |
| 638 } | 736 } |
| 639 | 737 |
| 640 MessageLoop::AutoRunState::~AutoRunState() { | 738 MessageLoop::AutoRunState::~AutoRunState() { |
| 641 loop_->state_ = previous_state_; | 739 loop_->state_ = previous_state_; |
| 642 } | 740 } |
| 643 | 741 |
| 644 //------------------------------------------------------------------------------ | 742 //------------------------------------------------------------------------------ |
| 645 // MessageLoop::PendingTask | 743 // MessageLoop::PendingClosure |
| 646 | 744 |
| 647 bool MessageLoop::PendingTask::operator<(const PendingTask& other) const { | 745 MessageLoop::PendingClosure::PendingClosure( |
| 746 const base::Closure& closure, | |
| 747 const tracked_objects::Location& posted_from, | |
| 748 TimeTicks delayed_run_time, | |
| 749 bool nestable) | |
| 750 : closure(closure), | |
| 751 time_posted(TimeTicks::Now()), | |
| 752 delayed_run_time(delayed_run_time), sequence_num(0), | |
| 753 nestable(nestable) { | |
| 754 #if defined(TRACK_ALL_TASK_OBJECTS) | |
| 755 if (tracked_objects::ThreadData::IsActive()) { | |
| 756 tracked_objects::ThreadData* current_thread_data = | |
| 757 tracked_objects::ThreadData::current(); | |
| 758 if (current_thread_data) { | |
| 759 post_births = current_thread_data->TallyABirth(posted_from); | |
| 760 } else { | |
| 761 // Shutdown started, and this thread wasn't registered. | |
| 762 post_births = NULL; | |
| 763 } | |
| 764 } | |
| 765 #endif // defined(TRACK_ALL_TASK_OBJECTS) | |
| 766 } | |
| 767 | |
| 768 bool MessageLoop::PendingClosure::operator<(const PendingClosure& other) const { | |
| 648 // Since the top of a priority queue is defined as the "greatest" element, we | 769 // Since the top of a priority queue is defined as the "greatest" element, we |
| 649 // need to invert the comparison here. We want the smaller time to be at the | 770 // need to invert the comparison here. We want the smaller time to be at the |
| 650 // top of the heap. | 771 // top of the heap. |
| 651 | 772 |
| 652 if (delayed_run_time < other.delayed_run_time) | 773 if (delayed_run_time < other.delayed_run_time) |
| 653 return false; | 774 return false; |
| 654 | 775 |
| 655 if (delayed_run_time > other.delayed_run_time) | 776 if (delayed_run_time > other.delayed_run_time) |
| 656 return true; | 777 return true; |
| 657 | 778 |
| (...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 713 Watcher *delegate) { | 834 Watcher *delegate) { |
| 714 return pump_libevent()->WatchFileDescriptor( | 835 return pump_libevent()->WatchFileDescriptor( |
| 715 fd, | 836 fd, |
| 716 persistent, | 837 persistent, |
| 717 static_cast<base::MessagePumpLibevent::Mode>(mode), | 838 static_cast<base::MessagePumpLibevent::Mode>(mode), |
| 718 controller, | 839 controller, |
| 719 delegate); | 840 delegate); |
| 720 } | 841 } |
| 721 | 842 |
| 722 #endif | 843 #endif |
| OLD | NEW |